首页 > 代码库 > 设置hdfs和hbase副本数。hadoop2.5.2 hbase0.98.6

设置hdfs和hbase副本数。hadoop2.5.2 hbase0.98.6

hdfs副本和基本读写。

core-site.xml
hdfs-site.xml

从/etc/hdfs1/conf下拷贝到工作空间


import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
// * hadoop2.5.2
public class CopyOfHadoopDFSFileReadWrite {
 
    static void printAndExit(String str) {
        System.err.println(str);
        System.exit(1);
    }

    public static void main (String[] argv) throws IOException {
        Configuration conf = new Configuration();   
        FileSystem fs = FileSystem.get(conf);
        argv=new String[]{"/tmp/hello.txt"};
        Path outFile = new Path(argv[0]);
        if (fs.exists(outFile))
            printAndExit("Output already exists");
        FSDataOutputStream out = fs.create(outFile,(short)2); 
        try {
            out.write("hello 扒拉扒拉了吧啦啦啦不".getBytes());
        } catch (IOException e) {
            System.out.println("Error while copying file");
        } finally {
            out.close();
        }
    }
}

（原文此处为配置文件截图，略）
hbase-site.xml
从/etc/hyperbase1/conf下拷贝
打开管理页面 http://192.168.146.128:8180/#/dashboard ，确认 hyperbase1 服务处于启动状态。


// * 副本数量   hbase0.98.6
public class HbaseCreateTable {

    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        HBaseAdmin ha = new HBaseAdmin(conf);
        HTableDescriptor htd = new HTableDescriptor("testReplication".getBytes());       

      HColumnDescriptor hcd1 = new HColumnDescriptor("s").setMaxVersions(30)
                .setBloomFilterType(BloomType.ROW);
        hcd1.setConfiguration("DFS_REPLICATION", "2"); //set columnfamily replication 

        htd.addFamily(hcd1);

    ha.createTable(htd);
        ha.close();
    }
}

hdfs dfs -ls /hyperbase1/data/default/testReplication/c38f234712a99d45797ef1bdd6c3b09a/s
用上面的 hdfs dfs -ls 命令查看：输出第二列即为各文件的副本数。
 

设置hdfs和hbase副本数。hadoop2.5.2 hbase0.98.6