hadoop-hbase study notes
create "t",{NAME=>"t_id"},{NAME=>"t_vl"}
describe "t"
disable "t"
alter "t",{NAME=>"t_info",VERSIONS=>3}
put "t","1001","t_vl:age","25" //1001是rowkey
get "t","1001"
scan "t"
delete "t","1001","t_vl:age"
//Repairing inconsistencies with hbck
1.hbase hbck -fixMeta    //repair hbase:meta entries
2.hbase hbck -fixAssignments    //repair region assignments
//2.ImportTsv --worked
hbase org.apache.hadoop.hbase.mapreduce.ImportTsv -Dimporttsv.separator="," -Dimporttsv.columns=HBASE_ROW_KEY,cf sample /luo/sample1.csv
//Dimporttsv.bulk.output: note that the output directory must not already exist, otherwise it errors out ---but this attempt did not succeed (here /luo already exists, since it holds sample1.csv)
hbase org.apache.hadoop.hbase.mapreduce.ImportTsv -Dimporttsv.separator="," -Dimporttsv.bulk.output=/luo -Dimporttsv.columns=HBASE_ROW_KEY,cf sample1 /luo/sample1.csv
hadoop jar /usr/lib/hbase/hbase-server-1.0.0-cdh5.4.3.jar completebulkload /luo1 sample1    //loads the HFiles produced by bulk.output into table sample1; the directory here should match the bulk.output path above
//Export first, then import --worked
hbase org.apache.hadoop.hbase.mapreduce.Export sample /luo3
hbase org.apache.hadoop.hbase.mapreduce.Import sample2 /luo3 -----the sample2 table must be created beforehand
//HBase secondary indexes
1.MapReduce approach: scan the primary table with a map-only job and write value->rowkey entries into a separate index table (see the sketch below)
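A minimal sketch of the MapReduce approach, assuming a primary table "student" with column "info:name" and a pre-created index table "student_name_idx" with family "idx" (these names are illustrative assumptions): the mapper reads every row of the primary table and emits one index row keyed by the indexed value, storing the original rowkey as a column.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.mapreduce.Job;

public class SecondaryIndexBuilder {

    // Mapper: for each row of the source table, write a row into the index
    // table whose rowkey is the indexed column value; the original rowkey is
    // stored in column idx:rowkey so it can be looked up from the index.
    static class IndexMapper extends TableMapper<ImmutableBytesWritable, Put> {
        @Override
        protected void map(ImmutableBytesWritable rowKey, Result columns, Context context)
                throws java.io.IOException, InterruptedException {
            byte[] name = columns.getValue(Bytes.toBytes("info"), Bytes.toBytes("name"));
            if (name == null) return;
            Put indexPut = new Put(name);  // index rowkey = indexed column value
            indexPut.addColumn(Bytes.toBytes("idx"), Bytes.toBytes("rowkey"), rowKey.get());
            context.write(new ImmutableBytesWritable(name), indexPut);
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "build-secondary-index");
        job.setJarByClass(SecondaryIndexBuilder.class);

        Scan scan = new Scan();
        scan.setCaching(500);
        scan.setCacheBlocks(false);   // full-table scan for MR jobs: don't pollute the block cache

        TableMapReduceUtil.initTableMapperJob("student", scan,
                IndexMapper.class, ImmutableBytesWritable.class, Put.class, job);
        TableMapReduceUtil.initTableReducerJob("student_name_idx", null, job);
        job.setNumReduceTasks(0);     // map-only: the Puts go straight to the index table

        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}

A lookup by name then becomes: read student_name_idx to get the rowkey, then Get the full row from student. If the indexed value is not unique, the original rowkey is usually appended to the index rowkey to avoid collisions.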
//HBase snapshots
snapshot 'student','student_snapshot' ---take a snapshot of the student table named student_snapshot
list_snapshots ---list all snapshots
clone_snapshot 'student_snapshot','student_new' --create a new table from the given snapshot
restore_snapshot 'student_snapshot' ---restore the table from the snapshot (the table must be disabled first)
delete_snapshot 'student_snapshot' ---delete the snapshot
hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot -snapshot student_snapshot -copy-to hdfs://server:8083/hbase
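The same snapshot operations are also available through the Java Admin API; a minimal sketch using the table and snapshot names from the shell commands above:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SnapshotDemo {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableName student = TableName.valueOf("student");

            admin.snapshot("student_snapshot", student);          // snapshot 'student','student_snapshot'
            System.out.println(admin.listSnapshots());            // list_snapshots
            admin.cloneSnapshot("student_snapshot",
                    TableName.valueOf("student_new"));            // clone_snapshot
            admin.disableTable(student);                          // restore requires the table to be disabled
            admin.restoreSnapshot("student_snapshot");            // restore_snapshot
            admin.enableTable(student);
            admin.deleteSnapshot("student_snapshot");             // delete_snapshot
        }
    }
}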
//Solr real-time query scheme
HBase provides storage for massive amounts of data
Solr provides index building and querying
hbase-indexer provides automated index building
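A minimal sketch of the read path in this scheme: Solr answers the query and returns rowkeys, then HBase serves the full rows. The Solr URL, collection and field names ("name", "rowkey") are illustrative assumptions and depend on how hbase-indexer is configured; the client shown is the SolrJ 6+ HttpSolrClient builder (older SolrJ versions use HttpSolrServer for the same purpose).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.SolrDocument;

public class SolrHBaseQueryDemo {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (HttpSolrClient solr =
                     new HttpSolrClient.Builder("http://solrhost:8983/solr/student_collection").build();
             Connection conn = ConnectionFactory.createConnection(conf);
             Table student = conn.getTable(TableName.valueOf("student"))) {

            // 1. Solr answers the "query by value" part and returns matching documents.
            QueryResponse resp = solr.query(new SolrQuery("name:luo"));

            // 2. HBase answers the "fetch the full row" part, by rowkey.
            for (SolrDocument doc : resp.getResults()) {
                String rowkey = (String) doc.getFieldValue("rowkey");
                Result row = student.get(new Get(Bytes.toBytes(rowkey)));
                System.out.println(rowkey + " -> " + row);
            }
        }
    }
}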