HBase API Operations

1 Environment Preparation

After creating a new project, add the following dependencies to pom.xml:

<dependency>
    <groupId>org.apache.hbase</groupId>
    <artifactId>hbase-server</artifactId>
    <version>1.3.1</version>
</dependency>

<dependency>
    <groupId>org.apache.hbase</groupId>
    <artifactId>hbase-client</artifactId>
    <version>1.3.1</version>
</dependency>

<dependency>
    <groupId>jdk.tools</groupId>
    <artifactId>jdk.tools</artifactId>
    <version>1.8</version>
    <scope>system</scope>
    <systemPath>${JAVA_HOME}/lib/tools.jar</systemPath>
</dependency>

2 HBase API

2.1 Obtaining the Configuration Object

public static Configuration conf;
static{
    // Instantiate via HBaseConfiguration's factory method
    conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "192.168.9.102");
    conf.set("hbase.zookeeper.property.clientPort", "2181");
}

2.2 Checking Whether a Table Exists

public static boolean isTableExist(String tableName) throws MasterNotRunningException,
 ZooKeeperConnectionException, IOException{
    // Managing and accessing tables in HBase requires an HBaseAdmin object first
    //Connection connection = ConnectionFactory.createConnection(conf);
    //HBaseAdmin admin = (HBaseAdmin) connection.getAdmin();
    HBaseAdmin admin = new HBaseAdmin(conf);
    return admin.tableExists(tableName);
}
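
The new HBaseAdmin(conf) constructor used above is deprecated in the 1.x client; the commented-out lines hint at the newer Connection-based style. A minimal sketch of that approach (isTableExistV2 is only an illustrative name; it reuses the same conf field, with imports from org.apache.hadoop.hbase and org.apache.hadoop.hbase.client assumed):

public static boolean isTableExistV2(String tableName) throws IOException{
    // Obtain a Connection from the factory, then an Admin from the connection
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
        return admin.tableExists(TableName.valueOf(tableName));
    }
}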

2.3 Creating a Table

public static void createTable(String tableName, String... columnFamily) throws
 MasterNotRunningException, ZooKeeperConnectionException, IOException{
    HBaseAdmin admin = new HBaseAdmin(conf);
    // Check whether the table already exists
    if(isTableExist(tableName)){
        System.out.println("Table " + tableName + " already exists");
        //System.exit(0);
    }else{
        // Create the table descriptor; the table name must be converted to bytes
        HTableDescriptor descriptor = new HTableDescriptor(TableName.valueOf(tableName));
        // Add one or more column families
        for(String cf : columnFamily){
            descriptor.addFamily(new HColumnDescriptor(cf));
        }
        // Create the table according to its descriptor
        admin.createTable(descriptor);
        System.out.println("Table " + tableName + " created successfully!");
    }
}

2.4 Deleting a Table

public static void dropTable(String tableName) throws MasterNotRunningException,
 ZooKeeperConnectionException, IOException{
    HBaseAdmin admin = new HBaseAdmin(conf);
    if(isTableExist(tableName)){
        // A table must be disabled before it can be deleted
        admin.disableTable(tableName);
        admin.deleteTable(tableName);
        System.out.println("Table " + tableName + " deleted successfully!");
    }else{
        System.out.println("Table " + tableName + " does not exist!");
    }
}

2.5 Inserting Data into a Table

public static void addRowData(String tableName, String rowKey, String columnFamily, String
 column, String value) throws IOException{
    // Create an HTable object
    HTable hTable = new HTable(conf, tableName);
    // Build the row to insert
    Put put = new Put(Bytes.toBytes(rowKey));
    // Assemble the data into the Put object
    put.add(Bytes.toBytes(columnFamily), Bytes.toBytes(column), Bytes.toBytes(value));
    hTable.put(put);
    hTable.close();
    System.out.println("Data inserted successfully");
}
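
In the 1.x client, both the HTable(conf, tableName) constructor and Put.add(byte[], byte[], byte[]) are deprecated in favor of the Connection/Table API and Put.addColumn. A minimal sketch under those assumptions (addRowDataV2 is only an illustrative name, reusing the same conf):

public static void addRowDataV2(String tableName, String rowKey, String columnFamily,
 String column, String value) throws IOException{
    // Obtain the Table through a Connection instead of the deprecated HTable constructor
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf(tableName))) {
        Put put = new Put(Bytes.toBytes(rowKey));
        // addColumn replaces the deprecated add(byte[], byte[], byte[]) overload
        put.addColumn(Bytes.toBytes(columnFamily), Bytes.toBytes(column), Bytes.toBytes(value));
        table.put(put);
    }
}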

2.6 Deleting Multiple Rows

public static void deleteMultiRow(String tableName, String... rows) throws IOException{
    HTable hTable = new HTable(conf, tableName);
    List<Delete> deleteList = new ArrayList<Delete>();
    // Build one Delete object per row key
    for(String row : rows){
        Delete delete = new Delete(Bytes.toBytes(row));
        deleteList.add(delete);
    }
    // Submit all deletes in a single batch call
    hTable.delete(deleteList);
    hTable.close();
}

 

2.7 Retrieving All Data

public static void getAllRows(String tableName) throws IOException{
    HTable hTable = new HTable(conf, tableName);
    // Create the object used to scan the regions
    Scan scan = new Scan();
    // Use HTable to obtain a ResultScanner implementation instance
    ResultScanner resultScanner = hTable.getScanner(scan);
    for(Result result : resultScanner){
        Cell[] cells = result.rawCells();
        for(Cell cell : cells){
            // Extract the row key
            System.out.println("Row key: " + Bytes.toString(CellUtil.cloneRow(cell)));
            // Extract the column family
            System.out.println("Column family: " + Bytes.toString(CellUtil.cloneFamily(cell)));
            System.out.println("Column: " + Bytes.toString(CellUtil.cloneQualifier(cell)));
            System.out.println("Value: " + Bytes.toString(CellUtil.cloneValue(cell)));
        }
    }
}
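
A full-table scan can be expensive; Scan also accepts a start and stop row key to restrict the range. A minimal sketch of such a range scan (scanRowRange and its parameters are illustrative, reusing the same conf):

public static void scanRowRange(String tableName, String startRow, String stopRow) throws IOException{
    HTable hTable = new HTable(conf, tableName);
    // Scan only the rows in [startRow, stopRow) instead of the whole table
    Scan scan = new Scan(Bytes.toBytes(startRow), Bytes.toBytes(stopRow));
    ResultScanner resultScanner = hTable.getScanner(scan);
    for(Result result : resultScanner){
        for(Cell cell : result.rawCells()){
            System.out.println("Row key: " + Bytes.toString(CellUtil.cloneRow(cell))
                + ", value: " + Bytes.toString(CellUtil.cloneValue(cell)));
        }
    }
    resultScanner.close();
    hTable.close();
}

For example, scanRowRange("fruit", "1001", "1003") would return rows 1001 and 1002 but not 1003.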

2.8 Retrieving a Single Row

public static void getRow(String tableName, String rowKey) throws IOException{
    HTable table = new HTable(conf, tableName);
    Get get = new Get(Bytes.toBytes(rowKey));
    //get.setMaxVersions(); show all versions
    //get.setTimeStamp(); show the version with the specified timestamp
    Result result = table.get(get);
    for(Cell cell : result.rawCells()){
        System.out.println("Row key: " + Bytes.toString(result.getRow()));
        System.out.println("Column family: " + Bytes.toString(CellUtil.cloneFamily(cell)));
        System.out.println("Column: " + Bytes.toString(CellUtil.cloneQualifier(cell)));
        System.out.println("Value: " + Bytes.toString(CellUtil.cloneValue(cell)));
        System.out.println("Timestamp: " + cell.getTimestamp());
    }
}

2.9 Retrieving a Specific "Column Family:Column" of a Row

public static void getRowQualifier(String tableName, String rowKey, String family, String
 qualifier) throws IOException{
    HTable table = new HTable(conf, tableName);
    Get get = new Get(Bytes.toBytes(rowKey));
    get.addColumn(Bytes.toBytes(family), Bytes.toBytes(qualifier));
    Result result = table.get(get);
    for(Cell cell : result.rawCells()){
        System.out.println("Row key: " + Bytes.toString(result.getRow()));
        System.out.println("Column family: " + Bytes.toString(CellUtil.cloneFamily(cell)));
        System.out.println("Column: " + Bytes.toString(CellUtil.cloneQualifier(cell)));
        System.out.println("Value: " + Bytes.toString(CellUtil.cloneValue(cell)));
    }
}
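
To tie the helpers above together, here is a minimal sketch of a main method that exercises them (the table name "student" and the sample values are only examples):

public static void main(String[] args) throws Exception{
    // Create a table with one column family, insert a cell, then read it back
    createTable("student", "info");
    addRowData("student", "1001", "info", "name", "Nick");
    getRow("student", "1001");
    getRowQualifier("student", "1001", "info", "name");
    getAllRows("student");
    // Clean up the sample row
    deleteMultiRow("student", "1001");
}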

 

 

3 MapReduce

Through HBase's Java API, we can implement MapReduce processes that accompany HBase operations, for example using MapReduce to import data from the local file system into an HBase table, or reading raw data out of HBase and then analyzing it with MapReduce.

3.1 The Official HBase-MapReduce

1. Check what is required to execute HBase's MapReduce tasks:

$ bin/hbase mapredcp

2. Import the environment variables

(1) Export the environment variables (temporary effect; run the following on the command line):

$ export HBASE_HOME=/opt/module/hbase-1.3.1
$ export HADOOP_HOME=/opt/module/hadoop-2.7.2
$ export HADOOP_CLASSPATH=`${HBASE_HOME}/bin/hbase mapredcp` 

(2) For a permanent effect, configure in /etc/profile:

export HBASE_HOME=/opt/module/hbase-1.3.1
export HADOOP_HOME=/opt/module/hadoop-2.7.2

Also configure in hadoop-env.sh (note: add it after the for loop):

export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/opt/module/hbase/lib/*

3. Run the official MapReduce tasks

-- Case 1: Count how many rows there are in the student table

$ /opt/module/hadoop-2.7.2/bin/yarn jar lib/hbase-server-1.3.1.jar rowcounter student

 

-- Case 2: Use MapReduce to import local data into HBase

1) Create a TSV-format file locally: fruit.tsv

1001    Apple       Red
1002    Pear        Yellow
1003    Pineapple   Yellow

2) Create the HBase table

hbase(main):001:0> create 'fruit','info' 

3) Create the input_fruit directory in HDFS and upload the fruit.tsv file

$ /opt/module/hadoop-2.7.2/bin/hdfs dfs -mkdir /input_fruit/
$ /opt/module/hadoop-2.7.2/bin/hdfs dfs -put fruit.tsv /input_fruit/

4) Run MapReduce to import the data into the HBase fruit table

$ /opt/module/hadoop-2.7.2/bin/yarn jar lib/hbase-server-1.3.1.jar importtsv \
-Dimporttsv.columns=HBASE_ROW_KEY,info:name,info:color fruit \
hdfs://hadoop102:9000/input_fruit

5) Use the scan command to view the imported results

hbase(main):001:0> scan 'fruit'

3.2 Custom HBase-MapReduce (1)

Goal: migrate part of the data in the fruit table into the fruit_mr table using MapReduce.

Step-by-step implementation:

1. Build the ReadFruitMapper class, used to read data from the fruit table

import java.io.IOException;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.hbase.util.Bytes;

public class ReadFruitMapper extends TableMapper<ImmutableBytesWritable, Put> {

    @Override
    protected void map(ImmutableBytesWritable key, Result value, Context context) 
    throws IOException, InterruptedException {
        // Extract the name and color from fruit, i.e. read each row of data into a Put object.
        Put put = new Put(key.get());
        // Iterate over the cells of the row
        for(Cell cell: value.rawCells()){
            // Keep only cells from the column family: info
            if("info".equals(Bytes.toString(CellUtil.cloneFamily(cell)))){
                // Column: name
                if("name".equals(Bytes.toString(CellUtil.cloneQualifier(cell)))){
                    // Add this cell to the Put object
                    put.add(cell);
                // Column: color
                }else if("color".equals(Bytes.toString(CellUtil.cloneQualifier(cell)))){
                    // Add this cell to the Put object
                    put.add(cell);
                }
            }
        }
        // Write each row read from fruit to the context as the map output
        context.write(key, put);
    }
}

2. Build the WriteFruitMRReducer class, used to write the data read from the fruit table into the fruit_mr table

import java.io.IOException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableReducer;
import org.apache.hadoop.io.NullWritable;

public class WriteFruitMRReducer extends TableReducer<ImmutableBytesWritable, Put, NullWritable> {
    @Override
    protected void reduce(ImmutableBytesWritable key, Iterable<Put> values, Context context) 
    throws IOException, InterruptedException {
        // Write each row that was read out into the fruit_mr table
        for(Put put: values){
            context.write(NullWritable.get(), put);
        }
    }
}

3. Build Fruit2FruitMRRunner extends Configured implements Tool, used to assemble and run the Job

// Assemble the Job
    public int run(String[] args) throws Exception {
        // Get the Configuration
        Configuration conf = this.getConf();
        // Create the Job
        Job job = Job.getInstance(conf, this.getClass().getSimpleName());
        job.setJarByClass(Fruit2FruitMRRunner.class);

        // Configure the Job
        Scan scan = new Scan();
        scan.setCacheBlocks(false);
        scan.setCaching(500);

        // Set the Mapper; note: import it from the mapreduce package, not the mapred package (the latter is the old API)
        TableMapReduceUtil.initTableMapperJob(
        "fruit",                      // source table name
        scan,                         // Scan controller
        ReadFruitMapper.class,        // Mapper class
        ImmutableBytesWritable.class, // Mapper output key type
        Put.class,                    // Mapper output value type
        job                           // the Job to configure
        );
        // Set the Reducer
        TableMapReduceUtil.initTableReducerJob("fruit_mr", WriteFruitMRReducer.class, job);
        // Set the number of reduce tasks; at least 1
        job.setNumReduceTasks(1);

        boolean isSuccess = job.waitForCompletion(true);
        if(!isSuccess){
            throw new IOException("Job running with error");
        }
        return isSuccess ? 0 : 1;
    }

4. Call and run the Job from the main method

public static void main(String[] args) throws Exception{
    Configuration conf = HBaseConfiguration.create();
    int status = ToolRunner.run(conf, new Fruit2FruitMRRunner(), args);
    System.exit(status);
}
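
For reference, a minimal sketch of how the run() and main() snippets above fit together in one class (imports omitted; the body of run() is the code from step 3):

public class Fruit2FruitMRRunner extends Configured implements Tool {

    // run(): the Job assembly shown in step 3
    public int run(String[] args) throws Exception {
        // ... assemble and submit the Job as in step 3 ...
        return 0;
    }

    // main(): the entry point shown in step 4
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        int status = ToolRunner.run(conf, new Fruit2FruitMRRunner(), args);
        System.exit(status);
    }
}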

5. Package and run the task

$ /opt/module/hadoop-2.7.2/bin/yarn jar ~/softwares/jars/hbase-0.0.1-SNAPSHOT.jar
 com.z.hbase.mr1.Fruit2FruitMRRunner

Tip: before running the task, if the table the data will be imported into does not exist, it must be created in advance.

Tip: Maven packaging command: -P local clean package or -P dev clean package install (to bundle third-party jars into the package, the maven-shade-plugin is required).

3.3 Custom HBase-MapReduce (2)

Goal: write data from HDFS into an HBase table.

Step-by-step implementation:

1. Build the ReadFruitFromHDFSMapper class, used to read file data from HDFS

import java.io.IOException;

import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class ReadFruitFromHDFSMapper extends Mapper<LongWritable, Text, ImmutableBytesWritable, Put> {
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // Data read from HDFS
        String lineValue = value.toString();
        // Split each line on \t and store the fields in a String array
        String[] values = lineValue.split("\t");
        
        // Extract the fields according to their meaning
        String rowKey = values[0];
        String name = values[1];
        String color = values[2];
        
        // Initialize the row key
        ImmutableBytesWritable rowKeyWritable = new ImmutableBytesWritable(Bytes.toBytes(rowKey));
        
        // Initialize the Put object
        Put put = new Put(Bytes.toBytes(rowKey));
        
        // Parameters: column family, column, value
        put.add(Bytes.toBytes("info"), Bytes.toBytes("name"), Bytes.toBytes(name)); 
        put.add(Bytes.toBytes("info"), Bytes.toBytes("color"), Bytes.toBytes(color)); 
        
        context.write(rowKeyWritable, put);
    }
}

2. Build the WriteFruitMRFromTxtReducer class

import java.io.IOException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableReducer;
import org.apache.hadoop.io.NullWritable;

public class WriteFruitMRFromTxtReducer extends TableReducer<ImmutableBytesWritable, Put, NullWritable> {
    @Override
    protected void reduce(ImmutableBytesWritable key, Iterable<Put> values, Context context) throws IOException, InterruptedException {
        // Write each row that was read out into the target HBase table
        for(Put put: values){
            context.write(NullWritable.get(), put);
        }
    }
}

3. Create Txt2FruitRunner to assemble the Job

public int run(String[] args) throws Exception {
    // Get the Configuration
    Configuration conf = this.getConf();

    // Create the Job
    Job job = Job.getInstance(conf, this.getClass().getSimpleName());
    job.setJarByClass(Txt2FruitRunner.class);
    Path inPath = new Path("hdfs://hadoop102:9000/input_fruit/fruit.tsv");
    FileInputFormat.addInputPath(job, inPath);

    // Set the Mapper
    job.setMapperClass(ReadFruitFromHDFSMapper.class);
    job.setMapOutputKeyClass(ImmutableBytesWritable.class);
    job.setMapOutputValueClass(Put.class);

    // Set the Reducer
    TableMapReduceUtil.initTableReducerJob("fruit_mr", WriteFruitMRFromTxtReducer.class, job);

    // Set the number of reduce tasks; at least 1
    job.setNumReduceTasks(1);

    boolean isSuccess = job.waitForCompletion(true);
    if(!isSuccess){
        throw new IOException("Job running with error");
    }

    return isSuccess ? 0 : 1;
}

4. Call and execute the Job

public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    int status = ToolRunner.run(conf, new Txt2FruitRunner(), args);
    System.exit(status);
}

5. Package and run

$ /opt/module/hadoop-2.7.2/bin/yarn jar hbase-0.0.1-SNAPSHOT.jar com.atguigu.hbase.mr2.Txt2FruitRunner

Tip: before running the task, if the table the data will be imported into does not exist, it must be created in advance.

Tip: Maven packaging command: -P local clean package or -P dev clean package install (to bundle third-party jars into the package, the maven-shade-plugin is required).
