Connecting Spark to HBase

1. Environment setup

    1) Copy the lib jars from the HBase directory into <Spark directory>/lib/hbase; Spark depends on these libraries.

    The list is: guava-12.0.1.jar, htrace-core-3.1.0-incubating.jar, and protobuf-java-2.5.0.jar, plus every jar whose name begins with hbase. Nothing else is required, and copying the whole lib directory will cause errors. (A combined shell sketch of these steps follows the list.)

    2) Edit Spark's configuration file (spark-env.sh) and append the following line:

 export SPARK_CLASSPATH=/usr/local/spark-1.5.1-bin-hadoop2.4/lib/hbase/*

    3) Restart the Spark cluster.
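
A minimal shell sketch of the three steps, assuming the installation paths used elsewhere in this article (/usr/local/hbase-1.0.1.1 and /usr/local/spark-1.5.1-bin-hadoop2.4) and a standalone cluster managed by Spark's bundled scripts; adjust to your environment:

HBASE_HOME=/usr/local/hbase-1.0.1.1
SPARK_HOME=/usr/local/spark-1.5.1-bin-hadoop2.4

# 1) Copy only the required jars; copying everything causes errors.
mkdir -p $SPARK_HOME/lib/hbase
cp $HBASE_HOME/lib/guava-12.0.1.jar \
   $HBASE_HOME/lib/htrace-core-3.1.0-incubating.jar \
   $HBASE_HOME/lib/protobuf-java-2.5.0.jar \
   $HBASE_HOME/lib/hbase*.jar \
   $SPARK_HOME/lib/hbase/

# 2) Append the classpath export to spark-env.sh.
echo "export SPARK_CLASSPATH=$SPARK_HOME/lib/hbase/*" >> $SPARK_HOME/conf/spark-env.sh

# 3) Restart the standalone cluster.
$SPARK_HOME/sbin/stop-all.sh
$SPARK_HOME/sbin/start-all.sh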

2. Code

package com.xx;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableInputFormat;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.util.Base64;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaSparkContext;

import java.io.IOException;

/**
 * Reads data from HBase with Spark.
 * @author Chenj
 */
public class ReadHBase {

    private static final Log LOG = LogFactory.getLog(ReadHBase.class);

    private static final String appName = "hbase test";
    private static final String master = "spark://192.168.1.21:7077";

    public static void main(String[] args) {
        SparkConf conf = new SparkConf().
                            setAppName(appName).
                            setMaster(master).
                            setSparkHome(System.getenv("SPARK_HOME")).
                            setJars(new String[]{System.getenv("jars")});  // jar(s) shipped to the executors, path taken from the "jars" env var

        Configuration configuration = HBaseConfiguration.create();

        configuration.set("hbase.zookeeper.property.clientPort", "2181");  //設置zookeeper client端口
        configuration.set("hbase.zookeeper.quorum", "192.168.1.19");   // 設置zookeeper quorum
        configuration.addResource("/usr/local/hbase-1.0.1.1/conf/hbase-site.xml");  //將hbase的配置加載

        configuration.set(TableInputFormat.INPUT_TABLE, "heartSocket");  // table to read
        JavaSparkContext sc = new JavaSparkContext(conf);

        // Restrict the scan to column d:consumeTime.
        Scan scan = new Scan();
        scan.addFamily(Bytes.toBytes("d"));
        scan.addColumn(Bytes.toBytes("d"), Bytes.toBytes("consumeTime"));
        try {
            // TableInputFormat expects the Scan serialized as a Base64 string.
            ClientProtos.Scan proto = ProtobufUtil.toScan(scan);
            String scanToString = Base64.encodeBytes(proto.toByteArray());
            configuration.set(TableInputFormat.SCAN, scanToString);
        } catch (IOException e) {
            e.printStackTrace();
        }

        JavaPairRDD<ImmutableBytesWritable, Result> rdd = sc.newAPIHadoopRDD(configuration,
                TableInputFormat.class,
                ImmutableBytesWritable.class,
                Result.class);

        LOG.info("總個數爲:" + rdd.count());

    }
}
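
The program above only verifies connectivity by counting rows. As a hypothetical next step inside main (before sc.stop()), the (rowkey, Result) pairs can be unpacked into plain strings; the consumeTimes variable and the ten-row printout are illustrative additions, not part of the original program, and assume Java 8 plus an extra import scala.Tuple2;:

        // Hypothetical continuation: map each (rowkey, Result) pair to
        // (rowkey, value of d:consumeTime); getValue returns null for rows
        // that lack the column.
        JavaPairRDD<String, String> consumeTimes = rdd.mapToPair(tuple ->
                new Tuple2<>(Bytes.toString(tuple._1().get()),
                        Bytes.toString(tuple._2().getValue(
                                Bytes.toBytes("d"), Bytes.toBytes("consumeTime")))));

        for (Tuple2<String, String> t : consumeTimes.take(10)) {
            LOG.info(t._1() + " -> " + t._2());
        }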

3. Submit and run

./spark-submit --class com.xx.ReadHBase --master spark://ser21:7077 /usr/local/spark-1.0-SNAPSHOT.jar