spark.yarn.historyServer.address=slave11:18080
spark.history.ui.port=18080
spark.eventLog.enabled=true
spark.eventLog.dir=hdfs:///tmp/spark/events
spark.history.fs.logDirectory=hdfs:///tmp/spark/events
spark.driver.memory=1g
spark.serializer=org.apache.spark.serializer.KryoSerializer
1) Format: barCode@item@value@standardValue@upperLimit@lowerLimit
01055HAXMTXG10100001@KEY_VOLTAGE_TEC_PWR@1.60@1.62@1.75@1.55
01055HAXMTXG10100001@KEY_VOLTAGE_T_C_PWR@1.22@1.24@1.45@0.8
01055HAXMTXG10100001@KEY_VOLTAGE_T_BC_PWR@1.16@1.25@1.45@0.8
01055HAXMTXG10100001@KEY_VOLTAGE_11@1.32@1.25@1.45@0.8
01055HAXMTXG10100001@KEY_VOLTAGE_T_RC_PWR@1.24@1.25@1.45@0.8
01055HAXMTXG10100001@KEY_VOLTAGE_T_VCC_5V@1.93@1.90@1.95@1.65
01055HAXMTXG10100001@KEY_VOLTAGE_T_VDD3V3@1.59@1.62@1.75@1.55
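Before touching HBase, it helps to see how one record maps onto fields. A minimal parsing sketch (the Record and parse names are illustrative, not part of the original code; the later examples use barCode+item as the HBase row key and value as the cell value):

case class Record(barCode: String, item: String, value: Double,
                  standardValue: Double, upperLimit: Double, lowerLimit: Double)

// Split one "@"-delimited line into its six fields
def parse(line: String): Record = {
  val f = line.split("@")
  Record(f(0), f(1), f(2).toDouble, f(3).toDouble, f(4).toDouble, f(5).toDouble)
}

// parse("01055HAXMTXG10100001@KEY_VOLTAGE_TEC_PWR@1.60@1.62@1.75@1.55")
// => Record("01055HAXMTXG10100001", "KEY_VOLTAGE_TEC_PWR", 1.6, 1.62, 1.75, 1.55)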
1) Since this involves HBase, the first step is to create a table from the hbase shell.
Create the tables: create 'data','v' and create 'data1','v'
2) Work from the spark-shell, launched as follows:
bin/spark-shell --master yarn --deploy-mode client --num-executors 5 --executor-memory 1g --executor-cores 2
3) Import the required classes:
import org.apache.spark._
import org.apache.spark.rdd.NewHadoopRDD
import org.apache.hadoop.mapred.JobConf
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.mapreduce.Job
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat
import org.apache.hadoop.fs.Path
import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapred.TableOutputFormat
import org.apache.hadoop.hbase.mapreduce.TableInputFormat
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.client.HBaseAdmin
import org.apache.hadoop.hbase.client.HTable
import org.apache.hadoop.hbase.client.Scan
import org.apache.hadoop.hbase.client.Get
import org.apache.hadoop.hbase.protobuf.ProtobufUtil
import org.apache.hadoop.hbase.util.{Base64, Bytes}
import org.apache.hadoop.hbase.KeyValue
import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat
import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles
import org.apache.hadoop.hbase.HColumnDescriptor
import org.apache.commons.codec.digest.DigestUtils
Create the conf and the table:
val conf = HBaseConfiguration.create()
conf.set(TableInputFormat.INPUT_TABLE, "data1")
val table = new HTable(conf, "data1")
The Put format:
val put = new Put(Bytes.toBytes("rowKey"))
put.add(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"))
Insert 5 rows with a for loop:
for (i <- 1 to 5) {
  val put = new Put(Bytes.toBytes("row" + i))
  put.add(Bytes.toBytes("v"), Bytes.toBytes("value"), Bytes.toBytes("value" + i))
  table.put(put)
}
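To sanity-check from the same spark-shell session, a minimal read-back sketch using Get (this assumes the table handle and imports created above, and the same HBase client API generation as the put.add calls):

val get = new Get(Bytes.toBytes("row1"))
val res = table.get(get)
// should print "value1" if the loop above succeeded
println(new String(res.getValue(Bytes.toBytes("v"), Bytes.toBytes("value"))))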
You can also check the result in the hbase shell.
Read the table back into an RDD with newAPIHadoopRDD:
val hbaseRdd = sc.newAPIHadoopRDD(conf, classOf[TableInputFormat],
  classOf[org.apache.hadoop.hbase.io.ImmutableBytesWritable],
  classOf[org.apache.hadoop.hbase.client.Result])
1)take
hbaseRdd take 1
2)scan
val scan = new Scan()
scan.addFamily(Bytes.toBytes("v"))
val proto = ProtobufUtil.toScan(scan)
val scanToString = Base64.encodeBytes(proto.toByteArray())
conf.set(TableInputFormat.SCAN, scanToString)
val datas = hbaseRdd.map(x => x._2)
  .map { result => (result.getRow, result.getValue(Bytes.toBytes("v"), Bytes.toBytes("value"))) }
  .map(row => (new String(row._1), new String(row._2)))
  .collect
  .foreach(r => println(r._1 + ":" + r._2))
1) Code (writing with normal Puts):
val rdd = sc.textFile("/data/produce/2015/2015-03-01.log")
val data = rdd.map(_.split("@")).map { x => (x(0) + x(1), x(2)) }
data.foreachPartition { partition =>
  // One HBase connection per partition, with auto-flush off and a 3 MB write buffer
  val conf = HBaseConfiguration.create()
  conf.set(TableInputFormat.INPUT_TABLE, "data")
  conf.set("hbase.zookeeper.quorum", "slave5,slave6,slave7")
  conf.set("hbase.zookeeper.property.clientPort", "2181")
  conf.addResource("/home/hadoop/data/lib/hbase-site.xml")
  val table = new HTable(conf, "data")
  table.setAutoFlush(false, false)
  table.setWriteBufferSize(3 * 1024 * 1024)
  partition.foreach { record =>
    val put = new Put(Bytes.toBytes(record._1))
    put.add(Bytes.toBytes("v"), Bytes.toBytes("value"), Bytes.toBytes(record._2))
    table.put(put)
  }
  // Flush the buffered puts once per partition
  table.flushCommits()
}
2) Execution time: 7.6 min
1) Code (writing with BulkLoad):
val conf = HBaseConfiguration.create()
val tableName = "data1"
val table = new HTable(conf, tableName)
conf.set(TableOutputFormat.OUTPUT_TABLE, tableName)

// Configure a job so HFileOutputFormat picks up the table's region layout
lazy val job = Job.getInstance(conf)
job.setMapOutputKeyClass(classOf[ImmutableBytesWritable])
job.setMapOutputValueClass(classOf[KeyValue])
HFileOutputFormat.configureIncrementalLoad(job, table)

// Build (rowkey, KeyValue) pairs; the MD5 prefix spreads the row keys, and the
// sortBy is needed because HFiles must be written in row-key order
val rdd = sc.textFile("/data/produce/2015/2015-03-01.log")
  .map(_.split("@"))
  .map { x => (DigestUtils.md5Hex(x(0) + x(1)).substring(0, 3) + x(0) + x(1), x(2)) }
  .sortBy(x => x._1)
  .map { x =>
    val kv: KeyValue = new KeyValue(Bytes.toBytes(x._1), Bytes.toBytes("v"), Bytes.toBytes("value"), Bytes.toBytes(x._2 + ""))
    (new ImmutableBytesWritable(kv.getKey), kv)
  }

// Write the HFiles to HDFS, then hand them to HBase with LoadIncrementalHFiles
rdd.saveAsNewAPIHadoopFile("/tmp/data1", classOf[ImmutableBytesWritable], classOf[KeyValue], classOf[HFileOutputFormat], job.getConfiguration())
val bulkLoader = new LoadIncrementalHFiles(conf)
bulkLoader.doBulkLoad(new Path("/tmp/data1"), table)
2) Execution time: 7 s
3) Result:
Check in the hbase shell with list 'data1'
Comparing the two runs, the bulk load takes far less time than the normal write path, a speedup of more than 60x here. I did not test with a larger data set, but I expect the gain to remain significant, so I strongly recommend using BulkLoad when importing large amounts of data into HBase.
That is all for this walkthrough of working with HBase from Spark. If anything here is wrong or fails to run, feel free to point it out. Thanks.