Kafka 0.8 vs Kafka 1.0 with Spark 2.2.0

Kafka 0.8 + Spark 2.2.0

<!-- Spark Streaming and Kafka integration dependency (0-8_2.11) -->
<dependency>
    <groupId>org.apache.spark</groupId>
    <artifactId>spark-streaming-kafka-0-8_2.11</artifactId>
    <version>${spark.version}</version>
</dependency>

<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka_2.11</artifactId>
    <version>0.8.2.1</version>
</dependency>
package com.xp.cn.streaming

import kafka.common.TopicAndPartition
import kafka.message.MessageAndMetadata
import kafka.serializer.StringDecoder
import kafka.utils.{ZKGroupTopicDirs, ZkUtils}
import org.I0Itec.zkclient.ZkClient
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka.{HasOffsetRanges, KafkaUtils, OffsetRange}
import org.apache.spark.streaming.{Duration, StreamingContext}

/**
  * Created by zx on 2017/7/31.
  */
object KafkaDirectWordCountV2 {

  def main(args: Array[String]): Unit = {

    // Specify the consumer group name
    val group = "g001"

    // Create the SparkConf
    val conf = new SparkConf().setAppName("KafkaDirectWordCount").setMaster("local[2]")

    // Create the StreamingContext with a 5-second batch interval
    val ssc = new StreamingContext(conf, Duration(5000))

    // Topic to consume
    val topic = "wwcc"

    // Kafka broker list (with the direct approach, Spark Streaming tasks connect straight to Kafka partitions and use the lower-level consumer API, which is more efficient)
    val brokerList = "xupan001:9092,xupan002:9092,xupan003:9092"

    // ZooKeeper quorum, used later to update the consumed offsets (Redis or MySQL could be used instead; a MySQL-based sketch follows this example)
    val zkQuorum = "xupan001:2181,xupan002:2181,xupan003:2181"

    // Set of topic names used when creating the stream; Spark Streaming can consume several topics at once
    val topics: Set[String] = Set(topic)

    // Create a ZKGroupTopicDirs object, which fixes the ZooKeeper directory used to save offsets
    val topicDirs = new ZKGroupTopicDirs(group, topic)
    // The resulting ZooKeeper path, here "/g001/offsets/wwcc"
    val zkTopicPath = s"${topicDirs.consumerOffsetDir}"

    // Prepare the Kafka parameters
    val kafkaParams = Map(
      "metadata.broker.list" -> brokerList,
      "group.id" -> group,
      // Read from the beginning of the topic when no offset has been saved
      "auto.offset.reset" -> kafka.api.OffsetRequest.SmallestTimeString
    )

    // Create a ZooKeeper client from the quorum above; it reads offsets saved in ZooKeeper and updates them
    val zkClient = new ZkClient(zkQuorum)

    // Check whether the path already has child nodes (they exist when we previously saved an offset per partition), e.g.:
    // /g001/offsets/wwcc/0/10001
    // /g001/offsets/wwcc/1/30001
    // /g001/offsets/wwcc/2/10001
    // zkTopicPath -> /g001/offsets/wwcc
    val children = zkClient.countChildren(zkTopicPath)

    var kafkaStream: InputDStream[(String, String)] = null

    // If ZooKeeper holds saved offsets, use them as the starting positions of kafkaStream
    var fromOffsets: Map[TopicAndPartition, Long] = Map()

    // If offsets were saved before
    if (children > 0) {
      for (i <- 0 until children) {
        // Read the saved offset from, e.g., /g001/offsets/wwcc/0 (value such as 10001)
        val partitionOffset = zkClient.readData[String](s"$zkTopicPath/${i}")
        val tp = TopicAndPartition(topic, i)
        // Add each partition's offset to fromOffsets, e.g. (wwcc, 0) -> 10001
        fromOffsets += (tp -> partitionOffset.toLong)
      }
      // Key: the Kafka message key; value: the message payload, e.g. "hello tom hello jerry"
      // Transform every Kafka message into a (key, message) tuple
      val messageHandler = (mmd: MessageAndMetadata[String, String]) => (mmd.key(), mmd.message())

      // Create the direct DStream through KafkaUtils (fromOffsets makes consumption resume from the offsets computed above)
      // Type parameters: [key type, value type, key decoder, value decoder, record type]
      kafkaStream = KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder, (String, String)](ssc, kafkaParams, fromOffsets, messageHandler)
    } else {
      // If nothing was saved, start from the latest (largest) or earliest (smallest) offset, as configured in kafkaParams
      kafkaStream = KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](ssc, kafkaParams, topics)
    }

    // Offset ranges of the current batch
    var offsetRanges = Array[OffsetRange]()

    // With the direct approach, offsets can only be obtained from the KafkaRDDs inside the KafkaDStream,
    // so we cannot apply DStream transformations first; instead we call foreachRDD on kafkaStream,
    // read each KafkaRDD's offset ranges, and then operate on the RDD itself.
    // kafkaStream.foreachRDD itself runs on the Driver; rdd.foreach runs on the Executors.
    kafkaStream.foreachRDD { kafkaRDD =>
      // Only a KafkaRDD can be cast to HasOffsetRanges to obtain the offsets
      offsetRanges = kafkaRDD.asInstanceOf[HasOffsetRanges].offsetRanges
      val lines: RDD[String] = kafkaRDD.map(_._2)

      //對RDD進行操做,觸發Action
      //foreachPartition在Executor中執行
      lines.foreachPartition(partition =>
        partition.foreach(x => {
          println(x)
        })
      )

      for (o <- offsetRanges) {
        // e.g. /g001/offsets/wwcc/0
        val zkPath = s"${topicDirs.consumerOffsetDir}/${o.partition}"
        // Save this partition's untilOffset to ZooKeeper, e.g. /g001/offsets/wwcc/0/20000
        ZkUtils.updatePersistentPath(zkClient, zkPath, o.untilOffset.toString)
      }
    }

    ssc.start()
    ssc.awaitTermination()

  }

}
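The comment near the top of this example notes that Redis or MySQL could replace ZooKeeper as the offset store. Below is a minimal sketch of the MySQL variant, assuming a hypothetical table kafka_offsets(topic, groupId, partitionId, untilOffset) with a primary key over (topic, groupId, partitionId) and the mysql-connector-java driver on the classpath; the connection settings, table name, and schema are illustrative, not part of the original post. readOffsets would replace the zkClient.readData loop above, and saveOffsets the ZkUtils.updatePersistentPath loop:

import java.sql.DriverManager

import kafka.common.TopicAndPartition
import org.apache.spark.streaming.kafka.OffsetRange

object MysqlOffsetStore {

  // Hypothetical connection settings -- adjust to the actual environment
  val url = "jdbc:mysql://xupan001:3306/streaming"
  val user = "root"
  val password = "root"

  // Read the last committed offset of every partition for this group/topic; empty on the first run
  def readOffsets(group: String, topic: String): Map[TopicAndPartition, Long] = {
    val conn = DriverManager.getConnection(url, user, password)
    try {
      val stmt = conn.prepareStatement(
        "SELECT partitionId, untilOffset FROM kafka_offsets WHERE groupId = ? AND topic = ?")
      stmt.setString(1, group)
      stmt.setString(2, topic)
      val rs = stmt.executeQuery()
      var offsets = Map[TopicAndPartition, Long]()
      while (rs.next()) {
        offsets += TopicAndPartition(topic, rs.getInt(1)) -> rs.getLong(2)
      }
      offsets
    } finally {
      conn.close()
    }
  }

  // Upsert the end of each processed OffsetRange, mirroring the ZooKeeper update above
  def saveOffsets(group: String, offsetRanges: Array[OffsetRange]): Unit = {
    val conn = DriverManager.getConnection(url, user, password)
    try {
      val stmt = conn.prepareStatement(
        "REPLACE INTO kafka_offsets (topic, groupId, partitionId, untilOffset) VALUES (?, ?, ?, ?)")
      for (o <- offsetRanges) {
        stmt.setString(1, o.topic)
        stmt.setString(2, group)
        stmt.setInt(3, o.partition)
        stmt.setLong(4, o.untilOffset)
        stmt.addBatch()
      }
      stmt.executeBatch()
    } finally {
      conn.close()
    }
  }
}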

Kafka 1.0 + Spark 2.2.0

<!-- kafka -->
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka_2.11</artifactId>
    <!--<version>0.8.2.1</version>-->
    <version>0.10.0.0</version>
</dependency>

<!-- Spark Streaming and Kafka integration dependency (0-10_2.11) -->
<dependency>
    <groupId>org.apache.spark</groupId>
    <artifactId>spark-streaming-kafka-0-10_2.11</artifactId>
    <version>${spark.version}</version>
</dependency>

package com.xp.cn.streaming

import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.log4j.{Level, Logger}
import org.apache.spark.streaming.kafka010.{HasOffsetRanges, CanCommitOffsets, KafkaUtils}
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent
import org.apache.spark.streaming.kafka010.ConsumerStrategies.Subscribe
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.SparkConf

/**
 * Created by xupan on 2017/12/18.
 * Spark Streaming with the Kafka 0.10 client.
 * Unlike the 0.8 version, there is no need to manually write offsets to ZooKeeper:
 * the 0.10 integration commits offsets back to Kafka itself by default.
 */
object KafkaStreamingV2 {


  def main(args: Array[String]) {
    Logger.getLogger("org.apache.spark").setLevel(Level.ERROR)

    // Create the conf; Spark Streaming needs at least two threads: one to receive data and one to process it
    val conf = new SparkConf().setAppName("KafkaStreamingV2").setMaster("local[4]")

    // Create the StreamingContext; a batch is produced every 10 seconds
    val ssc = new StreamingContext(conf, Seconds(10))

    val group = "v2group"
    val topic = "v2topic"

    // Configure the Kafka parameters
    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> "xupan001:9092,xupan002:9092,xupan003:9092",
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> group,
      // Read from the beginning when Kafka holds no offset for this group, otherwise resume from the stored offset
      "auto.offset.reset" -> "earliest",
      // Do not auto-commit; offsets are committed manually after processing
      "enable.auto.commit" -> (false: java.lang.Boolean)
    )

    // Several topics can be consumed at once
    val topics = Array(topic)

    // Read from Kafka with the direct approach; offsets are read from Kafka itself
    val stream = KafkaUtils.createDirectStream[String,String](
      ssc,
      PreferConsistent, // Location strategy (if Kafka and the Spark job run on the same machines, data is read from the optimal, i.e. local, position)
      Subscribe[String, String](topics, kafkaParams) // Subscription strategy (topics can also be matched by regex, e.g. topic-*; see the SubscribePattern sketch after this example)
    )

    stream.foreachRDD(rdd => {

      if (!rdd.isEmpty()) {
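        // Obtain this batch's offset ranges (the RDD can be cast to HasOffsetRanges, as in the 0.8 example)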
        val offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges


        //==================== business logic below ====================
        rdd.foreachPartition(part => {
          part.foreach(line => {
            val value = line.value()
            val key = line.key()
            println("key : " + key + "  value : " + value)
          })
        })
        //==================== business logic above ====================


        // Asynchronously commit this batch's offsets back to Kafka: commitAsync(offsetRanges: Array[OffsetRange])
        stream.asInstanceOf[CanCommitOffsets].commitAsync(offsetRanges)
      }


    })

    ssc.start()
    ssc.awaitTermination()
  }

}
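The subscription strategy above takes a fixed topic list, but as its comment notes, topics can also be matched by a regular expression. Below is a minimal sketch of that variant, assuming hypothetical topic names matching v2topic-.* (the pattern is illustrative); only the ConsumerStrategy changes, the rest of the program stays the same:

import java.util.regex.Pattern

import org.apache.spark.streaming.kafka010.ConsumerStrategies.SubscribePattern

// Subscribe to every topic whose name matches the regex instead of a fixed list
val patternStream = KafkaUtils.createDirectStream[String, String](
  ssc,
  PreferConsistent,
  SubscribePattern[String, String](Pattern.compile("v2topic-.*"), kafkaParams)
)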