1. Install and configure ZooKeeper
2. Install and configure Kafka
3. Start ZooKeeper
4. Start Kafka
5. Create a topic (commands for steps 3-5 are sketched below)
[root@mini3 kafka]# bin/kafka-console-producer.sh --broker-list mini1:9092 --topic cyf-test
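For reference, steps 3 through 5 correspond to commands along the following lines. This is a sketch only, assuming the standard ZooKeeper/Kafka distribution layout and the mini1/mini2/mini3 hosts used above; adjust the install paths, partition count, and replication factor to your own cluster (the old --zookeeper form of kafka-topics.sh matches the Kafka version this receiver-based code targets):

[root@mini1 zookeeper]# bin/zkServer.sh start
[root@mini1 kafka]# bin/kafka-server-start.sh -daemon config/server.properties
[root@mini1 kafka]# bin/kafka-topics.sh --create --zookeeper mini1:2181 --replication-factor 1 --partitions 2 --topic cyf-test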
Program code
package org.apache.spark

import org.apache.spark.HashPartitioner
import org.apache.spark.SparkConf
import org.apache.spark.streaming.Seconds
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.kafka.KafkaUtils

object KafkaWordCount {

  // For each key: sum this batch's counts and add the previous running total.
  val updateFunction = (iter: Iterator[(String, Seq[Int], Option[Int])]) => {
    iter.flatMap { case (x, y, z) => Some(y.sum + z.getOrElse(0)).map(v => (x, v)) }
  }

  def main(args: Array[String]) {
    val conf = new SparkConf().setMaster("local[2]").setAppName("KafkaWordCount")
    val ssc = new StreamingContext(conf, Seconds(5))

    // Set the checkpoint directory locally:
    // ssc.checkpoint("./")
    // Write checkpoints to HDFS:
    ssc.checkpoint("hdfs://mini1:9000/kafkatest")

    // val Array(zkQuorum, groupId, topics, numThreads) = args
    val Array(zkQuorum, groupId, topics, numThreads) =
      Array[String]("mini1:2181,mini2:2181,mini3:2181", "g1", "cyf-test", "2")

    val topicMap = topics.split(",").map((_, numThreads.toInt)).toMap
    val lines = KafkaUtils.createStream(ssc, zkQuorum, groupId, topicMap).map(_._2)
    val results = lines.flatMap(_.split(" ")).map((_, 1))
      .updateStateByKey(updateFunction, new HashPartitioner(ssc.sparkContext.defaultParallelism), true)
    results.print()

    ssc.start()
    ssc.awaitTermination()
  }
}
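To make the state update explicit, here is a minimal standalone sketch of what updateFunction computes for one micro-batch. It needs no Spark at all; the object name and sample data are made up for illustration:

object UpdateFunctionDemo {
  // Same shape as updateFunction above: (key, new values in this batch, previous state).
  val updateFunction = (iter: Iterator[(String, Seq[Int], Option[Int])]) => {
    iter.flatMap { case (word, newCounts, prevTotal) =>
      Some(newCounts.sum + prevTotal.getOrElse(0)).map(total => (word, total))
    }
  }

  def main(args: Array[String]): Unit = {
    // One simulated micro-batch: "hello" appeared twice now and 3 times before;
    // "spark" appears for the first time, so it has no previous state.
    val batch = Iterator(
      ("hello", Seq(1, 1), Some(3)),
      ("spark", Seq(1), None)
    )
    updateFunction(batch).foreach(println) // prints (hello,5) then (spark,1)
  }
}

Running this prints (hello,5) and (spark,1): each word's new per-batch counts are summed and added to its previous running total, which is exactly the state carried across batches by updateStateByKey.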
A problem I ran into once: http://www.javashuo.com/article/p-qnbbxhbw-ed.html