1) Create a Java project in Eclipse.
2) Create a lib folder in the project root.
3) Unpack the Kafka distribution, copy the jars from its libs directory into the project's lib directory, and add them to the build path.
4) Start the ZooKeeper and Kafka clusters, and open a consumer on the Kafka cluster:
[root@node21 kafka]$ bin/kafka-console-consumer.sh --zookeeper node21:2181,node22:2181,node23:2181 --topic firstTopic
Alternatively, when using Maven, add the dependency to the pom file:
<!-- https://mvnrepository.com/artifact/org.apache.kafka/kafka -->
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka_2.11</artifactId>
    <version>1.1.0</version>
</dependency>
Producer using the deprecated API:

package com.xyg.kafka.producer;

import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;

import java.util.Properties;

public class OldProducer {

    @SuppressWarnings("deprecation")
    public static void main(String[] args) {
        Properties properties = new Properties();
        properties.put("metadata.broker.list", "node21:9092,node22:9092,node23:9092");
        properties.put("request.required.acks", "1");
        properties.put("serializer.class", "kafka.serializer.StringEncoder");

        Producer<Integer, String> producer = new Producer<Integer, String>(new ProducerConfig(properties));
        KeyedMessage<Integer, String> message = new KeyedMessage<Integer, String>("firstTopic", "hello world");
        producer.send(message);
    }
}
Producer using the new API:

package com.xyg.kafka.producer;

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;

public class NewProducer {

    public static void main(String[] args) {
        Properties props = new Properties();
        // Kafka broker host names and ports
        props.put("bootstrap.servers", "node21:9092,node22:9092,node23:9092");
        // wait for acknowledgement from all replicas
        props.put("acks", "all");
        // maximum number of send retries
        props.put("retries", 0);
        // batch size
        props.put("batch.size", 16384);
        // linger time before a batch is sent
        props.put("linger.ms", 1);
        // send buffer memory
        props.put("buffer.memory", 33554432);
        // key serializer
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        // value serializer
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        KafkaProducer<String, String> producer = new KafkaProducer<>(props);
        for (int i = 0; i < 50; i++) {
            ProducerRecord<String, String> record =
                    new ProducerRecord<String, String>("firstTopic", Integer.toString(i), "hello world-" + i);
            producer.send(record);
            System.out.println(record);
        }
        producer.close();
    }
}
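Note that send() is asynchronous: it appends the record to an internal buffer and returns immediately, and close() flushes whatever is still in flight. To learn whether an individual send succeeded, attach a callback, as shown in the next section.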
1.2.3 Create a producer with a callback (new API)
package com.xyg.kafka.producer;

import java.util.Properties;
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

public class CallBackProducer {

    public static void main(String[] args) {
        Properties props = new Properties();
        // Kafka broker host names and ports
        props.put("bootstrap.servers", "node21:9092,node22:9092,node23:9092");
        // wait for acknowledgement from all replicas
        props.put("acks", "all");
        // maximum number of send retries
        props.put("retries", 0);
        // batch size
        props.put("batch.size", 16384);
        // linger time before a batch is sent
        props.put("linger.ms", 1);
        // send buffer memory
        props.put("buffer.memory", 33554432);
        // key serializer
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        // value serializer
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        KafkaProducer<String, String> kafkaProducer = new KafkaProducer<>(props);
        for (int i = 0; i < 10; i++) {
            kafkaProducer.send(new ProducerRecord<String, String>("firstTopic", "hello" + i), new Callback() {
                @Override
                public void onCompletion(RecordMetadata metadata, Exception exception) {
                    if (metadata != null) {
                        System.out.println(metadata.partition() + "---" + metadata.offset());
                    }
                }
            });
        }
        kafkaProducer.close();
    }
}
The console output is as follows:
1---17
1---18
1---19
1---20
2---11
2---12
2---13
0---22
0---23
0---24

Process finished with exit code 0
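Each line printed by the callback has the form partition---offset, so 1---17 means the record landed at offset 17 of partition 1; the ten records were distributed across partitions 0, 1 and 2 of firstTopic.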
1.2.4 Custom partitioner producer
0) Requirement: write all records to partition 0 of the topic.
1) Define a class that implements the Partitioner interface and override its method (deprecated API)
package com.xyg.kafka.producer;

import kafka.producer.Partitioner;

public class OldCustomPartitioner implements Partitioner {

    public OldCustomPartitioner() {
        super();
    }

    @Override
    public int partition(Object key, int numPartitions) {
        // route every record to partition 0
        return 0;
    }
}
2) Custom partitioner (new API)
package com.xyg.kafka.producer;

import java.util.Map;
import org.apache.kafka.clients.producer.Partitioner;
import org.apache.kafka.common.Cluster;

public class NewCustomPartitioner implements Partitioner {

    @Override
    public void configure(Map<String, ?> configs) {
    }

    @Override
    public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) {
        // route every record to partition 0
        return 0;
    }

    @Override
    public void close() {
    }
}
3) Use it in producer code
package com.xyg.kafka.producer;

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;

public class PartitionerProducer {

    public static void main(String[] args) {
        Properties props = new Properties();
        // Kafka broker host names and ports
        props.put("bootstrap.servers", "node21:9092,node22:9092,node23:9092");
        // wait for acknowledgement from all replicas
        props.put("acks", "all");
        // maximum number of send retries
        props.put("retries", 0);
        // batch size
        props.put("batch.size", 16384);
        // linger time before a batch is sent
        props.put("linger.ms", 1);
        // send buffer memory
        props.put("buffer.memory", 33554432);
        // key serializer
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        // value serializer
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        // custom partitioner
        props.put("partitioner.class", "com.xyg.kafka.producer.NewCustomPartitioner");

        Producer<String, String> producer = new KafkaProducer<>(props);
        producer.send(new ProducerRecord<String, String>("firstTopic", "1", "kafka"));
        System.out.println(new ProducerRecord<String, String>("firstTopic", "1", "kafka"));
        producer.close();
    }
}
4) Test
(1) On node21, watch how the .log files of the three firstTopic partitions under /opt/module/kafka/logs/ change:
[admin@node21 firstTopic-0]$ tail -f 00000000000000000000.log
[admin@node21 firstTopic-1]$ tail -f 00000000000000000000.log
[admin@node21 firstTopic-2]$ tail -f 00000000000000000000.log
(2) All the data ends up in the specified partition.
0) Start a console producer:
[root@node21 kafka]$ bin/kafka-console-producer.sh --broker-list node21:9092,node22:9092,node23:9092 --topic firstTopic
>hello world
Consumer using the deprecated high-level API:

package com.xyg.kafka.consume;

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;

public class CustomOldConsumer {

    @SuppressWarnings("deprecation")
    public static void main(String[] args) {
        Properties properties = new Properties();
        properties.put("zookeeper.connect", "node21:2181,node22:2181,node23:2181");
        properties.put("group.id", "g1");
        properties.put("zookeeper.session.timeout.ms", "500");
        properties.put("zookeeper.sync.time.ms", "250");
        properties.put("auto.commit.interval.ms", "1000");

        // create the consumer connector
        ConsumerConnector consumer = Consumer.createJavaConsumerConnector(new ConsumerConfig(properties));

        HashMap<String, Integer> topicCount = new HashMap<>();
        topicCount.put("firstTopic", 1);
        Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCount);
        KafkaStream<byte[], byte[]> stream = consumerMap.get("firstTopic").get(0);
        ConsumerIterator<byte[], byte[]> it = stream.iterator();
        while (it.hasNext()) {
            System.out.println(new String(it.next().message()));
        }
    }
}
The official example (offsets are maintained automatically):
package com.xyg.kafka.consume;

import java.util.Arrays;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class CustomNewConsumer {

    public static void main(String[] args) {
        Properties props = new Properties();
        // Kafka broker addresses; listing every broker is not required
        props.put("bootstrap.servers", "node21:9092,node22:9092,node23:9092");
        // consumer group
        props.put("group.id", "test1");
        // commit offsets automatically
        props.put("enable.auto.commit", "true");
        // interval between automatic offset commits
        props.put("auto.commit.interval.ms", "1000");
        // key deserializer
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        // value deserializer
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        // create the consumer
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        // subscribe to one or more topics
        consumer.subscribe(Arrays.asList("firstTopic", "second", "third"));
        while (true) {
            // poll for records with a 100 ms timeout
            ConsumerRecords<String, String> records = consumer.poll(100);
            for (ConsumerRecord<String, String> record : records)
                System.out.printf("offset = %d, key = %s, value = %s%n",
                        record.offset(), record.key(), record.value());
        }
    }
}
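For contrast with the automatic-offset example above, here is a minimal sketch of a manual-commit variant (not part of the original example): enable.auto.commit is turned off and offsets are committed with commitSync() only after the batch has been processed. It reuses the props and imports from the class above.

props.put("enable.auto.commit", "false");   // disable automatic offset commits
KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
consumer.subscribe(Arrays.asList("firstTopic"));
while (true) {
    ConsumerRecords<String, String> records = consumer.poll(100);
    for (ConsumerRecord<String, String> record : records) {
        System.out.printf("offset = %d, key = %s, value = %s%n",
                record.offset(), record.key(), record.value());
    }
    // commit offsets only after the batch has been processed
    consumer.commitSync();
}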
Producer interceptors were introduced in Kafka 0.10 and are mainly used to implement custom control logic on the client side.
For the producer, an interceptor gives the user a chance to customize a message before it is sent and before the producer callback logic runs, for example to modify the message. The producer also allows the user to specify multiple interceptors that act on the same message in order, forming an interceptor chain. The interceptor interface is org.apache.kafka.clients.producer.ProducerInterceptor, which defines the following methods:
(1) configure(configs)
Called to obtain configuration information and initialize data.
(2) onSend(ProducerRecord)
This method is wrapped into KafkaProducer.send, i.e. it runs in the user's main thread. The producer guarantees that it is called before the message is serialized and before its partition is computed. The user can do anything to the message here, but it is best not to change the topic or partition the message belongs to, otherwise the target partition computation will be affected.
(3) onAcknowledgement(RecordMetadata, Exception)
Called when the message has been acknowledged or when the send has failed, normally just before the producer callback logic is triggered. onAcknowledgement runs in the producer's I/O thread, so do not put heavy logic in it, or the producer's send throughput will suffer.
(4) close
Closes the interceptor; mainly used to perform resource cleanup.
As mentioned above, interceptors may run in multiple threads, so the implementation must ensure thread safety itself. Also, if multiple interceptors are specified, the producer calls them in the given order and merely catches any exception an interceptor throws, logging it to the error log rather than propagating it upwards. Keep this in mind when using interceptors.
1) Requirement:
Implement a simple chain of two interceptors. The first interceptor prepends a timestamp to the message value before the message is sent; the second interceptor updates the count of successfully sent or failed messages after the message has been sent.
2) Implementation
(1) Timestamp interceptor
package com.xyg.kafka.interceptor;

import java.util.Map;
import org.apache.kafka.clients.producer.ProducerInterceptor;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

public class TimeInterceptor implements ProducerInterceptor<String, String> {

    @Override
    public void configure(Map<String, ?> map) {
    }

    @Override
    public ProducerRecord<String, String> onSend(ProducerRecord<String, String> record) {
        // build a new record with the timestamp prepended to the value
        return new ProducerRecord(record.topic(), record.partition(), record.timestamp(), record.key(),
                System.currentTimeMillis() + "," + record.value().toString());
    }

    @Override
    public void onAcknowledgement(RecordMetadata recordMetadata, Exception e) {
    }

    @Override
    public void close() {
    }
}
(2) Count the number of successfully sent and failed messages, and print both counters when the producer is closed
package com.xyg.kafka.interceptor;

import java.util.Map;
import org.apache.kafka.clients.producer.ProducerInterceptor;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

public class CounterInterceptor implements ProducerInterceptor<String, String> {

    private int errorCounter = 0;
    private int successCounter = 0;

    @Override
    public void configure(Map<String, ?> configs) {
    }

    @Override
    public ProducerRecord<String, String> onSend(ProducerRecord<String, String> record) {
        return record;
    }

    @Override
    public void onAcknowledgement(RecordMetadata metadata, Exception exception) {
        // count successful and failed sends
        if (exception == null) {
            successCounter++;
        } else {
            errorCounter++;
        }
    }

    @Override
    public void close() {
        // print the results
        System.out.println("Successful sent: " + successCounter);
        System.out.println("Failed sent: " + errorCounter);
    }
}
(3) Producer main program
package com.xyg.kafka.interceptor;

import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;

public class InterceptorProducer {

    public static void main(String[] args) throws Exception {
        // 1 producer configuration
        Properties props = new Properties();
        props.put("bootstrap.servers", "node21:9092");
        props.put("acks", "all");
        props.put("retries", 0);
        props.put("batch.size", 16384);
        props.put("linger.ms", 1);
        props.put("buffer.memory", 33554432);
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        // 2 build the interceptor chain
        List<String> interceptors = new ArrayList<>();
        interceptors.add("com.xyg.kafka.interceptor.TimeInterceptor");
        interceptors.add("com.xyg.kafka.interceptor.CounterInterceptor");
        props.put(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, interceptors);

        String topic = "firstTopic";
        Producer<String, String> producer = new KafkaProducer<>(props);

        // 3 send the messages
        for (int i = 0; i < 10; i++) {
            ProducerRecord<String, String> record = new ProducerRecord<>(topic, "message" + i);
            producer.send(record);
        }

        // 4 the producer must be closed so that the interceptors' close() methods are called
        producer.close();
    }
}
3) Test
(1) Start a consumer on the Kafka cluster, then run the Java client program.
[root@node21 kafka]$ bin/kafka-console-consumer.sh --zookeeper node21:2181,node22:2181,node23:2181 --from-beginning --topic firstTopic
1533465083631,message0
1533465084092,message3
1533465084092,message6
1533465084093,message9
1533465148033,message1
1533465148043,message4
1533465148044,message7
1533465154264,message0
1533465154650,message3
1533465154651,message6
1533465154651,message9
(2) The Java console output is as follows:
Successful sent: 10
Failed sent: 0
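These two totals are printed by CounterInterceptor.close(), which is invoked when producer.close() is called; if the producer were not closed, the counters would never be printed.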
Kafka Streams is a client library for building mission-critical real-time applications and microservices, where the input and/or output data is stored in Kafka clusters. Kafka Streams combines the simplicity of writing and deploying standard Java and Scala applications on the client side with the benefits of Kafka's server-side cluster technology, making such applications highly scalable, elastic, fault-tolerant, distributed, and more.
1) Powerful
Highly scalable, elastic, fault-tolerant
2) Lightweight
No dedicated cluster required
A library, not a framework
3) Fully integrated
100% compatible with Kafka 0.10.0
Easy to integrate into existing applications
4) Real-time
Millisecond latency
Not micro-batching
Windowing allows out-of-order data
Late-arriving data is allowed
There are already many stream processing systems; the best-known and most widely used open-source ones are Spark Streaming and Apache Storm. Apache Storm has matured over many years, is widely deployed, offers record-level processing, and now also supports SQL on streams. Spark Streaming is built on Apache Spark and integrates easily with graph computation, SQL processing, and so on; it is powerful, and for users already familiar with other Spark applications the barrier to entry is low. In addition, the mainstream Hadoop distributions, such as Cloudera and Hortonworks, bundle both Apache Storm and Apache Spark, which makes deployment easier.
Given that Apache Spark and Apache Storm have so many advantages, why do we still need Kafka Streams? The main reasons are as follows.
First, Spark and Storm are stream processing frameworks, whereas Kafka Streams is a Kafka-based stream processing library. A framework requires developers to implement their logic in a particular way for the framework to call; developers find it hard to understand exactly how the framework runs, which makes debugging costly and restricts usage. Kafka Streams, being a library, exposes concrete classes for developers to call, and the way the application runs is largely under the developer's control, making it easier to use and to debug.
Second, although Cloudera and Hortonworks simplify deploying Storm and Spark, deploying these frameworks remains relatively complex. Kafka Streams, as a library, can easily be embedded in an application and places essentially no requirements on how the application is packaged and deployed.
Third, virtually all stream processing systems support Kafka as a data source: Storm has a dedicated kafka-spout, and Spark provides the spark-streaming-kafka module. In practice Kafka is effectively the standard data source for mainstream stream processing systems. In other words, Kafka is already deployed in most streaming setups, so the cost of adopting Kafka Streams is very low.
Fourth, when using Storm or Spark Streaming, resources must be reserved for the framework's own processes, such as Storm's supervisor and the node manager for Spark on YARN. Even per application instance, the framework itself consumes resources; Spark Streaming, for example, needs memory reserved for shuffle and storage. Kafka Streams, as a library, adds no such system overhead.
Fifth, because Kafka itself provides data persistence, Kafka Streams supports rolling deployment, rolling upgrades, and reprocessing.
Sixth, thanks to Kafka's consumer rebalance mechanism, Kafka Streams can adjust its parallelism dynamically at runtime.
0) Requirement:
Process the input in real time and strip the ">>>" prefix from each record. For example, the input "111>>>hadoop" should be turned into "hadoop".
1) Requirement analysis:
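The stream reads each record from firstTopic; the processor keeps only the part after the ">>>" marker (records without the marker pass through unchanged) and writes the result to secondTopic.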
2) Implementation
(1) Create a project and add the dependency to the pom file:
<!-- https://mvnrepository.com/artifact/org.apache.kafka/kafka-streams -->
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka-streams</artifactId>
    <version>1.1.0</version>
</dependency>
(2) Create the main class (TopologyBuilder is deprecated)
package com.xyg.kafka.stream;

import java.util.Properties;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.processor.Processor;
import org.apache.kafka.streams.processor.ProcessorSupplier;
import org.apache.kafka.streams.processor.TopologyBuilder;

public class StreamApplication {

    public static void main(String[] args) {
        // input topic
        String from = "firstTopic";
        // output topic
        String to = "secondTopic";

        // configuration
        Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "logFilter");
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "node21:9092");
        StreamsConfig config = new StreamsConfig(props);

        // build the topology
        TopologyBuilder builder = new TopologyBuilder();
        builder.addSource("SOURCE", from)
                .addProcessor("PROCESS", new ProcessorSupplier<byte[], byte[]>() {
                    @Override
                    public Processor<byte[], byte[]> get() {
                        // the actual processing logic
                        return new LogProcessor();
                    }
                }, "SOURCE")
                .addSink("SINK", to, "PROCESS");

        // create and start the Kafka Streams instance
        KafkaStreams streams = new KafkaStreams(builder, config);
        streams.start();
    }
}
(3) The actual processing logic
package com.xyg.kafka.stream;

import org.apache.kafka.streams.processor.Processor;
import org.apache.kafka.streams.processor.ProcessorContext;

public class LogProcessor implements Processor<byte[], byte[]> {

    private ProcessorContext context;

    @Override
    public void init(ProcessorContext context) {
        this.context = context;
    }

    @Override
    public void process(byte[] key, byte[] value) {
        String input = new String(value);
        // if the value contains ">>>", keep only the part after the marker
        if (input.contains(">>>")) {
            input = input.split(">>>")[1].trim();
            // forward to the downstream sink (output topic)
            context.forward("logProcessor".getBytes(), input.getBytes());
        } else {
            // no marker: forward the value unchanged
            context.forward("logProcessor".getBytes(), input.getBytes());
        }
    }

    @Override
    public void punctuate(long timestamp) {
    }

    @Override
    public void close() {
    }
}
(4) Run the program.
(5) Start a producer on node21:
[root@node21 kafka]$ bin/kafka-console-producer.sh --broker-list node21:9092,node22:9092,node23:9092 --topic firstTopic
>111>>>hadoop
>222>>>spark
>spark
(6) Start a consumer on node22:
[root@node22 kafka]$ bin/kafka-console-consumer.sh --bootstrap-server node21:9092,node22:9092,node23:9092 --from-beginning --topic secondTopic
hadoop
spark
spark
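The last input line, "spark", carries no ">>>" prefix, so the LogProcessor else branch forwards it unchanged, which is why it shows up as-is in the consumer.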
Reference documentation: http://kafka.apache.org/11/documentation/streams/
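The WordCount application below follows the word-count demo from that documentation page, with the broker address adapted to this cluster: it splits each line read from TextLinesTopic into words, maintains a running count per word in a local state store, and writes the counts to WordsWithCountsTopic as Long values.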
package com.xyg.kafka.stream;

import java.util.Arrays;
import java.util.Properties;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.common.utils.Bytes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.KTable;
import org.apache.kafka.streams.kstream.Materialized;
import org.apache.kafka.streams.kstream.Produced;
import org.apache.kafka.streams.state.KeyValueStore;

public class WordCountApplication {

    public static void main(final String[] args) throws Exception {
        Properties config = new Properties();
        config.put(StreamsConfig.APPLICATION_ID_CONFIG, "wordcount-application");
        config.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "node21:9092");
        config.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
        config.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());

        StreamsBuilder builder = new StreamsBuilder();
        KStream<String, String> textLines = builder.stream("TextLinesTopic");
        KTable<String, Long> wordCounts = textLines
                .flatMapValues(textLine -> Arrays.asList(textLine.toLowerCase().split("\\W+")))
                .groupBy((key, word) -> word)
                .count(Materialized.<String, Long, KeyValueStore<Bytes, byte[]>>as("counts-store"));
        wordCounts.toStream().to("WordsWithCountsTopic", Produced.with(Serdes.String(), Serdes.Long()));

        KafkaStreams streams = new KafkaStreams(builder.build(), config);
        streams.start();
    }
}
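To watch the results, note that the values on WordsWithCountsTopic are Long, so the console consumer needs a Long value deserializer. Something along the following lines should work (deserializer properties as in the Kafka Streams quickstart; adjust the broker list as needed):

[root@node21 kafka]$ bin/kafka-console-consumer.sh --bootstrap-server node21:9092 --topic WordsWithCountsTopic --from-beginning --property print.key=true --property key.deserializer=org.apache.kafka.common.serialization.StringDeserializer --property value.deserializer=org.apache.kafka.common.serialization.LongDeserializer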