package cn.bigdata.kafka;

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;

/**
 * Demo producer: sends 100 string messages ("1".."100") to the "test2"
 * topic, one every 500 ms.
 */
public class KafkaProducerDemo {

    public static void main(String[] args) throws InterruptedException {
        // Every key accepted here is documented in CommonClientConfigs
        // and ProducerConfig.
        Map<String, Object> config = new HashMap<>();

        // Kafka stores each message as a key/value pair; both are plain
        // Strings in this demo, so use the String serializer for each.
        config.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        config.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        // Broker address.
        config.put("bootstrap.servers", "m2:9092");

        // acks=0          client does not wait for any broker acknowledgement
        // acks=1          client waits only for the leader partition's acknowledgement
        // acks=all / -1   client waits for the leader and the follower partitions
        config.put("acks", "all");

        // try-with-resources: send() is asynchronous and buffers records, so the
        // producer must be closed (which flushes) or buffered messages may be
        // silently lost when the JVM exits.
        try (Producer<String, String> producer = new KafkaProducer<>(config)) {
            // Send the business messages (in a real app these might come from
            // a file, an in-memory database, or a socket).
            for (int i = 1; i <= 100; i++) {
                Thread.sleep(500);
                // First argument is the topic, second is the message value.
                producer.send(new ProducerRecord<>("test2", i + ""));
            }
        }
    }
}