1. Kafka configuration: see
https://my.oschina.net/u/1591525/blog/2251910
2. Flume configuration
Create a new kafka.properties file in Flume's conf directory:
agent.sources = s1
agent.channels = c1
agent.sinks = k1
agent.sources.s1.type=exec
agent.sources.s1.command=tail -F /tmp/logs/kafka.log
agent.sources.s1.channels=c1
agent.channels.c1.type=memory
agent.channels.c1.capacity=10000
agent.channels.c1.transactionCapacity=100
# Set up the Kafka sink
agent.sinks.k1.type=org.apache.flume.sink.kafka.KafkaSink
# Kafka broker address and port
agent.sinks.k1.brokerList=master:9092
# Kafka topic
agent.sinks.k1.topic=kafkatest
# Serialization method
agent.sinks.k1.serializer.class=kafka.serializer.StringEncoder
agent.sinks.k1.channel=c1
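On Flume 1.7 and later the Kafka sink prefers newer property names; the brokerList and serializer.class keys above are legacy settings. A minimal equivalent sink block for newer Flume releases, assuming the same broker and topic, would be:
agent.sinks.k1.type=org.apache.flume.sink.kafka.KafkaSink
# newer-style keys: kafka.bootstrap.servers / kafka.topic replace brokerList / topic
agent.sinks.k1.kafka.bootstrap.servers=master:9092
agent.sinks.k1.kafka.topic=kafkatest
agent.sinks.k1.channel=c1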
3. Connection test
Start ZooKeeper and Kafka:
bin/zookeeper-server-start.sh -daemon config/zookeeper.properties &
bin/kafka-server-start.sh -daemon config/server.properties &
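If the broker is not set to auto-create topics (auto.create.topics.enable), create the kafkatest topic before starting Flume. A sketch using the ZooKeeper-based syntax of older Kafka releases, assuming ZooKeeper runs on master:2181:
bin/kafka-topics.sh --create --zookeeper master:2181 --replication-factor 1 --partitions 1 --topic kafkatest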
Start Flume:
bin/flume-ng agent -n agent -c conf -f conf/kafka.properties
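To verify the whole pipeline, append a line to the tailed log file and watch it arrive on the topic with the console consumer. A minimal check, assuming the broker is reachable at master:9092:
mkdir -p /tmp/logs                                       # make sure the tailed directory exists
echo "flume-kafka test $(date)" >> /tmp/logs/kafka.log   # new line picked up by tail -F
bin/kafka-console-consumer.sh --bootstrap-server master:9092 --topic kafkatest --from-beginning   # should print the line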
For the Kafka producer and consumer, refer to the Spring Boot and Kafka integration configuration.