To process logs in real time, we chose a Kafka cluster; for log analysis we chose a jstorm cluster. The results produced by jstorm can be saved to a database of your choice, such as HBase, MySQL, or MariaDB.
For the logging API the system uses the slf4j + logback combination. To get the system's logs into the Kafka cluster, we extend a logback Appender; once it is configured in logback, log output is shipped to the Kafka cluster automatically.
Installing the Kafka cluster is not covered here. To keep the simulation realistic, a local ZooKeeper cluster was also installed and deployed.
Here is how to extend the logback Appender:
package com.doctor.logbackextend;

import java.util.Properties;

import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;
import ch.qos.logback.classic.spi.ILoggingEvent;
import ch.qos.logback.core.AppenderBase;

public class KafkaAppender extends AppenderBase<ILoggingEvent> {

    private String topic;
    private String zookeeperHost;
    private String broker;
    private Producer<String, String> producer;
    private Formatter formatter;

    @Override
    protected void append(ILoggingEvent eventObject) {
        // Format the event and send it to the configured Kafka topic.
        String message = this.formatter.format(eventObject);
        this.producer.send(new KeyedMessage<String, String>(this.topic, message));
    }

    @Override
    public void start() {
        if (this.formatter == null) {
            this.formatter = new MessageFormatter();
        }
        super.start();
        Properties props = new Properties();
        props.put("zk.connect", this.zookeeperHost);
        props.put("metadata.broker.list", this.broker);
        props.put("serializer.class", "kafka.serializer.StringEncoder");
        ProducerConfig config = new ProducerConfig(props);
        this.producer = new Producer<String, String>(config);
    }

    @Override
    public void stop() {
        super.stop();
        this.producer.close();
    }

    public String getTopic() { return topic; }
    public void setTopic(String topic) { this.topic = topic; }

    public String getZookeeperHost() { return zookeeperHost; }
    public void setZookeeperHost(String zookeeperHost) { this.zookeeperHost = zookeeperHost; }

    public String getBroker() { return broker; }
    public void setBroker(String broker) { this.broker = broker; }

    public Producer<String, String> getProducer() { return producer; }
    public void setProducer(Producer<String, String> producer) { this.producer = producer; }

    public Formatter getFormatter() { return formatter; }
    public void setFormatter(Formatter formatter) { this.formatter = formatter; }

    /**
     * Formats a logging event into the message string sent to Kafka.
     *
     * @author doctor
     * @time 2014-10-24 10:37:17
     */
    interface Formatter {
        String format(ILoggingEvent event);
    }

    public static class MessageFormatter implements Formatter {
        @Override
        public String format(ILoggingEvent event) {
            return event.getFormattedMessage();
        }
    }
}
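The Formatter interface is the appender's extension point for the message format; the default MessageFormatter only emits the rendered message text. As an illustration, a hypothetical JsonFormatter (the class name and field choices are assumptions, not part of the original code) could attach the timestamp, level, and logger name, so that downstream consumers such as jstorm bolts receive structured records:

    // A minimal sketch, assuming structured records are wanted downstream.
    // JsonFormatter is hypothetical; it hand-rolls JSON to avoid extra dependencies.
    public static class JsonFormatter implements Formatter {
        @Override
        public String format(ILoggingEvent event) {
            StringBuilder sb = new StringBuilder(128);
            sb.append('{')
              .append("\"timestamp\":").append(event.getTimeStamp()).append(',')
              .append("\"level\":\"").append(event.getLevel()).append("\",")
              .append("\"logger\":\"").append(event.getLoggerName()).append("\",")
              .append("\"message\":\"")
              .append(event.getFormattedMessage().replace("\"", "\\\""))
              .append("\"}");
            return sb.toString();
        }
    }

Because the appender exposes setFormatter, logback's Joran configurator can inject such a formatter from a nested element in logback.xml; alternatively you can set it programmatically.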
<appender name="kafka" class="com.doctor.logbackextend.KafkaAppender"> <topic>kafka-test</topic> <!-- <zookeeperHost>127.0.0.1:2181</zookeeperHost> --> <!-- <broker>127.0.0.1:9092</broker> --> <zookeeperHost>127.0.0.1:2181,127.0.0.1:2182,127.0.0.1:2183</zookeeperHost> <broker>127.0.0.1:9092,127.0.0.1:9093</broker> </appender> <root level="all"> <appender-ref ref="stdout" /> <appender-ref ref="defaultAppender" /> <appender-ref ref="kafka" /> </root>
For <zookeeperHost> I started three ZooKeeper instances locally; from the configuration above you can see how they are addressed.
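For completeness, a minimal sketch of a zoo.cfg for one member of such a local ensemble (ports and paths are assumptions; each instance needs its own dataDir containing a myid file, its own clientPort, and distinct quorum/election ports when all three share one host):

    # zoo.cfg for the first local instance; ports and paths are assumptions.
    tickTime=2000
    initLimit=10
    syncLimit=5
    dataDir=/tmp/zookeeper1        # contains a "myid" file holding "1"
    clientPort=2181                # 2182 and 2183 for the other two instances
    # quorum/election ports must differ per instance when co-located:
    server.1=127.0.0.1:2888:3888
    server.2=127.0.0.1:2889:3889
    server.3=127.0.0.1:2890:3890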
For the Kafka cluster I configured two brokers, both on the local machine.
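To run two brokers on one machine, each needs its own server.properties with a distinct broker.id, port, and log.dirs. A minimal sketch using Kafka 0.8-style keys (the concrete values are assumptions):

    # server.properties for the first broker; values are assumptions.
    broker.id=0
    port=9092                      # the second broker uses port=9093
    log.dirs=/tmp/kafka-logs-0     # the second broker gets its own directory
    zookeeper.connect=127.0.0.1:2181,127.0.0.1:2182,127.0.0.1:2183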
Test code:
package com.doctor.logbackextend;

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;

import org.apache.commons.lang.RandomStringUtils;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Assumes the ZooKeeper and Kafka environments are ready, on the default local ports.
 *
 * @author doctor
 * @time 2014-10-24 15:14:01
 */
public class KafkaAppenderTest {

    private static final Logger LOG = LoggerFactory.getLogger(KafkaAppenderTest.class);

    /** Start this test first: it simulates log output being appended to Kafka. */
    @Test
    public void test_log_producer() {
        while (true) {
            LOG.info("test_log_producer : " + RandomStringUtils.random(3, "hello doctor,how are you,and you"));
        }
    }

    /**
     * Then start this test: it simulates a consumer fetching the logs for analysis.
     * It only prints to the console rather than going through the logger, so its
     * output cannot get mixed back into the simulated log data.
     */
    @Test
    public void test_consumer() {
        Properties props = new Properties();
        props.put("zookeeper.connect", "127.0.0.1:2181,127.0.0.1:2182,127.0.0.1:2183");
        props.put("group.id", "kafkatest-group");
//      props.put("zookeeper.session.timeout.ms", "400");
//      props.put("zookeeper.sync.time.ms", "200");
//      props.put("auto.commit.interval.ms", "1000");
        ConsumerConfig consumerConfig = new ConsumerConfig(props);
        ConsumerConnector consumer = Consumer.createJavaConsumerConnector(consumerConfig);

        Map<String, Integer> topicCountMap = new HashMap<>();
        topicCountMap.put("kafka-test", 1);
        Map<String, List<KafkaStream<byte[], byte[]>>> consumerStream = consumer.createMessageStreams(topicCountMap);
        List<KafkaStream<byte[], byte[]>> streams = consumerStream.get("kafka-test");
        for (KafkaStream<byte[], byte[]> stream : streams) {
            ConsumerIterator<byte[], byte[]> it = stream.iterator();
            while (it.hasNext()) {
                System.out.println("test_consumer: " + new String(it.next().message()));
            }
        }
    }
}
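Note that both test methods loop forever, so run them as two separate JUnit processes and stop them manually. In a real consumer you would eventually exit the loop and release the connection; the high-level consumer API provides a shutdown call for that (a minimal sketch, assuming the loop has been made terminable, e.g. via the consumer.timeout.ms property):

    // After the consuming loop ends, release the ZooKeeper session and
    // sockets held by the high-level consumer:
    consumer.shutdown();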
Copyright notice: this blog post is an original article; do not reproduce it without permission.