Kafka Tutorial

1. Kafka Basics

Key concepts:

Broker: a server in the Kafka cluster; it stores messages and serves producers and consumers.

Partition: an ordered, append-only log; every topic is divided into one or more partitions.

Topic: a named category of messages that producers write to and consumers read from.

Producer: a client that publishes messages to a topic.

Consumer: a client that reads messages from a topic.

Offset: a message's position within a partition; consumers track their progress by offset.

Kafka's performance is effectively constant with respect to the amount of data stored, so retaining large volumes of data is not a problem.
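How much data a broker keeps is governed by retention settings in server.properties; the values below are the stock defaults, shown for illustration rather than as recommendations:

# keep messages for 7 days
log.retention.hours=168
# no per-partition size limit
log.retention.bytes=-1
# roll a new log segment at 1 GB
log.segment.bytes=1073741824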

Each server in the cluster plays two roles at once: it is the leader for some of the partitions it hosts and a follower for others, so load is balanced well across the cluster.
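You can see how partition leadership is spread across brokers with the --describe option (this assumes the test topic created in the installation section below):

./bin/kafka-topics.sh --describe --zookeeper localhost:2181 --topic test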

 

Message delivery generally follows one of two models:

Queue model (queuing): consumers read from the server concurrently, and each message is delivered to exactly one of them.

Publish-subscribe model (publish-subscribe): each message is broadcast to every subscribed consumer. Kafka supports both models through consumer groups, as sketched below.
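A minimal sketch of how the group.id setting (used by the high-level consumer later in this tutorial) selects the delivery model; the group names here are hypothetical:

import java.util.Properties;
import java.util.UUID;

public class GroupIdSemantics {
    public static void main(String[] args) {
        // All consumers share one group.id -> queue semantics:
        // each message is delivered to exactly one member of the group.
        Properties queueProps = new Properties();
        queueProps.put("group.id", "workers");

        // Each consumer uses its own group.id -> publish-subscribe semantics:
        // every consumer receives every message.
        Properties pubSubProps = new Properties();
        pubSubProps.put("group.id", "reader-" + UUID.randomUUID());
    }
}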

 

2. Installing Kafka

Download Kafka.

Start ZooKeeper. You can use your own installation:

./zkServer.sh start

or the copy bundled with Kafka:

./bin/zookeeper-server-start.sh config/zookeeper.properties &

Before starting Kafka, add the following to the server.properties configuration file:

# host.name should be this machine's IP (whether 0.0.0.0 also works was not verified)
host.name=192.168.159.129

Then start Kafka:

./bin/kafka-server-start.sh config/server.properties
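If the broker should keep running after the shell exits, one common pattern (not shown in the original) is to start it in the background and capture its output:

nohup ./bin/kafka-server-start.sh config/server.properties > kafka.log 2>&1 &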

 

List existing topics:

./bin/kafka-topics.sh --list --zookeeper localhost:2181

Create a topic named test:

./bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic test

 

Start a console producer and send messages:

./bin/kafka-console-producer.sh --broker-list localhost:9092 --topic test

Start a console consumer to read the messages:

./bin/kafka-console-consumer.sh --zookeeper localhost:2181 --topic test --from-beginning

 

3. Installing a Graphical Monitoring Tool for Kafka

# Download the kafka-manager source
wget https://github.com/yahoo/kafka-manager/archive/1.3.0.8.zip
unzip 1.3.0.8.zip
cd kafka-manager-1.3.0.8/


# If sbt is not installed, install it first
curl https://bintray.com/sbt/rpm/rpm > bintray-sbt-rpm.repo
sudo mv bintray-sbt-rpm.repo /etc/yum.repos.d/
sudo yum install sbt

# This step is very, very slow
sbt clean dist   

mkdir /opt/KafkaManager
cp /opt/kafka-manager-1.3.0.8/target/universal/kafka-manager-1.3.0.8.zip /opt/KafkaManager
cd /opt/KafkaManager/
unzip kafka-manager-1.3.0.8.zip
cd kafka-manager-1.3.0.8/conf

 

vim application.conf

Configure the ZooKeeper address:

....
kafka-manager.zkhosts="${IP}:2181"            # ZooKeeper address
...

Start kafka-manager:

cd /opt/KafkaManager/kafka-manager-1.3.0.8

./bin/kafka-manager

Open 192.168.159.129:9000 in a browser (kafka-manager listens on port 9000 by default), then add a cluster in the UI pointing at your ZooKeeper address.

 

4. Integrating Kafka into a System

Java code for connecting to Kafka, using the legacy client API that shipped with the Kafka 0.8 distribution.

Configuration:

package kafka;

// Shared connection settings for the producer and consumer examples below.
public interface KafkaProperties
{
    final static String zkConnect = "192.168.159.129:2181";      // ZooKeeper address
    final static String groupId = "group1";                      // consumer group
    final static String topic = "topic2";
    final static String kafkaServerURL = "192.168.159.129:9092"; // broker list
    final static int kafkaServerPort = 9092;
    final static int kafkaProducerBufferSize = 64 * 1024;
    final static int connectionTimeOut = 20000;
    final static int reconnectInterval = 10000;
    final static String topic2 = "topic2";
    final static String topic3 = "topic3";
    final static String clientId = "SimpleConsumerDemoClient";
}

Producer code:

package kafka;

import java.util.Properties;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;

// Sends "Message_<n>" to the configured topic every 3 seconds.
public class KafkaProducer extends Thread
{
    private final kafka.javaapi.producer.Producer<Integer, String> producer;
    private final String topic;
    private final Properties props = new Properties();

    public KafkaProducer(String topic)
    {
        // The producer only needs the broker list; it does not talk to ZooKeeper.
        props.put("metadata.broker.list", KafkaProperties.kafkaServerURL);
        props.put("serializer.class", "kafka.serializer.StringEncoder");
        producer = new kafka.javaapi.producer.Producer<Integer, String>(new ProducerConfig(props));
        this.topic = topic;
    }

    @Override
    public void run() {
        int messageNo = 1;
        while (true)
        {
            String messageStr = "Message_" + messageNo;
            System.out.println("Send:" + messageStr);
            producer.send(new KeyedMessage<Integer, String>(topic, messageStr));
            messageNo++;
            try {
                sleep(3000);
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        }
    }

    public static void main(String[] args) {
        KafkaProducer kafkaProducer = new KafkaProducer(KafkaProperties.topic);
        kafkaProducer.start();
    }
}

Consumer code:

package kafka;

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;

// Reads messages from the configured topic using the high-level consumer.
public class KafkaConsumer extends Thread
{
    private final ConsumerConnector consumer;
    private final String topic;

    public KafkaConsumer(String topic)
    {
        consumer = kafka.consumer.Consumer.createJavaConsumerConnector(
                createConsumerConfig());
        this.topic = topic;
    }

    private static ConsumerConfig createConsumerConfig()
    {
        // The high-level consumer discovers brokers through ZooKeeper.
        Properties props = new Properties();
        props.put("zookeeper.connect", KafkaProperties.zkConnect);
        props.put("group.id", KafkaProperties.groupId);
        props.put("zookeeper.session.timeout.ms", "40000");
        props.put("zookeeper.sync.time.ms", "200");
        props.put("auto.commit.interval.ms", "1000");
        return new ConsumerConfig(props);
    }

    @Override
    public void run() {
        // Request one stream (thread) for the topic.
        Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
        topicCountMap.put(topic, Integer.valueOf(1));
        Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer
                                 .createMessageStreams(topicCountMap);
        KafkaStream<byte[], byte[]> stream = consumerMap.get(topic).get(0);
        ConsumerIterator<byte[], byte[]> it = stream.iterator();
        // it.hasNext() blocks until a message arrives.
        while (it.hasNext()) {
            System.out.println("receive:" + new String(it.next().message()));
            try {
                sleep(3000);
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        }
    }

    public static void main(String[] args) {
        KafkaConsumer kafkaConsumer = new KafkaConsumer(KafkaProperties.topic);
        kafkaConsumer.start();
    }
}
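To watch the pair work end to end, a hypothetical runner class (not part of the original code) can start the consumer first and then the producer, so each message sent every 3 seconds is printed on arrival:

package kafka;

public class KafkaDemo {
    public static void main(String[] args) {
        new KafkaConsumer(KafkaProperties.topic).start();
        new KafkaProducer(KafkaProperties.topic).start();
    }
}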

 

Integrating Kafka with Spring
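A minimal sketch, assuming the spring-kafka library and the newer org.apache.kafka clients API rather than the 0.8 API above (an assumption; the original does not name an integration library): a KafkaTemplate bean sends messages and a @KafkaListener method receives them.

package kafka;

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;

@Configuration
@EnableKafka
public class KafkaSpringConfig {

    // Broker address taken from KafkaProperties above.
    private static final String BROKERS = "192.168.159.129:9092";

    // Template for sending messages.
    @Bean
    public KafkaTemplate<String, String> kafkaTemplate() {
        Map<String, Object> props = new HashMap<String, Object>();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, BROKERS);
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        return new KafkaTemplate<String, String>(
                new DefaultKafkaProducerFactory<String, String>(props));
    }

    @Bean
    public ConsumerFactory<String, String> consumerFactory() {
        Map<String, Object> props = new HashMap<String, Object>();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, BROKERS);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "group1");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        return new DefaultKafkaConsumerFactory<String, String>(props);
    }

    // Container factory backing @KafkaListener methods.
    @Bean
    public ConcurrentKafkaListenerContainerFactory<String, String> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory =
                new ConcurrentKafkaListenerContainerFactory<String, String>();
        factory.setConsumerFactory(consumerFactory());
        return factory;
    }

    @KafkaListener(topics = "topic2")
    public void listen(String message) {
        System.out.println("receive:" + message);
    }
}

With this configuration loaded, kafkaTemplate.send("topic2", "hello") publishes a message and listen() prints it when it arrives.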
