kafka初識

  • 啓動 server

./kafka_2.11-2.0.0/bin/kafka-server-start.sh ./kafka_2.11-2.0.0/config/server.properties

# properties中默認的端口是9092,若是須要修改的話,修改位置以下:

listeners=PLAINTEXT://:9090
  • 建立 topic

kafka-topics.sh --create --zookeeper yun1:2181 --replication-factor 1 --partitions 1 --topic test

# partitions : 分區數量

# replication-factor:須要同步信息的節點數量(備份數量)

 

  • 啓動 producer

./kafka_2.11-2.0.0/bin/kafka-console-producer.sh --broker-list localhost:9090 --topic test-create

# broker-list:節點信息,若是有多個的話,ip1:port1,ip2:port2,ip3:port3

 

  • 啓動 consumer

./kafka-console-consumer.sh --bootstrap-server localhost:9090 --topic test-create --from-beginning

# bootstrap-server:須要鏈接的kafka server(舊版本的使用zookeeper:port來鏈接,後續版本這個參數已經不推薦)

# from-beginning:獲取該topic上面的全部已經發布過的消息。
  • 異常處理

    • 當producer或者consumer鏈接的server端口不對的話,producer、consumer會報出以下錯誤
      
      [2018-08-02 14:40:32,612] WARN [Consumer clientId=consumer-1, groupId=console-consumer-70274] Connection to node 0 could not be established. Broker may not be available. (org.apache.kafka.clients.NetworkClient)

       

    • 排查錯誤

    •  

    • 查看zookeeper的輸出信息。默認使用的broker id 是0 
      
      
      
      ./zkCli.sh -server yun1:2181 <<< "get /brokers/ids/0"
      
      
      
      輸出信息,broker=0 啓動的端口是9092
      
      
      
      WatchedEvent state:SyncConnected type:None path:null
      
      {"listener_security_protocol_map":{"PLAINTEXT":"PLAINTEXT"},"endpoints":["PLAINTEXT://10.1.162.15:9092"],"jmx_port":-1,"host":"10.1.162.15","timestamp":"1533190983702","port":9092,"version":4}
      
      cZxid = 0x78c
      
      ctime = Thu Aug 02 14:23:03 CST 2018
      
      mZxid = 0x78c
      
      mtime = Thu Aug 02 14:23:03 CST 2018
      
      pZxid = 0x78c
      
      cversion = 0
      
      dataVersion = 0
      
      aclVersion = 0
      
      ephemeralOwner = 0x1644a37232e01d6
      
      dataLength = 192
      
      numChildren = 0
      
      [zk: yun1:2181(CONNECTED) 1] %

       

  • 查看 topic 信息

./kafka-topics.sh --describe --zookeeper yun1:2181 --topic topic-repication

# 有三個節點相互備份。三個分區

############################################################

Topic:topic-repication    PartitionCount:3    ReplicationFactor:3    Configs:

    Topic: topic-repication    Partition: 0    Leader: 1    Replicas: 1,0,2    Isr: 1,0,2

    Topic: topic-repication    Partition: 1    Leader: 2    Replicas: 2,1,0    Isr: 2,1,0

    Topic: topic-repication    Partition: 2    Leader: 0    Replicas: 0,2,1    Isr: 0,2,1



#leader:該節點負責該分區的全部讀寫。每一個節點都是隨機選擇的

#replicas:備份的節點列表

#Isr:同步備份的節點列表,活着的節點而且正在同步leader
相關文章
相關標籤/搜索