利用docker能夠很方便的在一臺機子上搭建kafka集羣並進行測試。爲了簡化配置流程,採用docker-compose進行搭建。
- 編寫docker-compose.yml文件,內容以下:
version: '3.3' services: zookeeper: image: wurstmeister/zookeeper ports: - 2181:2181 container_name: zookeeper networks: default: ipv4_address: 172.19.0.11 kafka0: image: wurstmeister/kafka depends_on: - zookeeper container_name: kafka0 ports: - 9092:9092 environment: KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka0:9092 KAFKA_LISTENERS: PLAINTEXT://kafka0:9092 KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181 KAFKA_BROKER_ID: 0 volumes: - /root/data/kafka0/data:/data - /root/data/kafka0/log:/datalog networks: default: ipv4_address: 172.19.0.12 kafka1: image: wurstmeister/kafka depends_on: - zookeeper container_name: kafka1 ports: - 9093:9093 environment: KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka1:9093 KAFKA_LISTENERS: PLAINTEXT://kafka1:9093 KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181 KAFKA_BROKER_ID: 1 volumes: - /root/data/kafka1/data:/data - /root/data/kafka1/log:/datalog networks: default: ipv4_address: 172.19.0.13 kafka2: image: wurstmeister/kafka depends_on: - zookeeper container_name: kafka2 ports: - 9094:9094 environment: KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka2:9094 KAFKA_LISTENERS: PLAINTEXT://kafka2:9094 KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181 KAFKA_BROKER_ID: 2 volumes: - /root/data/kafka2/data:/data - /root/data/kafka2/log:/datalog networks: default: ipv4_address: 172.19.0.14 kafka-manager: image: sheepkiller/kafka-manager:latest restart: unless-stopped container_name: kafka-manager hostname: kafka-manager ports: - "9000:9000" links: # 鏈接本compose文件建立的container - kafka1 - kafka2 - kafka3 external_links: # 鏈接本compose文件之外的container - zookeeper environment: ZK_HOSTS: zoo1:2181 ## 修改:宿主機IP TZ: CST-8 networks: default: external: name: zookeeper_kafka
- 建立子網
docker network create --subnet 172.19.0.0/16 --gateway 172.19.0.1 zookeeper_kafka
- 執行docker-compose命令進行搭建
docker-compose -f docker-compose.yml up -d
輸入docker ps -a
命令如能查看到咱們啓動的五個容器(zookeeper、kafka0、kafka1、kafka2、kafka-manager)且處於運行狀態說明部署成功
- 測試kafka
輸入docker exec -it kafka0 bash
進入kafka0容器,並執行以下命令建立topic
cd /opt/kafka_2.13-2.6.0/bin/
./kafka-topics.sh --create --topic chat --partitions 5 --zookeeper 8.210.138.111:2181 --replication-factor 3
輸入以下命令開啓生產者
./kafka-console-producer.sh --broker-list kafka0:9092 --topic chat
開啓另外一個shell界面進入kafka2容器並執行下列命令開啓消費者
./kafka-console-consumer.sh --bootstrap-server kafka2:9094 --topic chat --from-beginning
回到生產者shell輸入消息,看消費者shell是否會出現一樣的消息,若是可以出現說明kafka集羣搭建正常。
--- --- kind: Deployment apiVersion: apps/v1 metadata: name: kafka-manager namespace: logging labels: name: kafka-manager spec: replicas: 1 selector: matchLabels: name: kafka-manager template: metadata: labels: app: kafka-manager name: kafka-manager spec: containers: - name: kafka-manager image: registry.cn-shenzhen.aliyuncs.com/zisefeizhu-baseimage/kafka:manager-latest ports: - containerPort: 9000 protocol: TCP env: - name: ZK_HOSTS value: 8.210.138.111:2181 - name: APPLICATION_SECRET value: letmein - name: TZ value: Asia/Shanghai imagePullPolicy: IfNotPresent restartPolicy: Always terminationGracePeriodSeconds: 30 securityContext: runAsUser: 0 schedulerName: default-scheduler strategy: type: RollingUpdate rollingUpdate: maxUnavailable: 1 maxSurge: 1 revisionHistoryLimit: 7 progressDeadlineSeconds: 600 --- kind: Service apiVersion: v1 metadata: name: kafka-manager namespace: logging spec: ports: - protocol: TCP port: 9000 targetPort: 9000 selector: app: kafka-manager clusterIP: None type: ClusterIP sessionAffinity: None --- apiVersion: certmanager.k8s.io/v1alpha1 kind: ClusterIssuer metadata: name: letsencrypt-kafka-zisefeizhu-cn spec: acme: server: https://acme-v02.api.letsencrypt.org/directory email: linkun@zisefeizhu.com privateKeySecretRef: # 指示此簽發機構的私鑰將要存儲到哪一個Secret對象中 name: letsencrypt-kafka-zisefeizhu-cn solvers: - selector: dnsNames: - 'kafka.zisefeizhu.cn' dns01: webhook: config: accessKeyId: LTAI4G6JfRFW7DzuMyRGHTS2 accessKeySecretRef: key: accessKeySecret name: alidns-credentials regionId: "cn-shenzhen" ttl: 600 groupName: certmanager.webhook.alidns solverName: alidns --- apiVersion: networking.k8s.io/v1beta1 kind: Ingress metadata: annotations: kubernetes.io/ingress.class: "kong" certmanager.k8s.io/cluster-issuer: "letsencrypt-kafka-zisefeizhu-cn" name: kafka-manager namespace: logging spec: tls: - hosts: - 'kafka.zisefeizhu.cn' secretName: kafka-zisefeizhu-cn-tls rules: - host: kafka.zisefeizhu.cn http: paths: - backend: serviceName: kafka-manager 
servicePort: 9000 path: /