# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# see org.apache.kafka.clients.producer.ProducerConfig for more details

############################# Producer Basics #############################

# list of brokers used for bootstrapping knowledge about the rest of the cluster
# format: host1:port1,host2:port2 ...
#bootstrap.servers=localhost:9092

# specify the compression codec for all data generated: none, gzip, snappy, lz4, zstd
#compression.type=none

# name of the partitioner class for partitioning events; the default partitioner spreads data randomly
#partitioner.class=

# the maximum amount of time the client will wait for the response of a request
#request.timeout.ms=

# how long `KafkaProducer.send` and `KafkaProducer.partitionsFor` will block for
#max.block.ms=

# the producer will wait for up to the given delay to allow other records to be sent so that the sends can be batched together
#linger.ms=

# the maximum size of a request in bytes
#max.request.size=

# the default batch size in bytes when batching multiple records sent to a partition
#batch.size=

# the total bytes of memory the producer can use to buffer records waiting to be sent to the server
#buffer.memory=

# List of Kafka brokers used to fetch metadata; it does not need to include every broker in the cluster.
metadata.broker.list=192.168.142.145:9092,192.168.142.146:9092,192.168.142.147:9092

# Partitioner class. Defaults to kafka.producer.DefaultPartitioner, which hashes the key to a partition.
#partitioner.class=kafka.producer.DefaultPartitioner

# Whether to compress messages. The default 0 means no compression, 1 means gzip, 2 means snappy.
# Compressed messages carry a header identifying the codec, so decompression on the consumer side
# is transparent and needs no configuration.
compression.codec=none

# Serializer class.
serializer.class=kafka.serializer.DefaultEncoder

# If compression is enabled, only compress these topics. Empty (the default) means no topic restriction.
#compressed.topics=

# Whether the producer requires an acknowledgement from the broker. Three values: 0, 1, -1
#  0: the producer does not wait for an ack from the broker
#  1: the broker sends an ack once the leader has received the message
# -1: the broker sends an ack only after all followers have replicated the message
request.required.acks=0

# Maximum time the broker may wait before acking the producer. On timeout the broker sends an error
# ack, meaning the last message failed for some reason (for example, the followers failed to replicate it).
request.timeout.ms=10000

# Send messages synchronously or asynchronously. The default "sync" is synchronous; "async" is asynchronous.
# Async mode improves send throughput, but messages are buffered locally and sent in batches, so messages
# that have not yet been sent can be lost.
producer.type=sync

# In async mode, messages buffered longer than this value (default 5000 ms) are sent to the broker as a batch.
# Works together with batch.num.messages.
queue.buffering.max.ms=5000

# In async mode, the maximum number of messages allowed to buffer on the producer.
# If the producer cannot send messages to the broker fast enough, they accumulate on the producer side;
# once the count reaches this threshold, the producer either blocks or drops messages. Default 10000.
queue.buffering.max.messages=20000

# In async mode, the number of messages sent per batch. Default 200.
batch.num.messages=500

# When the number of messages buffered on the producer reaches "queue.buffering.max.messages" and, after
# blocking for some time, the queue still cannot enqueue (the producer still has not sent anything),
# the producer can either keep blocking or drop the messages. This timeout controls that blocking:
# -1: block without a timeout limit; messages are never dropped
#  0: clear the queue immediately; messages are dropped
queue.enqueue.timeout.ms=-1
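
# The settings above (metadata.broker.list, producer.type, queue.buffering.*, batch.num.messages, ...) use the
# legacy Scala producer's configuration names. A rough sketch of the corresponding settings for the current Java
# producer (org.apache.kafka.clients.producer.ProducerConfig) is given below for reference; the concrete values
# here are assumptions and should be tuned for your cluster.
#bootstrap.servers=192.168.142.145:9092,192.168.142.146:9092,192.168.142.147:9092
#compression.type=none
#acks=0
#linger.ms=5000
#batch.size=16384
#buffer.memory=33554432
#max.block.ms=60000
#key.serializer=org.apache.kafka.common.serialization.StringSerializer
#value.serializer=org.apache.kafka.common.serialization.StringSerializer
# bootstrap.servers replaces metadata.broker.list; compression.type replaces compression.codec; acks (0, 1, all)
# replaces request.required.acks; linger.ms replaces queue.buffering.max.ms; batch.size and buffer.memory are
# measured in bytes rather than message counts; max.block.ms bounds how long send() blocks when the buffer is full.
# The Java producer is always asynchronous; the old producer.type=sync behaviour is obtained in client code by
# calling send(...).get() and waiting on the returned future.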