# Kafka 配置文件 (server.properties)

############################# 服務基本設置 #############################

# The id of the broker. This must be set to a unique integer for each broker.
# kafka集羣分組ID
broker.id=1

############################# Socket Server 設置 #############################

listeners=PLAINTEXT://:9092

# socket服務監聽端口號
port=9092

# 主機名
host.name=192.168.30.65

# Hostname the broker will advertise to producers and consumers. If not set, it uses the
# value for "host.name" if configured.  Otherwise, it will use the value returned from
# java.net.InetAddress.getCanonicalHostName().
advertised.host.name=192.168.30.65

# The port to publish to ZooKeeper for clients to use. If this is not set,
# it will publish the same port that the broker binds to.
#advertised.port=<port accessible by clients>

# 處理網絡請求的線程數
num.network.threads=3

# 處理磁盤I/O的線程數
num.io.threads=8

# socket傳輸數據大小
socket.send.buffer.bytes=102400

# socket接收數據大小
socket.receive.buffer.bytes=102400

# socket最大請求字節數
socket.request.max.bytes=104857600


############################# 日誌配置 #############################

# 日誌存放目錄
log.dirs=/u01/kafka/kafka-logs

# 分區大小.
num.partitions=3

# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
# This value is recommended to be increased for installations with data dirs located in RAID array.
num.recovery.threads.per.data.dir=1

"server.properties" [只讀] 137L, 6294C                                                                            1,1          頂端
num.recovery.threads.per.data.dir=1

############################# Log Flush Policy #############################

# Messages are immediately written to the filesystem but by default we only fsync() to sync
# the OS cache lazily. The following configurations control the flush of data to disk.
# There are a few important trade-offs here:
#    1. Durability: Unflushed data may be lost if you are not using replication.
#    2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
#    3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to exceessive seeks.
# The settings below allow one to configure the flush policy to flush data after a period of time or
# every N messages (or both). This can be done globally and overridden on a per-topic basis.

# The number of messages to accept before forcing a flush of data to disk
#log.flush.interval.messages=10000

# The maximum amount of time a message can sit in a log before we force a flush
#log.flush.interval.ms=1000

############################# 日誌存儲策略 #############################

# 當達到下面的消息數量時,會將數據flush到日誌文件中。默認10000
log.flush.interval.messages=10000
# 當達到下面的時間(ms)時,執行一次強制的flush操作。interval.ms和interval.messages不管哪一個達到,都會flush。默認3000ms
log.flush.interval.ms=1000
# 檢查是否須要將日誌flush的時間間隔
log.flush.scheduler.interval.ms=3000
# 日誌保存時間 (hours|minutes),默認爲7天(168小時)。超過這個時間會根據policy處理數據。bytes和minutes不管哪一個先達到都會觸發。
log.retention.hours=24
# 日誌數據存儲的最大字節數。超過這個大小會根據policy處理數據。
log.retention.bytes=21474836480

# 控制日誌segment文件的大小,超出該大小則追加到一個新的日誌segment文件中(-1表示沒有限制)
log.segment.bytes=1073741824
# 當達到下面時間,會強制新建一個segment
log.roll.hours=168
# 日誌片斷文件的檢查週期,查看它們是否達到了刪除策略的設置(log.retention.hours或log.retention.bytes)
log.retention.check.interval.ms=60000

# 是否開啓壓縮
log.cleaner.enable=false
# 日誌清理策略(delete|compact)
log.cleanup.policy=delete
# 對於壓縮的日誌保留的最長時間
log.cleaner.delete.retention.ms=86400000

# 對於segment日誌的索引文件大小限制
log.index.size.max.bytes=10485760
# 索引計算的一個緩衝區,通常不須要設置。
log.index.interval.bytes=4096

############################# Zookeeper配置 #############################

# zookeeper節點
#zookeeper.connect=192.168.30.65:2188,192.168.30.66:2188,192.168.30.67:2188
zookeeper.connect=192.168.30.65:2188

# zookeeper網絡鏈接超時時間
zookeeper.connection.timeout.ms=6000