Filebeat configuration:
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /opt/xxxx.log
  # Add a custom field to every event Filebeat collects so that Logstash
  # can tell the sources apart and route them to different indices
  fields:
    service: xxx
  fields_under_root: true
- type: log
  enabled: true
  paths:
    - /xxx.log
  fields:
    service: xxxxx
  fields_under_root: true
  # Multiline settings for collecting Java exception stack traces
  multiline.pattern: '^Caused by'
  multiline.negate: true
  multiline.match: after
  multiline.max_lines: 1000

output.redis:
  hosts: ["xxx:6379"]
  db: 0
  timeout: 10
  # Key of the Redis list that events are pushed to
  key: "xx"
Logstash configuration:
input {
  redis {
    host      => "127.0.0.1"
    data_type => "list"
    # Must match the Redis key configured in the Filebeat output above
    key       => "xxx"
  }
}

output {
  if [service] == "xxx" {
    elasticsearch {
      hosts => ["http://xxx:9200"]
      index => "xxx-%{+YYYY.MM.dd}"
    }
  }
  if [service] == "xxx" {
    elasticsearch {
      hosts => ["http://10.157.25.7:9200"]
      index => "xxx-%{+YYYY.MM.dd}"
    }
  }
}
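A quick way to sanity-check the handoff between Filebeat and Logstash, assuming redis-cli is available and "xxx" is the key used above:

# Length of the list Filebeat writes to; it should stay near 0 while
# Logstash is consuming, and grow if Logstash is stopped
redis-cli -h 127.0.0.1 -p 6379 -n 0 LLEN xxx

# Peek at one pending event without removing it from the list
redis-cli -h 127.0.0.1 -p 6379 -n 0 LRANGE xxx 0 0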
Elasticsearch configuration:
cluster.name: my-cluster
node.name: es-node-3
path.data: /data/es/
path.logs: /data/logs/
network.host: xxxx
http.port: 9200
transport.tcp.port: 9300
discovery.zen.ping.unicast.hosts: ["xx","xx","xx"]
discovery.zen.minimum_master_nodes: 2
discovery.zen.ping_timeout: 60s
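Once the nodes are up with this configuration, cluster formation and index creation can be verified with the _cat and cluster health APIs (substitute the real host from network.host above):

# Should list every node and mark which one is the elected master
curl 'http://xxxx:9200/_cat/nodes?v'

# Overall cluster health; expect "green" (or "yellow" if replicas are unassigned)
curl 'http://xxxx:9200/_cluster/health?pretty'

# Confirm that the daily indices written by Logstash are being created
curl 'http://xxxx:9200/_cat/indices/xxx-*?v'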