ELK - Part 2

Collect the nginx log and the system log, write them into Kafka, then use Logstash to read them back out and write them into Elasticsearch.
##node1 Write the nginx and system logs into Kafka
[root@node1 conf.d]# vim /etc/logstash/conf.d/nginx.conf

input {
  file {
    path => "/var/log/nginx/access.log"
    type => "nginx-access-log-1105"
    start_position => "beginning"
    stat_interval => "2"
    codec => "json"
  }
  file {
    path => "/var/log/messages"
    type => "system-log-1105"
    start_position => "beginning"
    stat_interval => "2"
  }
}


output {
  if [type] == "nginx-access-log-1105" {
    kafka {
      bootstrap_servers => "192.168.1.106:9092"
      topic_id => "nginx-accesslog-1105"
      codec => "json"
    }
  }
  if [type] == "system-log-1105" {
    kafka {
      bootstrap_servers => "192.168.1.106:9092"
      topic_id => "system-log-1105"
      codec => "json"
    }
  }
}
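Before starting, the config file can be syntax-checked; the path below assumes the rpm install of Logstash 5.x:

[root@node1 conf.d]# /usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/nginx.conf -t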


##node2 Read from Kafka and write to Elasticsearch

input {
  kafka {
    bootstrap_servers => "192.168.1.105:9092"
    topics => "nginx-accesslog-1105"
    group_id => "nginx-access-log"
    codec => "json"
    consumer_threads => 1
    decorate_events => true
  }
  kafka {
    bootstrap_servers => "192.168.1.105:9092"
    topics => "system-log-1105"
    group_id => "nginx-access-log"
    codec => "json"
    consumer_threads => 1
    decorate_events => true
  }
}

output {
#  stdout {
#    codec => "rubydebug"
#  }
  if [type] == "nginx-access-log-1105" {
    elasticsearch {
      hosts => ["192.168.1.105:9200"]
      index => "logstash-nginx-access-log-1105-%{+YYYY.MM.dd}"
    }
  }
  if [type] == "system-log-1105" {
    elasticsearch {
      hosts => ["192.168.1.106:9200"]
      index => "logstash-systemzzz-log-1105-%{+YYYY.MM.dd}"
    }
  }
}
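To confirm that events are actually landing in Kafka, attach a console consumer to one of the topics (the --zookeeper form matches the Kafka version used in this post; newer versions use --bootstrap-server instead):

[root@node1 conf.d]# /usr/local/kafka/bin/kafka-console-consumer.sh --zookeeper 192.168.1.105:2181 --topic nginx-accesslog-1105 --from-beginning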

Then add the new indices to Kibana.
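To see which indices exist before adding them in Kibana, Elasticsearch's _cat API can be queried directly:

[root@node1 conf.d]# curl '192.168.1.105:9200/_cat/indices?v'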

 

##Use filebeat to collect logs and write them to Kafka

node1: upload filebeat-5.6.5-x86_64.rpm

yum install filebeat-5.6.5-x86_64.rpm -y

systemctl stop logstash.service

[root@node1 tmp]# grep -v "#"  /etc/filebeat/filebeat.yml | grep -v "^$"
filebeat.prospectors:
- input_type: log
  paths:
    - /var/log/*.log
    - /var/log/messages
  exclude_lines: ["^DBG"]
  exclude_files: [".gz$"]
  document_type: "system-log-1105-filebeat"
output.file:
  path: "/tmp"
  filename: "filebeat.txt"

The output.file section above can be used first to verify that filebeat is reading; for the actual pipeline, point the output at Logstash instead:

output.logstash:
  hosts: ["192.168.1.105:5044"]  # logstash server address; more than one can be listed
  enabled: true                  # enable output to logstash (default: true)
  worker: 1                      # number of worker threads
  compression_level: 3           # compression level
  #loadbalance: true             # enable load balancing across multiple hosts
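Assuming filebeat 5.x (where the -configtest flag is still available), the config can be checked and the service started:

[root@node1 tmp]# filebeat -configtest -c /etc/filebeat/filebeat.yml
[root@node1 tmp]# systemctl start filebeat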

[root@node1 src]# vim /etc/logstash/conf.d/filebate.conf

input {
  beats {
    port => "5044"
    codec => "json"
  }
}
output {
  if [type] == "system-log-1105-filebeat" {
    kafka {
      bootstrap_servers => "192.168.1.105:9092"
      topic_id => "system-log-filebeat-1105"
      codec => "json"
    }
  }
}

[root@node1 conf.d]# systemctl restart logstash.service
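Once logstash is back up, it should be listening on the beats port:

[root@node1 conf.d]# ss -lntp | grep 5044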

[root@node1 conf.d]# /usr/local/kafka/bin/kafka-topics.sh --list --zookeeper 192.168.1.105:2181,192.168.1.106:2181,192.168.1.101:2181

 

##node2

[root@node2 conf.d]# vim kafka-es.conf

input {
  kafka {
    bootstrap_servers => "192.168.1.105:9092"
    topics => "system-log-filebeat-1105"
    group_id => "system-log-filebeat"
    codec => "json"
    consumer_threads => 1
    decorate_events => true
  }
}

output {
#  stdout {
#    codec => "rubydebug"
#  }
  if [type] == "system-log-1105-filebeat" {
    elasticsearch {
      hosts => ["192.168.1.106:9200"]
      index => "system-log-1105-filebeat-%{+YYYY.MM.dd}"
    }
  }
}

[root@node2 conf.d]# systemctl restart logstash.service

#Test: append something to /var/log/messages on node1, then check on port 9100 (elasticsearch-head); if the new index shows up, everything works, and the index can then be added to Kibana.
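For example (the curl check against port 9200 below is an alternative to the elasticsearch-head page on 9100):

[root@node1 ~]# echo "filebeat test $(date)" >> /var/log/messages
[root@node1 ~]# curl -s '192.168.1.106:9200/_cat/indices?v' | grep system-log-1105-filebeat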

##The flow: filebeat reads from /var/log/messages and outputs to 192.168.1.105:5044; logstash on node1 reads from local port 5044 and writes to Kafka on port 9092; then node2's logstash inputs from 192.168.1.105:9092 and outputs to Elasticsearch.
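The same flow as a diagram, with hosts, ports, and the topic name taken from the configs above:

filebeat (/var/log/messages)
  -> logstash on node1 (beats input, port 5044)
  -> kafka (192.168.1.105:9092, topic system-log-filebeat-1105)
  -> logstash on node2 (kafka input)
  -> elasticsearch (192.168.1.106:9200, index system-log-1105-filebeat-*)
  -> kibana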

 

###Collect nginx logs

[root@node1 conf.d]# vim /etc/filebeat/filebeat.yml

- input_type: log
  paths:
    - /var/log/nginx/access.log
  exclude_lines: ["^DBG"]
  exclude_files: [".gz$"]
  document_type: "nginx-accesslog-1105-filebeat"

output.logstash:
  hosts: ["192.168.1.105:5044"]  # logstash server address; more than one can be listed
  enabled: true                  # enable output to logstash (default: true)
  worker: 1                      # number of worker threads
  compression_level: 3           # compression level
  #loadbalance: true             # enable load balancing across multiple hosts
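After changing filebeat.yml, restart filebeat so the new prospector takes effect:

[root@node1 conf.d]# systemctl restart filebeat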

[root@node1 src]# vim /etc/logstash/conf.d/filebate.conf

output {
  if [type] == "nginx-accesslog-1105-filebeat" {
    kafka {
      bootstrap_servers => "192.168.1.105:9092"
      topic_id => "nginx-accesslog-filebeat-1105"
      codec => "json"
    }
  }
}
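This conditional sits alongside the earlier system-log block in filebate.conf; logstash concatenates all conditionals in its config, so both keep working. Restart logstash on node1 to load it:

[root@node1 conf.d]# systemctl restart logstash.service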

 

##node2

input {
  kafka {
    bootstrap_servers => "192.168.1.105:9092"
    topics => "nginx-accesslog-filebeat-1105"
    group_id => "nginx-accesslog-filebeat"
    codec => "json"
    consumer_threads => 1
    decorate_events => true
  }
}

output {
  if [type] == "nginx-accesslog-1105-filebeat" {
    elasticsearch {
      hosts => ["192.168.1.106:9200"]
      index => "logstash-nginx-accesslog-1105-filebeat-%{+YYYY.MM.dd}"
    }
  }
}

[root@node2 conf.d]# systemctl restart logstash.service
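To verify, generate a few requests against nginx on node1 (assuming it still listens on port 80) and look for the new index:

[root@node1 ~]# for i in $(seq 1 5); do curl -s 127.0.0.1/ > /dev/null; done
[root@node2 conf.d]# curl -s '192.168.1.106:9200/_cat/indices?v' | grep nginx-accesslog-1105-filebeat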

 

##Collect java logs

[root@node1 conf.d]# vim /etc/logstash/conf.d/java.conf

input {
  file {
    path => "/var/log/logstash/logstash-plain.log"
    type => "javalog"
    codec => multiline {
      pattern => "^\[(\d{4}-\d{2}-\d{2})"
      negate => true
      what => "previous"
    }
  }
}

output {
  elasticsearch {
    hosts => ["192.168.1.105:9200"]
    index => "javalog-1105-%{+YYYY.MM}"
  }
}
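A hypothetical excerpt shows what the multiline codec does here: with negate => true and what => "previous", any line that does not start with a [YYYY-MM-DD timestamp is glued onto the line before it, so a Java stack trace is indexed as one event:

[2018-01-05T10:00:00,123][ERROR][logstash.pipeline] pipeline error
java.lang.RuntimeException: boom
        at Example.main(Example.java:10)
[2018-01-05T10:00:01,456][INFO ][logstash.agent] a new event starts at this line

The first three lines become a single Elasticsearch document; the last line starts a new one.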
