The architecture diagram is as follows:
Overview: the input stage collects log messages and pushes them into a message-queue service (Redis, MSMQ, Resque, ActiveMQ, RabbitMQ); the output stage then pulls the messages out and writes them to ES, where Kibana displays them.
Benefits: loose coupling; the load of Logstash log collection no longer impacts the business services; collection (front end) and indexing (back end) are separated; and because messages are buffered in the queue, ES maintenance does not interrupt collection.
Below we use Redis as the message-queue store. The architecture is as follows:
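Conceptually there are two Logstash stages: a shipper that reads the logs and pushes them onto a Redis list, and an indexer that pops them off Redis and writes them into Elasticsearch. A minimal sketch of the two configurations (the host, key, and path here are placeholders; the full versions are built step by step below):

# shipper: log file -> Redis list
input  { file  { path => "/var/log/messages" type => "system" } }
output { redis { host => "192.168.247.135" port => "6379" db => "6" data_type => "list" key => "system" } }

# indexer: Redis list -> Elasticsearch
input  { redis { host => "192.168.247.135" port => "6379" db => "6" data_type => "list" key => "system" } }
output { elasticsearch { hosts => ["192.168.247.135:9200"] index => "systemlog-%{+YYYY.MM.dd}" } }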
#Install Redis and edit the Redis configuration file (the bind and protected-mode settings)
[root@elk-node1 conf.d]# yum install -y redis
[root@elk-node1 conf.d]# cp /etc/redis.conf{,.bak}
[root@elk-node1 conf.d]# grep "^[a-z]" /etc/redis.conf
bind 192.168.247.135
protected-mode yes
port 6379
tcp-backlog 511
timeout 0
tcp-keepalive 300
daemonize yes
supervised no
...
#Start the Redis service
[root@elk-node1 conf.d]# systemctl start redis
You have new mail in /var/spool/mail/root
[root@elk-node1 conf.d]# ss -lntp|grep 6379
LISTEN     0      511    192.168.247.135:6379      *:*      users:(("redis-server",pid=18387,fd=4))
You have new mail in /var/spool/mail/root
[root@elk-node1 conf.d]# grep "^[a-z]" /etc/redis.conf^C
[root@elk-node1 conf.d]# redis-cli -h 192.168.247.135
192.168.247.135:6379> exit
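Before going further, a quick sanity check that Redis answers on the bound address is worthwhile (a one-liner sketch):

redis-cli -h 192.168.247.135 -p 6379 ping     # should reply PONG if Redis is reachable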
#Write a test configuration file
[root@elk-node1 conf.d]# cat redis-out.conf
input{
    stdin{
    }
}
output{
    redis{
        host => "192.168.247.135"
        port => "6379"
        db => "6"
        data_type => "list"
        key => "demo"
    }
}
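Before running it, Logstash can validate the syntax of a configuration file; on the Logstash 2.x layout used here (/opt/logstash), the flag is --configtest (a sketch):

/opt/logstash/bin/logstash -f /etc/logstash/conf.d/redis-out.conf --configtest    # should report that the configuration is OK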
#Run Logstash with this configuration file and type hello world
[root@elk-node1 conf.d]# /opt/logstash/bin/logstash -f /etc/logstash/conf.d/redis-out.conf
Settings: Default filter workers: 1
Logstash startup completed
hello world
#Open another window and log in to Redis; you can see the hello world message we just entered
[root@elk-node1 ~]# redis-cli -h 192.168.247.135
192.168.247.135:6379> info
# Keyspace
db6:keys=1,expires=0,avg_ttl=0
192.168.247.135:6379> select 6
OK
192.168.247.135:6379[6]> key *
(error) ERR unknown command 'key'
192.168.247.135:6379[6]> keys *
1) "demo"
192.168.247.135:6379[6]> LINDEX demo -1
"{\"message\":\"hello world\",\"@version\":\"1\",\"@timestamp\":\"2018-07-28T06:44:50.418Z\",\"host\":\"elk-node1\"}"
192.168.247.135:6379[6]>
#Next we write the messages to ES; first, enter a few more messages
[root@elk-node1 conf.d]# /opt/logstash/bin/logstash -f /etc/logstash/conf.d/redis-out.conf
Settings: Default filter workers: 1
Logstash startup completed
fsadf
dgdf gdg
ad fd ds
cd g
rgergerg rg qrg rh rg q
34tr 34 f
gdf df df f
sdv sdf
re ter t4
^CSIGINT received. Shutting down the pipeline. {:level=>:warn}
Logstash shutdown completed
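At this point the messages should all be queued in the Redis list; an easy way to confirm is to check the list length and peek at its contents from the redis-cli session (standard commands; the count will match however many lines were typed):

192.168.247.135:6379[6]> LLEN demo
192.168.247.135:6379[6]> LRANGE demo 0 -1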
#Write a configuration file that reads from Redis and writes to ES
[root@elk-node1 conf.d]# cat redis-int.conf
input{
    redis{
        host => "192.168.247.135"
        port => "6379"
        db => "6"
        data_type => "list"
        key => "demo"
    }
}
output{
    elasticsearch {
        hosts => ["192.168.247.135:9200"]
        index => "redis-demo-%{+YYYY.MM.dd}"
    }
}
#Run Logstash with this configuration file
[root@elk-node1 conf.d]# /opt/logstash/bin/logstash -f /etc/logstash/conf.d/redis-int.conf
Settings: Default filter workers: 1
Logstash startup completed
#At this point we can see that the messages in Redis have been consumed
192.168.247.135:6379[6]> LLEN demo
(integer) 0
Logging in to ES, we can see that the records are already there.
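The same check can be done from the command line with the Elasticsearch REST API (a sketch; the index name follows the redis-demo-%{+YYYY.MM.dd} pattern, so the date part will vary):

curl -s "http://192.168.247.135:9200/_cat/indices?v" | grep redis-demo    # the new index should be listed
curl -s "http://192.168.247.135:9200/redis-demo-*/_search?pretty&size=2"  # returns a couple of the indexed documents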
#Write a shipper configuration file for system monitoring that writes logs to Redis: the input section reads the log messages and the output section writes them to Redis.
[root@elk-node1 conf.d]# cat shipper.conf
input {
    file {
        path => "/var/log/messages"
        type => "system"
        start_position => "beginning"
    }
    file {
        path => "/var/log/elasticsearch/hejianlai.log"
        type => "es-error"
        start_position => "beginning"
        codec => multiline {
            pattern => "^\["
            negate => true
            what => "previous"
        }
    }
    file {
        path => "/var/log/nginx/access_json.log"
        codec => json
        start_position => "beginning"
        type => "nginx-log"
    }
    syslog{
        type => "system-syslog"
        host => "192.168.247.135"
        port => "514"
    }
}
output {
    if [type] == "system"{
        redis{
            host => "192.168.247.135"
            port => "6379"
            db => "6"
            data_type => "list"
            key => "system"
        }
    }
    if [type] == "es-error"{
        redis{
            host => "192.168.247.135"
            port => "6379"
            db => "6"
            data_type => "list"
            key => "es-error"
        }
    }
    if [type] == "nginx-log"{
        redis{
            host => "192.168.247.135"
            port => "6379"
            db => "6"
            data_type => "list"
            key => "nginx-log"
        }
    }
    if [type] == "system-syslog"{
        redis{
            host => "192.168.247.135"
            port => "6379"
            db => "6"
            data_type => "list"
            key => "system-syslog"
        }
    }
}
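Note that the nginx file input uses codec => json, so nginx must already be writing its access log as JSON to /var/log/nginx/access_json.log. A log_format along these lines would produce such a file (a sketch only; the field names are illustrative, not necessarily the ones used in your nginx setup):

log_format json '{"@timestamp":"$time_iso8601","clientip":"$remote_addr",'
                '"url":"$uri","domain":"$host","status":"$status",'
                '"size":$body_bytes_sent,"responsetime":$request_time}';
access_log /var/log/nginx/access_json.log json;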
#Run the configuration file
[root@elk-node1 conf.d]# /opt/logstash/bin/logstash -f /etc/logstash/conf.d/shipper.conf
#Check Redis: the corresponding keys have been created
192.168.247.135:6379[6]> keys *
1) "system"
2) "nginx-log"
3) "es-error"
192.168.247.135:6379[6]>
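The system-syslog key does not appear yet because nothing has been sent to the syslog input on port 514; it shows up once a syslog client starts forwarding messages there. With rsyslog, for example, one forwarding line is enough (a sketch; @@ forwards over TCP, a single @ over UDP):

# append to /etc/rsyslog.conf, then restart rsyslog (systemctl restart rsyslog)
*.* @@192.168.247.135:514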
#Write a configuration file that reads logs from Redis and writes them to ES: the input section reads the Redis messages and the output section writes them to ES.
[root@elk-node2 conf.d]# cat display.conf
input {
    redis{
        type => "system-syslog"
        host => "192.168.247.135"
        port => "6379"
        db => "6"
        data_type => "list"
        key => "system-syslog"
    }
    redis{
        type => "system"
        host => "192.168.247.135"
        port => "6379"
        db => "6"
        data_type => "list"
        key => "system"
    }
    redis{
        type => "es-error"
        host => "192.168.247.135"
        port => "6379"
        db => "6"
        data_type => "list"
        key => "es-error"
    }
    redis{
        type => "nginx-log"
        host => "192.168.247.135"
        port => "6379"
        db => "6"
        data_type => "list"
        key => "nginx-log"
    }
}
output {
    if [type] == "system"{
        elasticsearch {
            hosts => ["192.168.247.135:9200"]
            index => "systemlog-%{+YYYY.MM.dd}"
        }
    }
    if [type] == "es-error"{
        elasticsearch {
            hosts => ["192.168.247.135:9200"]
            index => "es-error-%{+YYYY.MM.dd}"
        }
    }
    if [type] == "nginx-log"{
        elasticsearch {
            hosts => ["192.168.247.135:9200"]
            index => "nginx-log-%{+YYYY.MM.dd}"
        }
    }
    if [type] == "system-syslog"{
        elasticsearch {
            hosts => ["192.168.247.135:9200"]
            index => "system-syslog-log-%{+YYYY.MM.dd}"
        }
    }
}
#Run the configuration file and log collection begins.
[root@elk-node2 conf.d]# /opt/logstash/bin/logstash -f /etc/logstash/conf.d/display.conf &
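Starting the indexer with a bare & ties it to the current shell, so it may stop when the session ends. For anything longer-lived, nohup (or a proper init/systemd unit) is safer; a minimal sketch, with the log path chosen only as an example:

nohup /opt/logstash/bin/logstash -f /etc/logstash/conf.d/display.conf > /var/log/logstash-display.log 2>&1 &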
This completes the basic setup of the Logstash + Redis + Elasticsearch + Kibana architecture.