filebeat日誌採集

架構一:
filebeat -> logstash1 -> redis -> logstash2 -> elasticsearch(集羣) -> kibana
這裏就不寫安裝程序的步驟了,相信對你們都沒有難度:
(軟件安裝可自行設計)
230,安裝filebeat, logstash1 ,elasticsearch
232,安裝logstash2, redis, elasticsearch  ,kibana

注意:filebeat的配置文件是YAML格式,對縮進和格式要求非常嚴格
1,配置filebeat文件:
[root@localhost filebeat]# cat /etc/filebeat/filebeat.yml
# Filebeat config (architecture 1): tail two log files and ship events to
# logstash1 on 192.168.0.230:5044.
filebeat:
  prospectors:
   # Commented-out template for one prospector (one log source):
   # - # start of one log-file entry
   #   paths: # list of paths to watch
   #     - /var/www/logs/access.log # absolute path
   #   input_type: log # input type is "log"
   #   document_type: api4-nginx-accesslog # must match the name logstash checks in its [type] conditionals
    -
      paths:
        - /opt/apps/huhu/logs/ase.log
      input_type: log
      document_type: "ase-ase-log"  # becomes the event "type" field tested by logstash
      encoding: utf-8
      tail_files: true  # start reading from the end of the file (skip old lines)
      multiline.pattern: '^\[' # a line starting with '[' begins a new event
      multiline.negate: true
      multiline.match: after    # non-matching lines are appended to the previous event
      #tags: ["ase-ase"]

    -
      paths:   # collect JSON-format logs
        - /var/log/nginx/access.log
      input_type: log
      document_type: "nginx-access-log"
      tail_files: true
      json.keys_under_root: true      
      json.overwrite_keys: true  

  registry_file: /var/lib/filebeat/registry
output:      # ship to logstash1 on host 230
  logstash:
    hosts: ["192.168.0.230:5044"]

shipper:
  logging:
    to_files: true
    files:
      path: /tmp/mybeat

 2.配置230的logstash:input(beats) --> output(redis)
[root@web1 conf.d]# pwd
/etc/logstash/conf.d
[root@web1 conf.d]# cat nginx-ase-input.conf 
# Logstash1 on 230 (architecture 1, stage 1): receive events from filebeat
# on port 5044 and fan them out into per-type redis lists on 232 (db 4).
input {
        beats {
        port => 5044
        codec => "json"
        }}

output {                         
        if [type] == "nginx-access-log" {
        redis {                            # push nginx access logs into redis
                data_type => "list"
                key => "nginx-accesslog"
                host => "192.168.0.232"
                port => "6379"
                db => "4"
                password => "123456"
        }}
        if [type] == "ase-ase-log" {
        redis {                            # push ase application logs into redis
                data_type => "list"
                key => "ase-log"
                host => "192.168.0.232"
                port => "6379"
                db => "4"
                password => "123456"
        }}      

}

  3.redis寫到elasticsearch裏,232服務器配置:logstash:input(redis) --> output(elasticsearch)
[root@localhost conf.d]# pwd
/etc/logstash/conf.d
[root@localhost conf.d]# cat nginx-ase-output.conf 
# Logstash2 on 232 (architecture 1, stage 2): pull events from the redis
# lists written by logstash1 and index them into elasticsearch by type.
input {
        redis {
               type => "nginx-access-log"   # applied only if the event has no type; filebeat events already carry one
                data_type => "list"
                key => "nginx-accesslog"    # list name written by logstash1 on 230
                host => "192.168.0.232"
                port => "6379"
                db => "4"
                password => "123456"
                codec  => "json"
        }

        redis {
                type => "ase-ase-log"
                data_type => "list"
                key => "ase-log"
                host => "192.168.0.232"
                port => "6379"
                db => "4"
                password => "123456"
                # no explicit codec here; the redis input plugin defaults to "json"
        }
}

output {
    if [type] == "nginx-access-log" { 
        elasticsearch {  
            hosts => ["192.168.0.232:9200"] 
            index => "nginx-accesslog-%{+YYYY.MM.dd}" 
    }}
    if [type] == "ase-ase-log" {
            elasticsearch {
                hosts => ["192.168.0.232:9200"]
                index => "ase-log-%{+YYYY.MM.dd}"
        }}
}

4,在232上配置elasticsearch--->kibana
在kibana上添加對應的elasticsearch索引便可。

架構二:
filebeat -> redis -> logstash --> elasticsearch --> kibana  #缺點:filebeat寫進redis有限制,暫時還沒找到同時寫入多個key的方法。

1.filebeat配置:
[root@localhost yes_yml]# cat filebeat.yml 
# Filebeat config (architecture 2): tail one log file and push events
# directly into a redis list on 192.168.0.232 (db 3), bypassing logstash1.
filebeat:
  prospectors:
   # Commented-out template for one prospector (one log source):
   # - # start of one log-file entry
   #   paths: # list of paths to watch
   #     - /var/www/logs/access.log # absolute path
   #   input_type: log # input type is "log"
   #   document_type: api4-nginx-accesslog # must match the name logstash checks in its [type] conditionals
    -
      paths:
        - /opt/apps/qpq/logs/qpq.log
      input_type: log
      document_type: "qpq-qpq-log"  # becomes the event "type" field tested by logstash
      encoding: utf-8
      tail_files: true  # start reading from the end of the file
      multiline.pattern: '^\['  # a line starting with '[' begins a new event
      multiline.negate: true
      multiline.match: after  # non-matching lines are appended to the previous event
   #tags: ["qpq-qpq-log"]
  registry_file: /var/lib/filebeat/registry

output:
  redis:
      host: "192.168.0.232"
      port: 6379
      db: 3
      password: "123456"
      timeout: 5
      reconnect_interval: 1
      # "index" is the redis key (list name) events are pushed to; it must
      # match the "key" of the logstash redis input that consumes them.
      # NOTE(review): looks like a transposition of "qpq-qpq-log" — confirm.
      index: "pqp-pqp-log"

shipper:
  logging:
    to_files: true
    files:
      path: /tmp/mybeat

2.在232上配置:redis-->elasticsearch-->kibana
[root@localhost yes_yml]# cat systemlog.conf 
# Logstash on 232 (architecture 2): pull the events filebeat pushed into
# redis db 3 and index them into elasticsearch as a daily index.
input {
   redis {
        type => "qpq-qpq-log"        # applied only if the event carries no type
        data_type => "list"
        # Must equal the "index" value of filebeat's redis output, i.e. the
        # list filebeat actually writes ("pqp-pqp-log"). The original value
        # "qpq-pqp-log" matched nothing, so logstash never received events.
        key => "pqp-pqp-log"
        host => "192.168.0.232"
        port => "6379"
        db => "3"
        password => "123456"
   }
}
output {
   if [type] == "qpq-qpq-log" {
      elasticsearch {
         hosts => ["192.168.0.232:9200"]
         index => "qpq-qpq-log-%{+YYYY.MM.dd}"   # one index per day
      }
   }
}

3.在232上配置elasticsearch--->kibana
在kibana上添加對應的elasticsearch索引便可
相關文章
相關標籤/搜索