Filebeat is a lightweight, single-purpose log collector, used to gather logs on servers that do not have Java installed. It can forward the logs to Logstash, Elasticsearch, Redis, and similar destinations for further processing.
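As a rough illustration of that forwarding role, a minimal filebeat.yml that ships one log file straight to a Logstash host could look like the sketch below; the host and port 192.168.1.31:5044 are only assumed examples, and the actual configurations used in this walkthrough follow later.

filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /var/log/messages          # log file to ship
output.logstash:
  hosts: ["192.168.1.31:5044"]   # assumed Logstash host:port (Beats input on the Logstash side)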
Official download page: https://www.elastic.co/cn/downloads/past-releases#filebeat
Official documentation: https://www.elastic.co/guide/en/beats/filebeat/current/configuring-howto-filebeat.html
1) Download Filebeat
# These steps are performed on the Logstash server; for this test, Logstash is stopped first.
[root@logstash ~]# systemctl stop logstash
[root@logstash ~]# wget https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-6.8.1-x86_64.rpm
2) Install Filebeat
[root@logstash ~]# yum -y localinstall filebeat-6.8.1-x86_64.rpm
1) Edit the Filebeat configuration file
[root@logstash ~]# cp /etc/filebeat/filebeat.yml{,.bak}
[root@logstash ~]# grep -v "#" /etc/filebeat/filebeat.yml | grep -v "^$"
filebeat.inputs:
- type: log                # Default is log, meaning a log file input
  enabled: true            # Whether this input takes effect; if set to false, logs for this input will not be collected
  paths:
    - /var/log/messages    # Paths of the logs to collect; use absolute paths, multiple entries allowed
    - /var/log/*.log
filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false
setup.template.settings:
  index.number_of_shards: 3
setup.kibana:
output.file:
  path: "/tmp"
  filename: "filebeat.txt"
processors:
  - add_host_metadata: ~
  - add_cloud_metadata: ~
[root@logstash ~]# systemctl start filebeat
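Before starting the service, the configuration syntax can be validated with Filebeat's built-in check; a quick sketch, assuming the default RPM paths:

# Parse the configuration and report whether it is valid
[root@logstash ~]# filebeat test config -c /etc/filebeat/filebeat.yml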
2) Test and verify the data
[root@logstash ~]# echo "test" >> /var/log/messages
[root@logstash ~]# tail /tmp/filebeat.txt
{"@timestamp":"2019-07-11T02:18:10.331Z","@metadata":{"beat":"filebeat","type":"doc","version":"6.8.1"},"prospector":{"type":"log"},"input":{"type":"log"},"beat":{"name":"logstash","hostname":"logstash","version":"6.8.1"},"host":{"architecture":"x86_64","os":{"platform":"centos","version":"7 (Core)","family":"redhat","name":"CentOS Linux","codename":"Core"},"id":"12bcfdc379904e4eb20173a568ecd7df","containerized":false,"name":"logstash"},"source":"/var/log/messages","offset":53643,"log":{"file":{"path":"/var/log/messages"}},"message":"Jul 11 10:18:10 node01 systemd: Stopping Filebeat sends log files to Logstash or directly to Elasticsearch...."}
{"@timestamp":"2019-07-11T02:18:13.324Z","@metadata":{"beat":"filebeat","type":"doc","version":"6.8.1"},"prospector":{"type":"log"},"beat":{"version":"6.8.1","name":"logstash","hostname":"logstash"},"host":{"name":"logstash","architecture":"x86_64","os":{"family":"redhat","name":"CentOS Linux","codename":"Core","platform":"centos","version":"7 (Core)"},"id":"12bcfdc379904e4eb20173a568ecd7df","containerized":false},"log":{"file":{"path":"/var/log/messages"}},"message":"Jul 11 10:18:10 node01 systemd: Started Filebeat sends log files to Logstash or directly to Elasticsearch..","source":"/var/log/messages","offset":53754,"input":{"type":"log"}}
{"@timestamp":"2019-07-11T02:18:13.324Z","@metadata":{"beat":"filebeat","type":"doc","version":"6.8.1"},"host":{"architecture":"x86_64","name":"logstash","os":{"codename":"Core","platform":"centos","version":"7 (Core)","family":"redhat","name":"CentOS Linux"},"id":"12bcfdc379904e4eb20173a568ecd7df","containerized":false},"source":"/var/log/messages","offset":53862,"log":{"file":{"path":"/var/log/messages"}},"message":"Jul 11 10:18:10 node01 systemd: Starting Filebeat sends log files to Logstash or directly to Elasticsearch....","prospector":{"type":"log"},"input":{"type":"log"},"beat":{"name":"logstash","hostname":"logstash","version":"6.8.1"}}
{"@timestamp":"2019-07-11T02:18:48.328Z","@metadata":{"beat":"filebeat","type":"doc","version":"6.8.1"},"offset":53973,"log":{"file":{"path":"/var/log/messages"}},"message":"test","input":{"type":"log"},"prospector":{"type":"log"},"beat":{"name":"logstash","hostname":"logstash","version":"6.8.1"},"host":{"name":"logstash","os":{"version":"7 (Core)","family":"redhat","name":"CentOS Linux","codename":"Core","platform":"centos"},"id":"12bcfdc379904e4eb20173a568ecd7df","containerized":false,"architecture":"x86_64"},"source":"/var/log/messages"}
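Each event is written as one JSON document per line. If jq is installed on the host (an assumption; it is not part of the base CentOS 7 install), the raw log line of the newest event can be pulled out for a quick check:

# Print only the message field of the most recent event
[root@logstash ~]# tail -1 /tmp/filebeat.txt | jq -r '.message'
test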
1) Edit the Filebeat configuration file and change the output
[root@logstash ~]# grep -v "#" /etc/filebeat/filebeat.yml | grep -v "^$"
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /var/log/messages
    - /var/log/*.log
filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false
setup.template.settings:
  index.number_of_shards: 3
setup.kibana:
output.redis:
  hosts: ["192.168.1.30:6379"]   # Redis server and port
  key: "system-log-33"           # Custom key name, defined here for later processing
  db: 1                          # Which Redis database to use
  timeout: 5                     # Timeout
  password: 123321               # Redis password
processors:
  - add_host_metadata: ~
  - add_cloud_metadata: ~
[root@logstash ~]# systemctl restart filebeat
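Before restarting Filebeat, it can be worth confirming that the Redis server is reachable from this host with the configured password; a small sketch, assuming redis-cli is available locally:

# A successful authenticated ping answers with PONG
[root@logstash ~]# redis-cli -h 192.168.1.30 -a 123321 ping
PONG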
2) Verify that the data is in Redis
[root@linux-redis ~]# redis-cli -h 192.168.1.30
192.168.1.30:6379> AUTH 123321
OK
192.168.1.30:6379> SELECT 1
OK
192.168.1.30:6379[1]> KEYS *
1) "system-log-33"
192.168.1.30:6379[1]> LLEN system-log-33
(integer) 3
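Filebeat pushes each event onto the list as a JSON string, so a single entry can also be inspected from the same redis-cli session, for example:

# Show the first element of the list written by Filebeat
192.168.1.30:6379[1]> LRANGE system-log-33 0 0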
3) Configure Logstash to pull the data from the Redis server
[root@linux-elk1 ~]# cat /etc/logstash/conf.d/redis-filebeat.conf
input {
    redis {
        data_type => "list"
        host => "192.168.1.30"
        password => "123321"
        port => "6379"
        db => "1"
        key => "system-log-33"
    }
}
output {
    elasticsearch {
        hosts => ["192.168.1.31:9200"]
        index => "file-systemlog-%{+YYYY.MM.dd}"
    }
}
[root@linux-elk1 ~]# systemctl restart logstash
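Before restarting, the pipeline file can be syntax-checked with Logstash's test flag; a sketch assuming the default RPM install path:

# Parse the pipeline and exit without starting it (-t is short for --config.test_and_exit)
[root@linux-elk1 ~]# /usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/redis-filebeat.conf -t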
4) Write test data to the log file
[root@logstash ~]# echo "11111111111111" >> /var/log/messages
[root@logstash ~]# echo "2222222222" >> /var/log/messages
[root@logstash ~]# echo "33333333" >> /var/log/messages
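If the pipeline works, Logstash drains the Redis list and creates the daily index in Elasticsearch; this can be checked from the command line (the host and index name come from the configuration above):

# List indices and filter for the one created by this pipeline
[root@linux-elk1 ~]# curl -s "http://192.168.1.31:9200/_cat/indices?v" | grep file-systemlog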
5) Create an index pattern (e.g. file-systemlog-*) in the Kibana UI
6) Verify the data
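Besides checking in Kibana, the test entries can also be queried directly against Elasticsearch with the search API; a sketch using a simple query string:

# Search the pipeline's indices for one of the test messages written above
[root@linux-elk1 ~]# curl -s 'http://192.168.1.31:9200/file-systemlog-*/_search?q=message:33333333&pretty'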