https://www.elastic.co/cn/downloads/past-releases#
https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-6.5.4.rpm
通俗來說,ELK是由Elasticsearch、Logstash、Kibana 三個開源軟件的組成的一個組合體,這三個軟件當中,每一個軟件用於完成不一樣的功能,ELK 又稱爲ELK stack,官方域名爲elastic.co,ELK stack的主要優勢有以下幾個:
處理方式靈活: elasticsearch是實時全文索引,具備強大的搜索功能
配置相對簡單:elasticsearch所有使用JSON 接口,logstash使用模塊配置,kibana的配置文件部分更簡單。
檢索性能高效:基於優秀的設計,雖然每次查詢都是實時,可是也能夠達到百億級數據的查詢秒級響應。
集羣線性擴展:elasticsearch和logstash均可以靈活線性擴展
前端操做絢麗:kibana的前端設計比較絢麗,並且操做簡單前端
Elasticsearch:
是一個高度可擴展的開源全文搜索和分析引擎,它可實現數據的實時全文搜索搜索、支持分佈式可實現高可用、提供API接口,能夠處理大規模日誌數據,好比Nginx、Tomcat、系統日誌等功能java
Logstash
能夠經過插件實現日誌收集和轉發,支持日誌過濾,支持普通log、自定義json格式的日誌解析node
kibana
主要是經過接口調用elasticsearch的數據,並進行前端數據可視化的展示python
1.ELK主要用於日誌收集、存儲、分析、展示
2.解決開發查看日誌的需求,解決服務器用戶登陸的權限問題linux
rpm -ivh jdk-8u221-linux-x64.rpm
yum install elasticsearch-6.5.4.rpm -y
grep "^[a-zA-Z]" /etc/elasticsearch/elasticsearch.yml
cluster.name: cluster-e #ELK的集羣名稱,名稱相同即屬因而同一個集羣
node.name: e1 #本機在集羣內的節點名稱
path.data: /esdata/data #數據保存目錄
path.logs: /esdata/logs #日誌保存目錄
network.host: 0.0.0.0 #監聽IP
http.port: 9200
discovery.zen.ping.unicast.hosts: ["192.168.10.100", "192.168.10.101"]
#bootstrap.memory_lock: true #服務啓動的時候鎖定足夠的內存,防止數據寫入swap,使用swap會影響性能
mkdir /esdata
id elasticsearch
uid=998(elasticsearch) gid=996(elasticsearch) groups=996(elasticsearch)
chown 998.996 /esdata/ -R
###啓動服務nginx
systemctl start elasticsearch
tail -f /esdata/logs/cluster-e.log
9200用戶訪問端口
9300集羣內通訊端口,選舉端口git
curl http://192.168.10.100:9200/ { "name" : "e1", #本節點名稱 "cluster_name" : "cluster-e", #集羣名稱 "cluster_uuid" : "GSegz58CSrmLgAemNjIvpA", "version" : { "number" : "6.5.4", #elasticsearch版本號 "build_flavor" : "default", "build_type" : "rpm", "build_hash" : "d2ef93d", "build_date" : "2018-12-17T21:17:40.758843Z", "build_snapshot" : false, "lucene_version" : "7.5.0", #lucene版本號,elasticsearch給予lucene作搜索 "minimum_wire_compatibility_version" : "5.6.0", "minimum_index_compatibility_version" : "5.0.0" }, "tagline" : "You Know, for Search" #口號 } curl http://192.168.10.101:9200/ { "name" : "e2", "cluster_name" : "cluster-e", "cluster_uuid" : "GSegz58CSrmLgAemNjIvpA", "version" : { "number" : "6.5.4", "build_flavor" : "default", "build_type" : "rpm", "build_hash" : "d2ef93d", "build_date" : "2018-12-17T21:17:40.758843Z", "build_snapshot" : false, "lucene_version" : "7.5.0", "minimum_wire_compatibility_version" : "5.6.0", "minimum_index_compatibility_version" : "5.0.0" }, "tagline" : "You Know, for Search" }
vim /etc/elasticsearch/elasticsearch.yml bootstrap.memory_lock: true 去掉註釋
vim /etc/elasticsearch/jvm.options -Xms2g -Xmx2g vim /usr/lib/systemd/system/elasticsearch.service LimitMEMLOCK=infinity #去掉註釋 systemctl daemon-reload systemctl restart elasticsearch
elasticsearch-head 是一個用於瀏覽和與彈性搜索集羣交互的 Web 前端。
https://mobz.github.io/elasticsearch-head/
https://github.com/mobz/elasticsearch-head
vim /etc/elasticsearch/elasticsearch.yml http.cors.enabled: true #最下方添加 http.cors.allow-origin: "*" yum install npm -y git clone git://github.com/mobz/elasticsearch-head.git cd elasticsearch-head npm install npm run start open http://localhost:9100/
yum install docker -y systemctl start docker && systemctl enable docker docker run -p 9100:9100 mobz/elasticsearch-head:5 open http://localhost:9100/
驗證索引是否存在
docker
查看數據
Master的職責:
統計各node節點狀態信息、集羣狀態信息統計、索引的建立和刪除、索引分配的管理、關閉node節點等
Slave的職責:
同步數據、等待機會成爲Master
獲取到的是一個json格式的返回值,那就能夠經過python對其中的信息進行分析,例如對status進行分析,若是等於green(綠色)就是運行在正常,等於yellow(黃色)表示副本分片丟失,red(紅色)表示主分片丟失
curl -sXGET http://192.168.10.100:9200/_cluster/health?pretty=true
cat els_monitor.py
#!/usr/bin/env python
#coding:utf-8
# Monitor Elasticsearch cluster health via the _cluster/health API.
# Prints "50" when the cluster status is "green" (healthy) and "100"
# otherwise (yellow = replica shards lost, red = primary shards lost),
# so a monitoring agent (e.g. zabbix) can poll this script's output.
import smtplib                        # NOTE(review): the email modules are
from email.mime.text import MIMEText  # imported but never used below —
from email.utils import formataddr    # presumably for a planned mail alert;
import subprocess                     # confirm intent or remove them
import json

# Health endpoint of the monitored Elasticsearch node.
ES_HEALTH_URL = "http://192.168.10.100:9200/_cluster/health?pretty=true"

def parse_status(data):
    """Parse the JSON body returned by _cluster/health and return the
    "status" field ("green" / "yellow" / "red"), or None if absent.

    Uses json.loads instead of eval(): eval on an HTTP response is a
    code-injection risk and required the false="false" workaround for
    JSON's bare literals.
    """
    return json.loads(data).get("status")

def main():
    # Fetch cluster health with curl (matches the manual check shown in
    # the notes above) and report the 50/100 code on stdout.
    obj = subprocess.Popen(
        "curl -sXGET " + ES_HEALTH_URL,
        shell=True, stdout=subprocess.PIPE)
    data = obj.stdout.read()
    status = parse_status(data)
    # print(...) with a single argument behaves identically on
    # Python 2 and Python 3, unlike the bare print statement.
    if status == "green":
        print("50")
    else:
        print("100")

if __name__ == "__main__":
    main()
# Result: 50 means healthy, 100 means abnormal
rpm -ivh logstash-6.5.4.rpm
測試輸出到文件
/usr/share/logstash/bin/logstash -e 'input { stdin{} } output { file { path => "/tmp/log-%{+YYYY.MM.dd}messages.gz"}}' hello file log-2019.08.16messages.gz log-2019.08.16messages.gz: ASCII text cat log-2019.08.16messages.gz {"host":"logstash1","@version":"1","@timestamp":"2019-08-16T05:27:35.612Z","message":"11:01:15.229 [[main]>worker1] INFO logstash.outputs.file - Opening file {:path=>\"/tmp/log-2017-04-20messages.gz\"}"} {"host":"logstash1","@version":"1","@timestamp":"2019-08-16T05:27:35.566Z","message":"hello"}
測試下配置文件
/usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/log-es.conf -t
/usr/share/logstash/bin/logstash -e 'input { stdin{} } output { elasticsearch {hosts => ["192.168.10.100:9200"] index => "mytest-%{+YYYY.MM.dd}" }}'
ll /esdata/data/nodes/0/indices/ total 0 drwxr-xr-x 8 elasticsearch elasticsearch 65 Aug 16 13:18 bvx-zQINSA6t6hxXHsgbiQ drwxr-xr-x 8 elasticsearch elasticsearch 65 Aug 16 14:44 DWRgHkutQsSpH1HUCRJqcg drwxr-xr-x 3 elasticsearch elasticsearch 20 Aug 16 13:18 N7_a6rlZQTSuQ_PFxnPxKw drwxr-xr-x 4 elasticsearch elasticsearch 29 Aug 16 13:18 OyJkEOVMQr2oYLvRp0CZfg
[root@logstash1 ~]# cat /etc/logstash/conf.d/log-es.conf input { file { path => "/var/log/messages" #日誌路徑 type => "systemlog" #事件的惟一類型 start_position => "beginning" #第一次收集日誌的位置 stat_interval => "3" #日誌收集的間隔時間 } } output { elasticsearch { hosts => ["192.168.10.100:9200"] index => "192.168.10.102-syslog-%{+YYYY.MM.dd}" } }
/usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/log-es.conf -t [root@logstash1 ~]# /usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/log-es.conf -t WARNING: Could not find logstash.yml which is typically located in $LS_HOME/config or /etc/logstash. You can specify the path using --path.settings. Continuing using the defaults Could not find log4j2 configuration at path /usr/share/logstash/config/log4j2.properties. Using default config which logs errors to the console [WARN ] 2019-08-16 16:31:50.625 [LogStash::Runner] multilocal - Ignoring the 'pipelines.yml' file because modules or command line options are specified Configuration OK [INFO ] 2019-08-16 16:31:54.072 [LogStash::Runner] runner - Using config.test_and_exit mode. Config Validation Result: OK. Exiting Logstash
tail -f /var/log/logstash/logstash-plain.log [2019-08-16T16:38:05,151][WARN ][filewatch.tailmode.handlers.createinitial] open_file OPEN_WARN_INTERVAL is '300' 解決方法 log日誌文件受權 chmod 644 /var/log/messages
systemctl restart logstash
echo testlog11111111 >> /var/log/messages
yum install httpd -y
echo http > /var/www/html/index.html
systemctl start httpd
[root@logstash1 conf.d]# cat log-es.conf input { file { path => "/var/log/messages" type => "systemlog" start_position => "beginning" stat_interval => "3" } file { path => "/var/log/httpd/access_log" type => "apache-accesslog" start_position => "beginning" #stat_interval => "3" } } output { if [type] == "systemlog" { elasticsearch { hosts => ["192.168.10.100:9200"] index => "192.168.10.102-syslog-%{+YYYY.MM.dd}" }} if [type] == "apache-accesslog" { elasticsearch { hosts => ["192.168.10.100:9200"] index => "192.168.10.102-apache-accesslog-%{+YYYY.MM.dd}" }} }
vim /etc/systemd/system/logstash.service User=root Group=root systemctl daemon-reload systemctl restart logstash
cat conf/nginx.conf log_format access_json '{"@timestamp":"$time_iso8601",' '"host":"$server_addr",' '"clientip":"$remote_addr",' '"size":$body_bytes_sent,' '"responsetime":$request_time,' '"upstreamtime":"$upstream_response_time",' '"upstreamhost":"$upstream_addr",' '"http_host":"$host",' '"url":"$uri",' '"domain":"$host",' '"xff":"$http_x_forwarded_for",' '"referer":"$http_referer",' '"status":"$status"}'; access_log /usr/local/nginx/logs/access_json.log access_json; ./sbin/nginx -t /etc/init.d/nginx restart
[root@logstash1 conf.d]# cat log-es.conf input { file { path => "/var/log/messages" type => "systemlog" start_position => "beginning" stat_interval => "3" } file { path => "/var/log/httpd/access_log" type => "apache-accesslog" start_position => "beginning" #stat_interval => "3" } file { path => "/usr/local/nginx/logs/access_json.log" type => "nginx-accesslog" start_position => "beginning" stat_interval => "3" codec => "json" } } output { if [type] == "systemlog" { elasticsearch { hosts => ["192.168.10.100:9200"] index => "192.168.10.102-syslog-%{+YYYY.MM.dd}" }} if [type] == "apache-accesslog" { elasticsearch { hosts => ["192.168.10.100:9200"] index => "192.168.10.102-apache-accesslog-%{+YYYY.MM.dd}" }} if [type] == "nginx-accesslog" { elasticsearch { hosts => ["192.168.10.100:9200"] index => "192.168.10.102-nginx-accesslog-%{+YYYY.MM.dd}" }} }
systemctl restart logstash
安裝 Kibana
yum install kibana-6.5.4-x86_64.rpm -y
grep "^[a-zA-Z]" /etc/kibana/kibana.yml
server.port: 5601 #監聽端口
server.host: "0.0.0.0" #監聽地址
elasticsearch.url: "http://192.168.10.100:9200" #elasticsearch
systemctl start kibana