ELK學習筆記

安裝jdk

選擇oracle官網下載源碼包

http://www.oracle.com/technetwork/java/javase/downloads/jdk8-downloads-2133151.html

 

# 上傳解壓jdk壓縮包

mkdir /usr/local/java

rz 上傳壓縮包

tar zxf 壓縮包

[root@linux-node1 elasticsearch]# ll /usr/local/java/

total 4

drwxr-xr-x. 8 10 143 4096 Dec 19 16:24 jdk1.8.0_161

  

#配置java的環境變量

[root@linux-node1 elasticsearch]# tail -4 /etc/profile

JAVA_HOME=/usr/local/java/jdk1.8.0_161

JRE_HOME=$JAVA_HOME/jre

PATH=$PATH:$JAVA_HOME/bin:$JRE_HOME/bin

CLASSPATH=:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar:$JRE_HOME/lib/dt.jar

[root@linux-node1 elasticsearch]# . /etc/profile    

 

  

#檢查環境變量配置

[root@linux-node1 elasticsearch]# java -version

java version "1.8.0_161"

Java(TM) SE Runtime Environment (build 1.8.0_161-b12)

Java HotSpot(TM) 64-Bit Server VM (build 25.161-b12, mixed mode)

 

[root@linux-node1 elasticsearch]# javac

Usage: javac <options> <source files>

where possible options include:

  -g                         Generate all debugging info

  -g:none                    Generate no debugging info

  -g:{lines,vars,source}     Generate only some debugging info

  -nowarn                    Generate no warnings

  -verbose                   Output messages about what the compiler is doing

  -deprecation               Output source locations where deprecated APIs are used

  -classpath <path>          Specify where to find user class files and annotation processors

  -cp <path>                 Specify where to find user class files and annotation processors  

若是出現一堆幫助表示環境變量配置成功。

 

安裝elasticsearch

按照官網選擇最新版本6.1

選擇rpm包方式安裝

參考網站 :      https://www.elastic.co/guide/en/elasticsearch/reference/6.1/rpm.html

#下載公鑰到本地主機

rpm --import https://artifacts.elastic.co/GPG-KEY-elasticsearch

  

#配置yum倉庫

 

vim /etc/yum.repos.d/elasticsearch.repo

[elasticsearch-6.x]
name=Elasticsearch repository for 6.x packages
baseurl=https://artifacts.elastic.co/packages/6.x/yum
gpgcheck=1
gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch
enabled=1
autorefresh=1
type=rpm-md
 

  

# yum 安裝

sudo yum install elasticsearch 

# 啓停程序命令

service elasticsearch start
service elasticsearch stop

 

# 設置開機自啓動

 

systemctl enable elasticsearch.service

 

#配置文件修改項

[root@linux-node1 elasticsearch]# grep '^[a-z]' elasticsearch.yml 
cluster.name: oldboy        #集羣名稱
node.name: linux-node1            #主機名 
path.data: /data/es-data    #數據存儲路徑
path.logs: /var/log/elasticsearch/ #日誌路徑
network.host: 0.0.0.0         #任何主機均可以訪問
http.port: 9200                      #默認http鏈接端口9200

 

#在結尾加上jdk環境路徑

[root@linux-node1 elasticsearch]# tail -1 /etc/sysconfig/elasticsearch 
JAVA_HOME=/usr/local/java/jdk1.8.0_161

 

#檢查默認端口9200是否存在

[root@linux-node1 elasticsearch]# netstat -lntup|grep 9200
tcp6       0      0 :::9200                 :::*                    LISTEN      8300/java  

 

# 測試鏈接

[root@linux-node1 elasticsearch]# curl 10.0.0.5:9200
{
  "name" : "0Rl_dTb",
  "cluster_name" : "elasticsearch",
  "cluster_uuid" : "9zK23FE9Thq-x7eZz0GQvg",
  "version" : {
    "number" : "6.1.3",
    "build_hash" : "af51318",
    "build_date" : "2018-01-26T18:22:55.523Z",
    "build_snapshot" : false,
    "lucene_version" : "7.1.0",
    "minimum_wire_compatibility_version" : "5.6.0",
    "minimum_index_compatibility_version" : "5.0.0"
  },
  "tagline" : "You Know, for Search"
}
 

 

安裝header插件

cd  /opt

wget https://github.com/mobz/elasticsearch-head/archive/master.zip
wget https://npm.taobao.org/mirrors/node/latest-v4.x/node-v4.4.7-linux-x64.tar.gz
tar -zxvf node-v4.4.7-linux-x64.tar.gz
 

 

分別解壓

 

 

 

#配置nodejs環境變量

[root@linux-node1 elasticsearch-head-master]# tail -3 /etc/profile
NODE_HOME=/opt/node-v4.4.7-linux-x64
PATH=$PATH:$NODE_HOME/bin
NODE_PATH=$NODE_HOME/lib/node_modules
[root@linux-node1 elasticsearch]# . /etc/profile 
                

#安裝cnpm提高下載速度

[root@node1 ~]#  npm install -g cnpm --registry=https://registry.npm.taobao.org
# 下載依賴
cnpm install  --registry=https://registry.npm.taobao.org
 

 

#安裝grunt

npm install -g grunt --registry=https://registry.npm.taobao.org
npm install -g grunt-cli --registry=https://registry.npm.taobao.org --no-proxy

 

#檢查grunt安裝

[root@linux-node1 ~]# grunt -version
grunt-cli v1.2.0

 

若是ElasticSearch已經啓動,須要先中止

[es@node1 ~]$ jps
3261 Elasticsearch
3375 Jps
[es@node1 ~]$ kill 3261

配置 ElasticSearch,使得HTTP對外提供服務

[es@node1 elasticsearch-6.1.1]$ vi config/elasticsearch.yml
# 增長新的參數,這樣head插件能夠訪問es。設置參數的時候:後面要有空格
http.cors.enabled: true
http.cors.allow-origin: "*"

 

修改Head插件配置文件

[es@node1 elasticsearch-head-master]$ vi Gruntfile.js
找到connect:server,添加hostname一項,以下
connect: {
                        server: {
                                options: {
                                        hostname: 'linux-node1',
                                        port: 9100,
                                        base: '.',
                                        keepalive: true
                                }
                        }
                }

# 從新開啓elasticsearch

[root@linux-node1 elasticsearch-head-master]# service elasticsearch start
Starting elasticsearch (via systemctl):                    [  OK  ]

啓動head 
經過命令grunt server啓動head

[es@node1 elasticsearch-head-master]$ grunt server
Running "connect:server" (connect) task
Waiting forever...
Started connect web server on http://linux-node1:9100

 

 

Linux-node2 一樣的配置
[root@linux-node1 elasticsearch-head-master]# tail -4 /etc/elasticsearch/elasticsearch.yml  
discovery.zen.ping.unicast.hosts: ["linux-node1", "linux-node2"]     # 集羣節點發現列表,也可採用ip的形式
discovery.zen.minimum_master_nodes: 2   #集羣可作master的最小節點數,生產環境建議節點數至少3個且爲奇數
 

健康值綠色表示正常

 

Logstash 安裝

#下載公鑰到本地主機

rpm --import https://artifacts.elastic.co/GPG-KEY-elasticsearch

#配置yum倉庫

vi /etc/yum.repos.d/logstash.repo

[logstash-6.x]

name=Elastic repository for 6.x packages

baseurl=https://artifacts.elastic.co/packages/6.x/yum

gpgcheck=1

gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch

enabled=1

autorefresh=1

type=rpm-md

#安裝

yum install logstash

 

# 解坑

[root@linux-node1 ~]# rpm --import https://artifacts.elastic.co/GPG-KEY-elasticsearch

curl: (6) Could not resolve host: artifacts.elastic.co; Unknown error

[root@linux-node1 ~]# curl https://artifacts.elastic.co/packages/6.x/yum/repodata/repomd.xml

curl: (6) Could not resolve host: artifacts.elastic.co; Unknown error

[root@linux-node1 ~]# yum makecache

Loaded plugins: fastestmirror, langpacks

Repository base is listed more than once in the configuration

Repository updates is listed more than once in the configuration

Repository extras is listed more than once in the configuration

Repository centosplus is listed more than once in the configuration

base                                                                           | 3.6 kB  00:00:00    

https://artifacts.elastic.co/packages/6.x/yum/repodata/repomd.xml: [Errno 14] curl#6 - "Could not resolve host: artifacts.elastic.co; Temporary failure in name resolution"

Trying other mirror.

 

 

 One of the configured repositories failed (Elasticsearch repository for 6.x packages),

 and yum doesn't have enough cached data to continue. At this point the only

 safe thing yum can do is fail. There are a few ways to work "fix" this:

 

     1. Contact the upstream for the repository and get them to fix the problem.

 

     2. Reconfigure the baseurl/etc. for the repository, to point to a working

        upstream. This is most often useful if you are using a newer

        distribution release than is supported by the repository (and the

        packages for the previous distribution release still work).

 

     3. Run the command with the repository temporarily disabled

            yum --disablerepo=elasticsearch-6.x ...

 

     4. Disable the repository permanently, so yum won't use it by default. Yum

        will then just ignore the repository until you permanently enable it

        again or use --enablerepo for temporary usage:

 

            yum-config-manager --disable elasticsearch-6.x

        or

            subscription-manager repos --disable=elasticsearch-6.x

 

     5. Configure the failing repository to be skipped, if it is unavailable.

        Note that yum will try to contact the repo. when it runs most commands,

        so will have to try and fail each time (and thus. yum will be be much

        slower). If it is a very temporary problem though, this is often a nice

        compromise:

 

            yum-config-manager --save --setopt=elasticsearch-6.x.skip_if_unavailable=true

 

failure: repodata/repomd.xml from elasticsearch-6.x: [Errno 256] No more mirrors to try.

https://artifacts.elastic.co/packages/6.x/yum/repodata/repomd.xml: [Errno 14] curl#6 - "Could not resolve host: artifacts.elastic.co; Temporary failure in name resolution"

[root@linux-node1 ~]# yum update

Loaded plugins: fastestmirror, langpacks

Repository base is listed more than once in the configuration

Repository updates is listed more than once in the configuration

Repository extras is listed more than once in the configuration

Repository centosplus is listed more than once in the configuratio

 

# 出現這種報錯緣由是我用了國內阿里的免費DNS服務器,加一個世界有名歷史悠久的dns服務器 114.114.114.114就能夠解析這個域名了

[root@linux-node1 ~]# echo "nameserver 114.114.114.114" >> /etc/resolv.conf

 

[root@linux-node1 ~] yum makecache

Loaded plugins: fastestmirror, langpacks

Repository base is listed more than once in the configuration

Repository updates is listed more than once in the configuration

Repository extras is listed more than once in the configuration

Repository centosplus is listed more than once in the configuration

base                                                                           | 3.6 kB  00:00:00    

elasticsearch-6.x                                                              | 1.3 kB  00:00:00    

extras                                                                         | 3.4 kB  00:00:00    

logstash-6.x                                                                   | 1.3 kB  00:00:00    

updates                                                                        | 3.4 kB  00:00:00    

(1/18): base/7/x86_64/group_gz                                                 | 156 kB  00:00:05    

(2/18): base/7/x86_64/other_db                                                 | 2.5 MB  00:00:06    

(3/18): base/7/x86_64/filelists_db                                             | 6.7 MB  00:00:07    

(4/18): base/7/x86_64/primary_db                                               | 5.7 MB  00:00:15    

(5/18): elasticsearch-6.x/primary                                              |  31 kB  00:00:14    

(6/18): elasticsearch-6.x/other                                                | 4.1 kB  00:00:00    

(7/18): extras/7/x86_64/prestodelta                                            | 102 kB  00:00:00    

(8/18): extras/7/x86_64/filelists_db                          

 

# 前臺啓動logstash

[root@linux-node1 logstash]# /usr/share/logstash/bin/logstash -e 'input{stdin{}}output{stdout{codec=>rubydebug}}'

WARNING: Could not find logstash.yml which is typically located in $LS_HOME/config or /etc/logstash. You can specify the path using --path.settings. Continuing using the defaults

Could not find log4j2 configuration at path /usr/share/logstash/config/log4j2.properties. Using default config which logs errors to the console

The stdin plugin is now waiting for input:

hehe

{

       "message" => "hehe",

    "@timestamp" => 2018-02-03T16:35:59.357Z,

      "@version" => "1",

          "host" => "linux-node1"

}

 

 

[root@linux-node1 logstash]# /usr/share/logstash/bin/logstash -e 'input { stdin{} } output { elasticsearch { hosts=> ["10.0.0.5:9200"] } }'

WARNING: Could not find logstash.yml which is typically located in $LS_HOME/config or /etc/logstash. You can specify the path using --path.settings. Continuing using the defaults

Could not find log4j2 configuration at path /usr/share/logstash/config/log4j2.properties. Using default config which logs errors to the console

The stdin plugin is now waiting for input:

123

hehe

hahahaha 

 

 

 

es查看索引

 

 [root@linux-node1 logstash]# /usr/share/logstash/bin/logstash -e 'input { stdin{} } output { elasticsearch { hosts=> ["10.0.0.5:9200"] } stdout{ codec => rubydebug } }'

WARNING: Could not find logstash.yml which is typically located in $LS_HOME/config or /etc/logstash. You can specify the path using --path.settings. Continuing using the defaults

Could not find log4j2 configuration at path /usr/share/logstash/config/log4j2.properties. Using default config which logs errors to the console

The stdin plugin is now waiting for input:

hahahaha

{

    "@timestamp" => 2018-02-03T16:56:30.522Z,

          "host" => "linux-node1",

      "@version" => "1",

       "message" => "hahahaha"

}

ok

{

    "@timestamp" => 2018-02-03T16:56:33.236Z,

          "host" => "linux-node1",

      "@version" => "1",

       "message" => "ok"

}

yes

{

    "@timestamp" => 2018-02-03T16:56:38.412Z,

          "host" => "linux-node1",

      "@version" => "1",

       "message" => "yes"

}   

 

#編寫配置文件方便啓動優雅輸出模式

[root@linux-node1 logstash]# cat /etc/logstash/conf.d/01-logstash.conf

input { stdin { } }

output {

  elasticsearch { hosts => ["localhost:9200"] }

  stdout { codec => rubydebug }

}

 

 

# 指定輸入輸出配置文件運行

[root@linux-node1 logstash]# /usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/01-logstash.conf

WARNING: Could not find logstash.yml which is typically located in $LS_HOME/config or /etc/logstash. You can specify the path using --path.settings. Continuing using the defaults

Could not find log4j2 configuration at path /usr/share/logstash/config/log4j2.properties. Using default config which logs errors to the console

The stdin plugin is now waiting for input:

haha

{

      "@version" => "1",

    "@timestamp" => 2018-02-03T17:09:26.941Z,

       "message" => "haha",

          "host" => "linux-node1"

}

我是誰

{

      "@version" => "1",

    "@timestamp" => 2018-02-03T17:09:32.405Z,

       "message" => "我是誰",

          "host" => "linux-node1"

}

 

# 配置收集日誌

[root@linux-node1 ~]# cat file.conf

input {

    file {

        path => "/var/log/messages"

        type => "system"

        start_position => "beginning"

    }

}

 

output {

    elasticsearch {

        hosts => ["10.0.0.5:9200"]

        index => "system-%{+YYYY.MM.dd}"

        }

}

[root@linux-node1 ~]# /usr/share/logstash/bin/logstash -f file.conf

WARNING: Could not find logstash.yml which is typically located in $LS_HOME/config or /etc/logstash. You can specify the path using --path.settings. Continuing using the defaults

Could not find log4j2 configuration at path /usr/share/logstash/config/log4j2.properties. Using default config which logs errors to the console

 

 

 

# 注意空格

# 配置收集java日誌文件,並對收集的日誌做判斷

[root@linux-node1 ~]# cat file.conf

input {

    file {

        path => "/var/log/messages"

        type => "system"

        start_position => "beginning"

    }

   

    file {

         path => "/var/log/elasticsearch/oldboy.log"

         type => "es-error"

         start_position => "beginning"

     }

}

 

output {

   

    if [type] =="system" {

 

    elasticsearch {

        hosts => ["10.0.0.5:9200"]

        index => "system-%{+YYYY.MM.dd}"

        }

    }

if [type] == "es-error" {

elasticsearch {

        hosts => ["10.0.0.5:9200"]

        index => "es-error-%{+YYYY.MM.dd}"

        }

 

    }

 

}

 

 

 

可是這樣配置報錯日誌不是連貫的由於是按行存儲

咱們應該將一個報錯信息錯誤統一存到一個事件中。

 

 

[root@linux-node1 ~]# cat multiline.conf

input {

        stdin {

                codec => multiline {

                pattern => "^\["

                negate => true

                what => "previous"

                }

        }

}

 

output {

        stdout {

                codec => "rubydebug"

 

}

}

 

#測試匹配 [

[root@linux-node1 ~]# /usr/share/logstash/bin/logstash -f multiline.conf 

WARNING: Could not find logstash.yml which is typically located in $LS_HOME/config or /etc/logstash. You can specify the path using --path.settings. Continuing using the defaults

Could not find log4j2 configuration at path /usr/share/logstash/config/log4j2.properties. Using default config which logs errors to the console

The stdin plugin is now waiting for input:

[1]

[2]

{

       "message" => "[1]",

    "@timestamp" => 2018-02-03T18:11:40.168Z,

          "host" => "linux-node1",

      "@version" => "1"

}

[3dsdfdffdgdf

{

       "message" => "[2]",

    "@timestamp" => 2018-02-03T18:12:05.845Z,

          "host" => "linux-node1",

      "@version" => "1"

}

fdgdfgdfgdfgdfdfdf

sdfsdfsdfsd

sdfsdfsd

[4

{

       "message" => "[3dsdfdffdgdf\nfdgdfgdfgdfgdfdfdf\nsdfsdfsdfsd\nsdfsdfsd",

    "@timestamp" => 2018-02-03T18:12:18.265Z,

          "host" => "linux-node1",

      "@version" => "1",

          "tags" => [

        [0] "multiline"

    ]

}

 

 

將這樣的匹配規則放進總配置文件

[root@linux-node1 ~]# cat file.conf

input {

    file {

        path => "/var/log/messages"

        type => "system"

        start_position => "beginning"

    }

   

    file {

         path => "/var/log/elasticsearch/oldboy.log"

     type => "es-error"

         start_position => "beginning"

         codec => multiline {

                pattern => "^\["

                negate => true

                what => "previous"

            }

     }

}

 

 

安裝kibana

 

[root@linux-node1 ~]# rpm --import https://artifacts.elastic.co/GPG-KEY-elasticsearch

[root@linux-node1 ~]# cd /etc/yum.repos.d/

[root@linux-node1 yum.repos.d]# vi kibana.repo

[kibana-6.x]

name=Kibana repository for 6.x packages

baseurl=https://artifacts.elastic.co/packages/6.x/yum

gpgcheck=1

gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch

enabled=1

autorefresh=1

type=rpm-md

 

yum install kibana

 

 

#更改kibana配置文件

[root@linux-node1 yum.repos.d]# grep '^[a-z]' /etc/kibana/kibana.yml     

server.port: 5601

server.host: "0.0.0.0"

elasticsearch.url: "http://10.0.0.5:9200"

kibana.index: ".kibana"

 

# 開個screen 執行啓動腳本

[root@linux-node1 yum.repos.d]# /usr/share/kibana/bin/kibana

Ctrl+a+d 退出screen

 

 

#經過瀏覽器進入kibana界面

 

 

建立個index pattern

 

 

# discover 顯示默認15分鐘內的日誌,改成today觀察一天內的

 

 

 

收集nginx的訪問日誌

yum install epel-release 

yum install nginx -y

 

 

#定義nginx 日誌數據格式爲json

參考官方網站http://nginx.org/en/docs/http/ngx_http_log_module.html#log_format

vi /etc/nginx/nginx.conf

   log_format json '{"@timestamp":"$time_iso8601",'

                 '"host":"$server_addr",'

                 '"clientip":"$remote_addr",'

                 '"size":$body_bytes_sent,'

                 '"responsetime":$request_time,'

                 '"upstreamtime":"$upstream_response_time",'

                 '"upstreamhost":"$upstream_addr",'

                 '"http_host":"$host",'

                 '"url":"$uri",'

                 '"referer":"$http_referer",'

                 '"agent":"$http_user_agent",'

                 '"status":"$status"}';

 

# 指定使用json日誌放在server

 

 

#啓動nginx

[root@linux-node1 ~]# systemctl start nginx

[root@linux-node1 ~]# netstat -lntup|grep nginx

tcp        0      0 0.0.0.0:80              0.0.0.0:*               LISTEN      6333/nginx: master 

tcp6       0      0 :::80                   :::*                    LISTEN      6333/nginx: master

 

 

登錄瀏覽 輸入 10.0.0.5 瘋狂刷新生成json訪問日誌

tail -f /var/log/nginx/access_json.log

{"@timestamp":"2018-02-03T11:40:01-08:00","host":"10.0.0.5","clientip":"10.0.0.1","size":0,"responsetime":0.000,"upstreamtime":"-","upstreamhost":"-","http_host":"10.0.0.5","url":"/index.html","referer":"-","agent":"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3322.4 Safari/537.36","status":"304"}

 

 

#編寫收集配置文件

[root@linux-node1 ~]# cat json.conf

input {

 

        file {

                path => "/var/log/nginx/access_json.log"

                codec => "json"

        }

 

}

 

output{

 

        stdout{

                codec => "rubydebug"

        }

 

}

指定配置啓動

[root@linux-node1 ~]# /usr/share/logstash/bin/logstash -f json.conf

 

 

#更改總收集日誌配置all.conf

#輸入增長一個file

                  file {

                path => "/var/log/nginx/access_json.log"

                codec => json

                start_position => "beginning"

                type => "nginx-log"

}

        #輸出增長一個type判斷

                  

    if [type] == "nginx-log" {

                elasticsearch {

        hosts => ["10.0.0.5:9200"]

        index => "nginx-log-%{+YYYY.MM.dd}"

        }

        }

 

 

查看head

 

 

 

#kibana添加index pattern後查看

 

 

 

 

收集rsyslog日誌

# 寫syslog的收集日誌配置

[root@linux-node1 ~]# cat syslog.conf

input {

        syslog {

                type => "system-syslog"

                host => "10.0.0.5"

                port => "514"

 

        }

}

 

 

output {

        stdout {

                codec => "rubydebug"

        }

 

}

 

#指定syslog配置文件運行logstash

[root@linux-node1 ~]# /usr/share/logstash/bin/logstash -f syslog.conf

[root@linux-node1 ~]# netstat -lntup |grep 514

tcp6       0      0 10.0.0.5:514            :::*                    LISTEN      7086/java          

udp        0      0 10.0.0.5:514            0.0.0.0:*                           7086/java          

 

 

# rsyslog配置文件

[root@linux-node1 ~]# tail -2 /etc/rsyslog.conf         

*.* @@10.0.0.5:514

# ### end of the forwarding rule ###

#重啓rsyslog

[root@linux-node1 ~]# systemctl restart rsyslog

#有輸出表示收集成功

[root@linux-node1 ~]# /usr/share/logstash/bin/logstash -f syslog.conf

WARNING: Could not find logstash.yml which is typically located in $LS_HOME/config or /etc/logstash. You can specify the path using --path.settings. Continuing using the defaults

Could not find log4j2 configuration at path /usr/share/logstash/config/log4j2.properties. Using default config which logs errors to the console

{

          "facility" => 3,

           "message" => "Stopping System Logging Service...\n",

         "timestamp" => "Feb  3 12:22:00",

    "facility_label" => "system",

              "type" => "system-syslog",

           "program" => "systemd",

          "priority" => 30,

    "severity_label" => "Informational",

              "host" => "10.0.0.5",

          "severity" => 6,

          "@version" => "1",

        "@timestamp" => 2018-02-03T20:22:00.000Z,

         "logsource" => "linux-node1"

}

 

#驗證成功 將syslog收集模塊寫進all.conf

#寫完運行

[root@linux-node1 ~]# /usr/share/logstash/bin/logstash -f all.conf

 

#查看head

 

 

 

#kibana 增長index pattern後查看

[root@linux-node1 ~]# logger "hehe1"

[root@linux-node1 ~]# logger "hehe2"

[root@linux-node1 ~]# logger "hehe3"

[root@linux-node1 ~]# logger "hehe4"

[root@linux-node1 ~]# logger "hehe5"

[root@linux-node1 ~]# logger "hehe5"

 

 

 

 

收集tcp日誌

 

#tcp日誌收集配置

 

[root@linux-node1 ~]# cat tcp.conf

input {

        tcp {

                host => "10.0.0.5"

                port => "6666"

        }

}

 

output {

        stdout {

                codec => "rubydebug"

        }

}

 

 

#啓動logstash測試

[root@linux-node1 ~]# /usr/share/logstash/bin/logstash -f tcp.conf

[root@linux-node1 ~]# netstat -lntup|grep 6666

tcp6       0      0 10.0.0.5:6666           :::*                    LISTEN      7668/java

 

 

#使用nc命令測試

[root@linux-node1 ~]# nc 10.0.0.5 6666 < /etc/resolv.conf

[root@linux-node1 ~]# echo "hehe" | nc 10.0.0.5 6666

# 重定向一個僞設備(黑魔法)

[root@linux-node1 ~]# echo "oldboy" > /dev/tcp/10.0.0.5/6666

有輸出就表示日誌收集成功

{

       "message" => "hehe",

    "@timestamp" => 2018-02-03T20:49:06.743Z,

          "port" => 56202,

          "host" => "linux-node1",

      "@version" => "1"

}

{

       "message" => "oldboy",

    "@timestamp" => 2018-02-03T20:50:29.944Z,

          "port" => 56210,

          "host" => "linux-node1",

      "@version" => "1"

}

 

 

Filter插件

# 編寫grok配置

參考網站

https://github.com/logstash-plugins/logstash-patterns-core/tree/master/patterns

[root@linux-node1 ~]# cat grok.conf

input {

        stdin {

 

        }

 

}

 

filter {

  grok {

      match => { "message" => "%{IP:client} %{WORD:method} %{URIPATHPARAM:request} %{NUMBER:bytes} %{NUMBER:duration}" }

            }

                }

 

output {

        stdout {

                codec => "rubydebug"

        }

 

}

#啓動logstash

[root@linux-node1 ~]# /usr/share/logstash/bin/logstash -f grok.conf

輸入 測試url

55.3.244.1 GET /index.html 15824 0.043

輸出

{

       "message" => "55.3.244.1 GET /index.html 15824 0.043",

         "bytes" => "15824",

    "@timestamp" => 2018-02-03T21:10:31.416Z,

      "duration" => "0.043",

        "client" => "55.3.244.1",

      "@version" => "1",

          "host" => "linux-node1",

        "method" => "GET",

       "request" => "/index.html"

}

 

Logstash 解耦之消息隊列

#安裝redis

[root@linux-node1 ~]# yum install -y redis

#更改配置

[root@linux-node1 ~]# vi /etc/redis.conf

daemonize yes   #設置在後臺運行

bind 10.0.0.5     #綁定本機ip

[root@linux-node1 ~]# systemctl start redis

[root@linux-node1 ~]# netstat -lntup |grep 6379

tcp        0      0 10.0.0.5:6379           0.0.0.0:*               LISTEN      9199/redis-server 1

 

配置redis 收集日誌

[root@linux-node1 ~]# cat redis-out.conf

input {

        stdin {}

}

 

output {

        redis {

                host => "10.0.0.5"

                port => "6379"

                db => "6"

                data_type => "list"

                key => "demo"

        }

}

#啓動 logstash

[root@linux-node1 ~]# /usr/share/logstash/bin/logstash -f redis-out.conf

 

#啓動redis-cli

[root@linux-node1 ~]# redis-cli -h 10.0.0.5

10.0.0.5:6379>

#輸入info 生成一個db6的文件

# Keyspace

db6:keys=1,expires=0,avg_ttl=0

10.0.0.5:6379> select 6

OK

10.0.0.5:6379[6]> keys *

1) "demo"

#以列表形式查看

10.0.0.5:6379[6]> lindex demo -1

"{\"host\":\"linux-node1\",\"message\":\"hello redis\",\"@timestamp\":\"2018-02-04T06:19:49.454Z\",\"@version\":\"1\"}"

 

#造42條數據

10.0.0.5:6379[6]> llen demo

(integer) 42

 

#配置redis-in.conf

[root@linux-node1 ~]# cat redis-in.conf

input {

         redis {

                host => "10.0.0.5"

                port => "6379"

                db => "6"

                data_type => "list"

                key => "demo"

        }

 

}

output {

         elasticsearch {

             hosts => ["10.0.0.5:9200"]

                 index => "redis-demo-%{+YYYY.MM.dd}"

         }   

}

 

# 啓動logstash 讀取redis數據

[root@linux-node1 ~]# /usr/share/logstash/bin/logstash -f redis-in.conf 

n\":\"1\"}"

10.0.0.5:6379[6]> llen demo

(integer) 42

10.0.0.5:6379[6]> llen demo

(integer) 0

 

 

#查看 elasticsearch head

 

 

 

 

#配置運輸者 文件

[root@linux-node1 ~]# cat shipper.conf

input {

   

        syslog {

                type => "system-syslog"

                host => "10.0.0.5"

                port => "514"

        }

 

    file {

        path => "/var/log/messages"

        type => "system"

        start_position => "beginning"

    }

   

    file {

         path => "/var/log/elasticsearch/oldboy.log"

     type => "es-error"

         start_position => "beginning"

         codec => multiline {

                pattern => "^\["

                negate => true

                what => "previous"

            }

     }

 

        file {

                path => "/var/log/nginx/access_json.log"

                codec => json

                start_position => "beginning"

                type => "nginx-log"

 

        }

 

}

 

output {

   

    if [type] =="system" {

 

         redis {

                host => "10.0.0.5"

                port => "6379"

                db => "6"

                data_type => "list"

                key => "system"

        }

 

    }

    if [type] == "es-error" {

 

         redis {

                host => "10.0.0.5"

                port => "6379"

                db => "6"

                data_type => "list"

                key => "es-error"

        }

    }

 

 

    if [type] == "nginx-log" {

         redis {

                host => "10.0.0.5"

                port => "6379"

                db => "6"

                data_type => "list"

                key => "nginx-log"

        }

 

        }

 

    if [type] == "system-syslog" {

         redis {

                host => "10.0.0.5"

                port => "6379"

                db => "6"

                data_type => "list"

                key => "system-syslog"

        }

 

}

}

#開啓redis查看

10.0.0.5:6379> select 6

OK

10.0.0.5:6379[6]> keys *

1) "system-syslog"

2) "system"

目前只顯示system有日誌

訪問nginx 造訪問日誌

造點es-error日誌cat oldboy.log |tee  -a oldboy.log

10.0.0.5:6379[6]> keys *

1) "es-error"

2) "system-syslog"

3) "nginx-log"

4) "system"

 

 

在node2 節點上配置 使得從redis讀取數據 往es裏面寫

[root@linux-node2 ~]# cat indexer.conf

input {

 

         redis {

                type =>"system"

                host => "10.0.0.5"

                port => "6379"

                db => "6"

                data_type => "list"

                key => "system"

        }

 

 

         redis {

                type => "es-error"

                host => "10.0.0.5"

                port => "6379"

                db => "6"

                data_type => "list"

                key => "es-error"

        }

 

 

         redis {

                type => "nginx-log"

                host => "10.0.0.5"

                port => "6379"

                db => "6"

                data_type => "list"

                key => "nginx-log"

        }

 

 

         redis {

                type => "system-syslog"

                host => "10.0.0.5"

                port => "6379"

                db => "6"

                data_type => "list"

                key => "system-syslog"

        }

 

 

}

 

output {

   

    if [type] == "system" {

 

    elasticsearch {

        hosts => ["10.0.0.5:9200"]

        index => "system-%{+YYYY.MM.dd}"

        }

    }

    if [type] == "es-error" {

    elasticsearch {

        hosts => ["10.0.0.5:9200"]

        index => "es-error-%{+YYYY.MM.dd}"

        }

 

    }

 

    if [type] == "nginx-log" {

    elasticsearch {

        hosts => ["10.0.0.5:9200"]

        index => "nginx-log-%{+YYYY.MM.dd}"

        }

        }

 

    if [type] == "system-syslog" {

    elasticsearch {

        hosts => ["10.0.0.5:9200"]

        index => "system-syslog-%{+YYYY.MM.dd}"

        }

        }

}

 

#在node1上查看redis

[root@linux-node1 ~]# redis-cli -h 10.0.0.5

10.0.0.5:6379> select 6

OK

10.0.0.5:6379[6]> keys *

1) "es-error"

 

 

只剩一個es-error 因爲es-error數據量過大須要一段時間讀寫

10.0.0.5:6379[6]> keys *

(empty list or set)

輸出empty全部都讀寫完畢

 

 

#在kibana上查看最近十五分鐘的日誌

 

 

 

造點數據使用壓力測試命令ab  -n表明請求次數  -c併發量

安裝ab命令

查看apr-util, yum-utils是否安裝:

$ rpm -qa|grep apr-util

apr-util-1.3.9-3.el6_0.1.x86_64

…

$ rpm -qa|grep yum-utils

yum-utils-1.1.30-30.el6.noarch

若在命令執行後有結果列出(如上),則說明已安裝。

 

不然用yum安裝:

$sudo yum -y install apr-util

$sudo yum -y install yum-utils

yumdownloader httpd-tools* 

rpm2cpio httpd-*.rpm | cpio -idmv

[root@linux-node1 abtmp]# cp -pa usr/bin/ab /usr/bin/

 

[root@linux-node1 abtmp]# ab -n10000 -c1 http://10.0.0.5/

 

 

 

 

假裝生成一些系統日誌輸出1000個hehe

[root@linux-node1 abtmp]# for((i=1;i<=1000;i++));do logger "hehe$i";done

 

查看kibana

 

 

 

Kibana可視化 設置

搜索404 狀態碼請求

 

 

設置markdown

 

 

 

 

 

設置可視化餅圖pie

 

 

#增長一個統計 metric

 

 

#建立折線圖line

 

 

 

 

# 設置柱狀圖 vertic bar

 

 

 

 

建立儀表盤dashboard

 

 

生產如何上線ELK。

  1. 日誌分類

a)         系統日誌 rsyslog logstash syslog 插件

b)         訪問日誌 nginx logstash codec json

c)         錯誤日誌 file logstash file+ multiline

d)         運行日誌 file logstash codec json

e)         設備日誌 syslog logstash syslog插件

f)          Debug日誌 file logstash json or mulitline

  1. 日誌標準化
    1. 路徑 固定
    2. 格式 儘可能json

3.  收集日誌順序

從系統日誌開始->錯誤日誌->運行日誌->訪問日誌

4.  消息隊列能夠採用

Redis  rabbitmq  kafka

相關文章
相關標籤/搜索