Automation - ELK Log Management

 
Two servers: node1 192.168.1.105, node2 192.168.1.106. Put the name-resolution entries for both hosts in /etc/hosts on each server.

#  wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo           ### run on both servers

Download the JDK from the official site (required by Elasticsearch): http://www.oracle.com/technetwork/java/javase/downloads/jdk8-downloads-2133151.html

# yum install jdk-8u171-linux-x64.rpm elasticsearch-5.4.0.rpm                                            ### run on both servers

# vim /etc/elasticsearch/elasticsearch.yml                                 #### on node1

# grep "^[a-z]" /etc/elasticsearch/elasticsearch.yml 
cluster.name: elk-cluster1
node.name: elk-node1
path.data: /data/elkdata
path.logs: /data/logs
bootstrap.memory_lock: true
network.host: 192.168.1.105
http.port: 9200
discovery.zen.ping.unicast.hosts: ["192.168.1.105", "192.168.1.106"]

On both servers: mkdir -p /data/elkdata /data/logs && chown elasticsearch.elasticsearch /data/ -R

#### on node1

# vim /usr/lib/systemd/system/elasticsearch.service

line 40:   LimitMEMLOCK=infinity

# cat /etc/elasticsearch/jvm.options   ### JVM memory settings; if you need to change the heap size, edit this file and set it to roughly half of physical memory

-Xms1g
-Xmx1g

#systemctl daemon-reload

# systemctl restart elasticsearch.service

scp /etc/elasticsearch/elasticsearch.yml 192.168.1.106:/etc/elasticsearch/

### on node2

# grep '^[a-z]' /etc/elasticsearch/elasticsearch.yml 
cluster.name: elk-cluster1
node.name: elk-node2
path.data: /data/elkdata
path.logs: /data/logs
bootstrap.memory_lock: true
network.host: 192.168.1.106
http.port: 9200
discovery.zen.ping.unicast.hosts: ["192.168.1.105", "192.168.1.106"]

# vim /usr/lib/systemd/system/elasticsearch.service 

LimitMEMLOCK=infinity   

# systemctl restart elasticsearch.service
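
Once both nodes are up, cluster membership can be confirmed with the standard cluster-health API (just a sanity check; assumes port 9200 is reachable):

# curl -s 'http://192.168.1.105:9200/_cluster/health?pretty'      ### should report "number_of_nodes" : 2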

## Upload elasticsearch-head.tar.gz and extract it: tar xvf elasticsearch-head.tar.gz

## yum install npm -y

#cd elasticsearch-head

#npm run start &   ## on node1

# vim /etc/elasticsearch/elasticsearch.yml      ## on both servers

bootstrap.memory_lock: true

http.cors.enabled: true
http.cors.allow-origin: "*"
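
After restarting Elasticsearch on both nodes, the head front end (npm run start serves it on port 9100 by default) should be able to attach to the cluster. CORS can also be checked from the command line; the Origin value below is arbitrary:

# curl -I -H "Origin: http://192.168.1.105:9100" http://192.168.1.105:9200/     ### the response should include an Access-Control-Allow-Origin header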

 

chmod 644 /var/log/messages      ### so the logstash user can read the system log

   

Note: if Elasticsearch fails to start with "[1]: system call filters failed to install; check the logs and fix your configuration or disable system call filters at your own risk", the system (e.g. CentOS 6) does not support SecComp. Modify /etc/elasticsearch/elasticsearch.yml as follows:

bootstrap.memory_lock: false

bootstrap.system_call_filter: false

 

 


##### node2


yum install mariadb mariadb-server gem


gem sources --add http://gems.ruby-china.com/ --remove https://rubygems.org


###node1

yum install kibana-5.6.5-x86_64.rpm

vim /etc/kibana/kibana.yml


line 2:  server.port: 5601
line 7:  server.host: "192.168.1.105"
line 21: elasticsearch.url: "http://192.168.1.106:9200"


systemctl restart kibana

http://192.168.1.105:5601


In Kibana, add the index pattern [logstash-system-log-1105]-YYYY.MM.DD and click Create.

### On node1, install nginx as a reverse proxy so Kibana sits behind a username/password login

useradd nginx

tar -xvf nginx-1.10.3.tar.gz

cd nginx-1.10.3

yum install pcre pcre-devel openssl openssl-devel

 ./configure --prefix=/usr/local/nginx --with-http_sub_module --with-http_ssl_module

make

make install

mkdir /usr/local/nginx/conf/conf.d

vim /usr/local/nginx/conf/nginx.conf

user nginx;

worker_processes auto;

include /usr/local/nginx/conf/conf.d/*.conf;        ### add inside the http {} block

 vim /usr/local/nginx/conf/conf.d/kibana5612.conf

upstream kibana_server {

        server  127.0.0.1:5601 weight=1 max_fails=3  fail_timeout=60;

}

 

server {

        listen 80;

        server_name www.kibana5612.com;

        location / {

        proxy_pass http://kibana_server;

        proxy_http_version 1.1;

        proxy_set_header Upgrade $http_upgrade;

        proxy_set_header Connection 'upgrade';

        proxy_set_header Host $host;

        proxy_cache_bypass $http_upgrade;

        }

}

 

 #yum install httpd-tools -y

#htpasswd -c /usr/local/nginx/htppass.txt kibana          ### prompts for a password; with -b the password would also be supplied on the command line

#chown nginx.nginx /usr/local/nginx/ -R

#vim /usr/local/nginx/conf/conf.d/kibana5612.conf

Add these two lines below server_name:

auth_basic "Restricted Access";
auth_basic_user_file /usr/local/nginx/htppass.txt;

/usr/local/nginx/sbin/nginx
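
Before moving on, the configuration and the basic-auth can be checked from the command line (the password is whatever was entered for htpasswd; the Host header is set explicitly because www.kibana5612.com only resolves if it was added to /etc/hosts):

# /usr/local/nginx/sbin/nginx -t                                              ### syntax check
# curl -I -u kibana:<password> -H "Host: www.kibana5612.com" http://192.168.1.105/
### expect HTTP/1.1 200 OK; without -u the proxy should answer 401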

#yum install  logstash-5.6.5.rpm

#vim /usr/local/nginx/conf/nginx.conf

 

log_format access_json '{"@timestamp":"$time_iso8601",'
        '"host":"$server_addr",'
        '"clientip":"$remote_addr",'
        '"size":$body_bytes_sent,'
        '"responsetime":$request_time,'
        '"upstreamtime":"$upstream_response_time",'
        '"upstreamhost":"$upstream_addr",'
        '"http_host":"$host",'
        '"url":"$uri",'
        '"domain":"$host",'
        '"xff":"$http_x_forwarded_for",'
        '"referer":"$http_referer",'
        '"status":"$status"}';

 

Use either this format or the one above:

[root@linux-node1 ~]# vim /usr/local/nginx/conf/nginx.conf

log_format json '{"@timestamp":"$time_iso8601",'
                '"@version":"1",'
                '"client":"$remote_addr",'
                '"url":"$uri",'
                '"status":"$status",'
                '"domain":"$host",'
                '"host":"$server_addr",'
                '"size":$body_bytes_sent,'
                '"responsetime":$request_time,'
                '"referer": "$http_referer",'
                '"ua": "$http_user_agent"'
                '}';

access_log  logs/access_json.log  json;

 

 

 

    access_log  /var/log/nginx/access.log  access_json;        ### if the access_json format above is used

#mkdir /var/log/nginx

#chown nginx.nginx /var/log/nginx -R

#mkdir /usr/local/nginx/html/web/   && touch /usr/local/nginx/html/web/index.html

#echo "is web" > /usr/local/nginx/html/web/index.html

#/usr/local/nginx/sbin/nginx -s reload

Hit the site in a browser and check the access log: the entries should now be in JSON format.
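
One quick way to confirm the new entries are valid JSON (python ships with CentOS 7, so json.tool is available):

# tail -n1 /var/log/nginx/access.log | python -m json.tool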

 

vim /etc/logstash/conf.d/nginx.conf
input{
  file {
    path => "/var/log/nginx/access.log"
    type => "nginx-access-log-1105"
    start_position => "beginning"
    stat_interval => "2"
    codec => "json"
  }
  file {
    path => "/var/log/messages"
    type => "system-log-1105"
    start_position => "beginning"
    stat_interval => "2"
  }
}

output{
  if [type] == "nginx-access-log-1105" {
    elasticsearch {
      hosts => ["192.168.1.105:9200"]
      index => "logstash-nginx-accesslog-1105-%{+YYYY.MM.dd}"
    }
  }
  if [type] == "system-log-1105" {
    elasticsearch {
      hosts => ["192.168.56.12:9200"]
      index => "logstash-system-log-1105-%{+YYYY.MM.dd}"
    }
  }
}

/usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/nginx.conf -t
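
If the test prints Configuration OK, restart Logstash and check that the two indices appear in the cluster (the _cat API is part of stock Elasticsearch):

# systemctl restart logstash
# curl -s 'http://192.168.1.105:9200/_cat/indices?v'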

Then add the corresponding index patterns in Kibana for the new indices in the Elasticsearch cluster.

 

#node2

#mkdir /apps
#tar -xvf apache-tomcat-7.0.69.tar.gz -C /apps
#ln -s /apps/apache-tomcat-7.0.69 /apps/tomcat
#cd /apps/tomcat/webapps
#touch index.html && echo "tomcat" >> index.html
#chmod a+x /apps/tomcat/bin/*.sh
#/apps/tomcat/bin/catalina.sh start
#vim /apps/tomcat/conf/server.xml

<Valve className="org.apache.catalina.valves.AccessLogValve" directory="logs"
               prefix="tomcat_access_log" suffix=".log"
               pattern="{&quot;clientip&quot;:&quot;%h&quot;,&quot;ClientUser&quot;:&quot;%l&quot;,&quot;authenticated&quot;:&quot;%u&quot;,&quot;AccessTime&quot;:&quot;%t&quot;,&quot;method&quot;:&quot;%r&quot;,&quot;status&quot;:&quot;%s&quot;,&quot;SendBytes&quot;:&quot;%b&quot;,&quot;Query?string&quot;:&quot;%q&quot;,&quot;partner&quot;:&quot;%{Referer}i&quot;,&quot;AgentVersion&quot;:&quot;%{User-Agent}i&quot;}"/>

Restart Tomcat: /apps/tomcat/bin/catalina.sh stop && /apps/tomcat/bin/catalina.sh start
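
To generate a test request and confirm the new JSON pattern is in effect (the file name below assumes the AccessLogValve default date suffix yyyy-MM-dd; adjust if yours differs):

# curl -s http://192.168.1.106:8080/ >/dev/null
# tail -n1 /apps/tomcat/logs/tomcat_access_log.$(date +%F).log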


vim /etc/logstash/conf.d/tomcat.conf
input {
  file {
    path => "/apps/tomcat/logs/tomcat_access_log.*.log"
    type => "tomcat-access-log-1106"
    start_position => "beginning"
    stat_interval => "2"
    codec => "json"
  } 
}

output {
  elasticsearch {
    hosts => ["192.168.1.106:9200"]
    index => "logstash-tomcat-access-log-1106-%{+YYYY.MM.dd}"   
 }
  file {
    path => "/tmp/tomcat.txt"  
  }
}

/usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/tomcat.conf -t
systemctl restart logstash
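
Since the output block also writes every event to /tmp/tomcat.txt, that file gives a quick local check that events are flowing before looking in Elasticsearch:

# tail -f /tmp/tomcat.txt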

 

###node2

systemctl start mariadb.service

mysql

create database elk  character set utf8 collate utf8_bin;

grant all privileges on elk.* to elk@"%" identified by '123456';

 

########node1

unzip  mysql-connector-java-5.1.42.zip

mkdir -p  /usr/share/logstash/vendor/jar/jdbc

cp mysql-connector-java-5.1.42-bin.jar  /usr/share/logstash/vendor/jar/jdbc/

chown  logstash.logstash /usr/share/logstash/vendor/jar/  

yum install gem

 

gem sources --add https://gems.ruby-china.org/ --remove https://rubygems.org/

 

gem sources --list

 

/usr/share/logstash/bin/logstash-plugin  list

 

/usr/share/logstash/bin/logstash-plugin   install  logstash-output-jdbc
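
The steps above install the plugin and the MySQL driver but do not show the jdbc output itself. Below is a minimal sketch of what /etc/logstash/conf.d/jdbc.conf could look like; the table name elklog and its columns are hypothetical and must be created in the elk database first, and the option names follow the logstash-output-jdbc plugin documentation, so verify them against the installed version:

vim /etc/logstash/conf.d/jdbc.conf

input {
  file {
    path => "/var/log/nginx/access.log"
    type => "nginx-access-log-1105"
    codec => "json"
  }
}

output {
  jdbc {
    # the MySQL driver jar was copied to /usr/share/logstash/vendor/jar/jdbc/ above
    connection_string => "jdbc:mysql://192.168.1.106:3306/elk?user=elk&password=123456&useSSL=false"
    statement => [ "INSERT INTO elklog (clientip, status, time) VALUES(?, ?, ?)", "clientip", "status", "@timestamp" ]
  }
}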

 

 

##### Collect TCP logs

node2###

vim /etc/logstash/conf.d/tcp.conf

 

input {
  tcp {
    port => "1234"
    type => "tcplog"
  }
}

# For testing, print events to stdout:
output {
  stdout {
    codec => "rubydebug"
  }
}

# For production, send to Elasticsearch instead:
output {
  elasticsearch {
    hosts => ["192.168.1.105:9200"]
    index => "tcplog-1106-%{+YYYY.MM.dd}"
  }
}



/usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/tcp.conf
systemctl restart logstash.service
###### node3
yum install nc -y
echo "test" | nc 192.168.1.106 1234
nc 192.168.1.106 1234 < /etc/passwd

Create the index pattern in Kibana:
[tcplog-1106]-YYYY.MM.DD

 

 

############# Collect syslog

Install HAProxy on node1

cd /usr/local/src/ && tar -xvf haproxy-1.7.9.tar.gz && cd haproxy-1.7.9

yum install gcc pcre pcre-devel openssl  openssl-devel -y

make TARGET=linux2628 USE_PCRE=1 USE_OPENSSL=1 USE_ZLIB=1 PREFIX=/usr/local/haproxy

make install PREFIX=/usr/local/haproxy

cp /usr/local/src/haproxy-1.7.9/haproxy /usr/sbin/

cp /usr/local/src/haproxy-1.7.9/haproxy-systemd-wrapper /usr/sbin/

vim /etc/sysconfig/haproxy

# Add extra options to the haproxy daemon here. This can be useful for

# specifying multiple configuration files with multiple -f options.

# See haproxy(1) for a complete list of options.

OPTIONS=""

 

vim /usr/lib/systemd/system/haproxy.service

[Unit]

Description=HAProxy Load Balancer

After=syslog.target network.target

 

[Service]

EnvironmentFile=/etc/sysconfig/haproxy

ExecStart=/usr/sbin/haproxy-systemd-wrapper -f /etc/haproxy/haproxy.cfg -p /run/haproxy.pid $OPTIONS

ExecReload=/bin/kill -USR2 $MAINPID

[Install]

WantedBy=multi-user.target

mkdir /etc/haproxy/

vim /etc/haproxy/haproxy.cfg

global

maxconn 100000

chroot /usr/local/haproxy

uid 99

gid 99

daemon

nbproc 1

pidfile /usr/local/haproxy/run/haproxy.pid

log 127.0.0.1 local6 info

defaults

option http-keep-alive

option  forwardfor

maxconn 100000

mode http

timeout connect 300000ms

timeout client  300000ms

timeout server  300000ms

listen stats

 mode http

 bind 0.0.0.0:9999

 stats enable

 log global

 stats uri     /haproxy-status

 stats auth    haadmin:123456

frontend kibana_web_port

    bind 192.168.1.105:80

    mode http

    log global    # logging must be enabled here

    default_backend kibana_web_http_nodes

backend kibana_web_http_nodes

    mode http

    #balance source

    balance roundrobin

    cookie  SESSION_COOKIE  insert indirect nocache

    #option httpchk GET /XXX/XXX.

    server 192.168.1.105  192.168.1.105:5601  cookie kibana-web1 check inter 2000 fall 3 rise 5
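
Before restarting, reload systemd (the unit file is new) and let haproxy validate the configuration; -c only checks the file and exits:

systemctl daemon-reload

/usr/sbin/haproxy -c -f /etc/haproxy/haproxy.cfg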

 systemctl restart haproxy.service

Browse to 192.168.1.105 in a web browser; the request is proxied through to Kibana on port 5601.

 

vim /etc/rsyslog.conf      ### uncomment / add the following lines

 15 $ModLoad imudp
 16 $UDPServerRun 514
 19 $ModLoad imtcp
 20 $InputTCPServerRun 514

 92 local6.*        /var/log/haproxy.log

93 local6.*       @@192.168.1.106:1514        ### @@ forwards over TCP (a single @ would be UDP)

 systemctl restart rsyslog
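
To push a test message through the local6 facility and confirm both the local file and the forward to node2 work (logger is part of util-linux):

logger -p local6.info "rsyslog haproxy forwarding test"

tail -n1 /var/log/haproxy.log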

Browse to 192.168.1.105:9999/haproxy-status in a web browser.

 

####node2

vim /etc/logstash/conf.d/rsyslog.conf

input {
  syslog {
    port => "1514"
    type => "rsyslog-1106"
  }
}

output {
  if [type] == "rsyslog-1106" {
  elasticsearch {
  hosts => ["192.168.1.105:9200"]
  index => "rsyslog-1106-%{+YYYY.MM.dd}"
  }
  }
}

systemctl restart logstash

Add the index pattern in Kibana.

 

#### Install the ZooKeeper cluster

Three hosts: 192.168.1.105 node1, 192.168.1.106 node2, 192.168.1.104 node3

Add entries to /etc/hosts so the hosts can resolve each other.

Install the JDK   ## upload the JDK package to /usr/local/src

yum install jdk-8u151-linux-x64.rpm -y

Upload zookeeper-3.4.11.tar.gz to /usr/local/src/     ### on all three nodes. An odd number of nodes is preferred so the cluster still has a quorum when one server goes down.

tar xvf zookeeper-3.4.11.tar.gz                        #### on all three nodes

ln -s /usr/local/src/zookeeper-3.4.11 /usr/local/zookeeper   ######## on all three nodes

mkdir /usr/local/zookeeper/data         ##### on all three nodes

cp /usr/local/zookeeper/conf/zoo_sample.cfg /usr/local/zookeeper/conf/zoo.cfg    ## on all three nodes   ### copy the sample config as a template

vim /usr/local/zookeeper/conf/zoo.cfg

tickTime=2000            ## single heartbeat interval in milliseconds, between servers or between a client and a server

initLimit=10                  ## maximum number of ticks a follower may take to connect and sync with the leader for the first time

syncLimit=5                 ## maximum ticks for a request/response between leader and follower; a follower that cannot reach the leader within this time is considered unavailable

dataDir=/usr/local/zookeeper/data          ### directory where ZooKeeper stores its data

clientPort=2181                            ## port ZooKeeper listens on for client connections

server.1=192.168.1.105:2888:3888           ### server id = server IP : leader/follower data sync port : leader election port

server.2=192.168.1.106:2888:3888

server.3=192.168.1.104:2888:3888

 

#echo "1" > /usr/local/zookeeper/data/myid     ##node1執行

#echo "2" > /usr/local/zookeeper/data/myid     ##node2執行

#echo "3" > /usr/local/zookeeper/data/myid     ##node3執行

 

/usr/local/zookeeper/bin/zkServer.sh start     # run on all three nodes

 

/usr/local/zookeeper/bin/zkServer.sh status  ## check status: one node should be the leader. Note: if it will not start, check with ps -ef | grep java whether a stale process is still running and kill it; start the nodes in order myid 1, then myid 2, then myid 3.
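
The ZooKeeper four-letter commands give the same information per node (stat is built in; nc was used earlier, install it where missing):

echo stat | nc 192.168.1.105 2181 | grep Mode      ### prints Mode: leader on one node and Mode: follower on the other two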

 

Upload the Kafka package to /usr/local/src/    ## on all three nodes

node1##

tar xvf /usr/local/src/kafka_2.11-1.0.0.tgz

 ln -sv /usr/local/src/kafka_2.11-1.0.0 /usr/local/kafka

vim /usr/local/kafka/config/server.properties

broker.id=1

listeners=PLAINTEXT://192.168.1.105:9092

zookeeper.connect=192.168.1.105:2181,192.168.1.106:2181,192.168.1.101:2181

 #/usr/local/kafka/bin/kafka-server-start.sh -daemon /usr/local/kafka/config/server.properties

node2##

tar xvf /usr/local/src/kafka_2.11-1.0.0.tgz

 ln -sv /usr/local/src/kafka_2.11-1.0.0 /usr/local/kafka

vim /usr/local/kafka/config/server.properties

broker.id=2

listeners=PLAINTEXT://192.168.1.106:9092

zookeeper.connect=192.168.1.105:2181,192.168.1.106:2181,192.168.1.101:2181

 

 #/usr/local/kafka/bin/kafka-server-start.sh -daemon /usr/local/kafka/config/server.properties

node3##

tar xvf /usr/local/src/kafka_2.11-1.0.0.tgz

 ln -sv /usr/local/src/kafka_2.11-1.0.0 /usr/local/kafka

vim /usr/local/kafka/config/server.properties

broker.id=3

listeners=PLAINTEXT://192.168.1.101:9092

zookeeper.connect=192.168.1.105:2181,192.168.1.106:2181,192.168.1.101:2181

 #/usr/local/kafka/bin/kafka-server-start.sh -daemon /usr/local/kafka/config/server.properties

 

/usr/local/kafka/bin/kafka-topics.sh --create --zookeeper 192.168.1.105:2181,192.168.1.106:2181,192.168.1.101:2181 --partitions 3 --replication-factor 3 --topic logstashtest   # test: create a topic

/usr/local/kafka/bin/kafka-topics.sh --describe --zookeeper 192.168.1.105:2181,192.168.1.106:2181,192.168.1.101:2181 --topic logstashtest  # test: describe the topic

 

 

/usr/local/kafka/bin/kafka-topics.sh --delete --zookeeper 192.168.1.105:2181,192.168.1.106:2181,192.168.1.101:2181 --topic logstashtest    ## delete a topic

/usr/local/kafka/bin/kafka-topics.sh --list --zookeeper 192.168.1.105:2181,192.168.1.106:2181,192.168.1.101:2181   # list all topics

/usr/local/kafka/bin/kafka-console-producer.sh --broker-list 192.168.1.105:9092,192.168.1.106:9092,192.168.1.101:9092 --topic logstashtest    ## send messages (producer)

 

/usr/local/kafka/bin/kafka-console-consumer.sh --zookeeper 192.168.1.105:2181,192.168.1.106:2181,192.168.1.101:2181 --topic logstashtest --from-beginning  ### consume the messages from the other Kafka nodes
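
To actually route logs through this Kafka cluster, Logstash can write to and read from it with the kafka output/input plugins bundled with Logstash 5.x. A minimal sketch, assuming a topic named nginx-accesslog-1105 (the topic and file names below are just examples):

vim /etc/logstash/conf.d/nginx-to-kafka.conf     ### on the node that reads the nginx log

output {
  kafka {
    bootstrap_servers => "192.168.1.105:9092,192.168.1.106:9092,192.168.1.101:9092"
    topic_id => "nginx-accesslog-1105"
    codec => "json"
  }
}

vim /etc/logstash/conf.d/kafka-to-es.conf        ### on the node that writes to Elasticsearch

input {
  kafka {
    bootstrap_servers => "192.168.1.105:9092,192.168.1.106:9092,192.168.1.101:9092"
    topics => ["nginx-accesslog-1105"]
    codec => "json"
  }
}
output {
  elasticsearch {
    hosts => ["192.168.1.105:9200"]
    index => "kafka-nginx-accesslog-%{+YYYY.MM.dd}"
  }
}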
