elasticsearch
Reference
https://www.docker.elastic.co
https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html
Pull the image
docker pull docker.elastic.co/elasticsearch/elasticsearch:7.4.2
Inspect the image
docker inspect docker.elastic.co/elasticsearch/elasticsearch:7.4.2
mkdir /opt/elasticsearch -p
vim /opt/elasticsearch/Dockerfile
FROM docker.elastic.co/elasticsearch/elasticsearch:7.4.2
EXPOSE 9200
EXPOSE 9300
9200 is the HTTP port, 9300 is the TCP transport port.
mkdir /opt/elasticsearch/usr/share/elasticsearch/data/ -p
mkdir /opt/elasticsearch/usr/share/elasticsearch/logs/ -p
cat /etc/passwd
Grant permissions on data and its subdirectories (-R), otherwise data cannot be written:
chown 1000:1000 -R /opt/elasticsearch/usr/share/elasticsearch/data/
chown 1000:1000 -R /opt/elasticsearch/usr/share/elasticsearch/logs/
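To confirm the ownership change took effect (ls -ln prints numeric UID/GID, which should show 1000):
ls -lnd /opt/elasticsearch/usr/share/elasticsearch/data/ /opt/elasticsearch/usr/share/elasticsearch/logs/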
vim /opt/elasticsearch/docker-compose.yml
version: '2.2'
services:
  elasticsearch:
    image: v-elasticsearch
    restart: always
    container_name: elasticsearch
    build:
      context: .
      dockerfile: Dockerfile
    ports:
      - "9200:9200"
      - "9300:9300"
    environment:
      - cluster.name=docker-cluster
      - discovery.type=single-node
      - bootstrap.memory_lock=true
      - network.host=0.0.0.0
      - http.cors.enabled=true
      - http.cors.allow-origin=*
      - ES_JAVA_OPTS=-Xms512m -Xmx512m
    ulimits:
      memlock:
        soft: -1
        hard: -1
    healthcheck:
      test: ["CMD", "curl", "-f", "http://127.0.0.1:9200"]
      retries: 300
      interval: 1s
    volumes:
      - ./usr/share/elasticsearch/data/:/usr/share/elasticsearch/data
      - ./usr/share/elasticsearch/logs:/usr/share/elasticsearch/logs
Alternatively, the configuration file can be written in advance and mounted:
version: '2.2'
services:
  elasticsearch:
    image: v-elasticsearch
    restart: always
    container_name: elasticsearch
    build:
      context: .
      dockerfile: Dockerfile
    ports:
      - "9200:9200"
      - "9300:9300"
    environment:
      - ES_JAVA_OPTS=-Xms256m -Xmx256m
    ulimits:
      memlock:
        soft: -1
        hard: -1
    healthcheck:
      test: ["CMD", "curl", "-f", "http://127.0.0.1:9200"]
      retries: 300
      interval: 1s
    volumes:
      - ./usr/share/elasticsearch/data/:/usr/share/elasticsearch/data
      - ./usr/share/elasticsearch/logs:/usr/share/elasticsearch/logs
      - ./usr/share/elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
Configuration file references:
https://github.com/elastic/elasticsearch-docker/tree/master/.tedi/template
https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks.html
https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-xpack.html
mkdir -p /opt/elasticsearch/usr/share/elasticsearch/config/
vim /opt/elasticsearch/usr/share/elasticsearch/config/elasticsearch.yml
---
discovery.type: single-node
bootstrap.memory_lock: true
cluster.name: docker-cluster
network.host: 0.0.0.0
http.cors.enabled: true
http.cors.allow-origin: "*"
http.cors.allow-methods: OPTIONS, HEAD, GET, POST, PUT, DELETE
http.cors.allow-headers: "X-Requested-With, Content-Type, Content-Length, X-User"
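Once the container is running, the CORS settings above can be spot-checked by sending a request with an Origin header and looking for the Access-Control-Allow-Origin response header (a minimal check; the origin value is arbitrary here since allow-origin is *):
curl -s -i -H "Origin: http://example.com" http://127.0.0.1:9200 | grep -i access-control-allow-origin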
Password management in ES is implemented with x-pack.
The default account is elastic and the default password is changeme.
x-pack is omitted here.
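For reference only, a minimal sketch of enabling basic security later would be to turn it on in elasticsearch.yml and then set the built-in passwords inside the running container (not done in this walkthrough):
# add to /opt/elasticsearch/usr/share/elasticsearch/config/elasticsearch.yml
#   xpack.security.enabled: true
# then set passwords for the built-in users (including elastic)
docker exec -it elasticsearch /usr/share/elasticsearch/bin/elasticsearch-setup-passwords interactive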
cd /opt/elasticsearch
docker-compose build
docker-compose up -d --force-recreate
docker-compose down
docker-compose restart
View the logs
docker logs --tail="500" elasticsearch
Check the status
curl http://127.0.0.1:9200/_cat/health
netstat -anltp|grep 9200
Enter the container
docker exec -it elasticsearch /bin/bash
Check the container
docker exec -it elasticsearch /bin/bash /usr/share/elasticsearch/bin/elasticsearch --help
docker exec -it elasticsearch /bin/bash /usr/share/elasticsearch/bin/elasticsearch --version
Copy the configuration file
docker cp elasticsearch:/usr/share/elasticsearch/config/elasticsearch.yml /opt/elasticsearch/elasticsearch_bak.yml
-----------------------------------
elasticsearch-head
mkdir /opt/elasticsearch-head -p
vim /opt/elasticsearch-head/Dockerfile
FROM mobz/elasticsearch-head:5
EXPOSE 9100
vim /opt/elasticsearch-head/docker-compose.yml
version: '2.2'
services:
  elasticsearch-head:
    image: v-elasticsearch-head
    restart: always
    container_name: elasticsearch-head
    build:
      context: .
      dockerfile: Dockerfile
    ports:
      - 9100:9100
    environment:
      TZ: 'Asia/Shanghai'
cd /opt/elasticsearch-head
docker-compose build
docker-compose up -d --force-recreate
docker-compose down
docker-compose restart
View the logs
docker logs --tail="500" elasticsearch-head
netstat -anltp|grep 9100
Enter the container
docker exec -it elasticsearch-head /bin/bash
Check the container (elasticsearch-head is a Node.js app served from /usr/src/app, so there is no CLI binary to query):
docker exec -it elasticsearch-head ls /usr/src/app
Export the configuration files
docker cp elasticsearch-head:/usr/src/app/Gruntfile.js /opt/elasticsearch-head/Gruntfile.js
mkdir /opt/elasticsearch-head/_site
docker cp elasticsearch-head:/usr/src/app/_site/app.js /opt/elasticsearch-head/_site/app.js
Add to docker-compose.yml:
volumes:
- ./Gruntfile.js:/usr/src/app/Gruntfile.js
- ./_site/app.js:/usr/src/app/_site/app.js
chown 1000:1000 -R /opt/elasticsearch-head/
Rebuild, recreate, and start again.
Add hostname: '*' to Gruntfile.js:
connect: {
    server: {
        options: {
            port: 9100,
            base: '.',
            keepalive: true,
            hostname: '*'
        }
    }
}
------------------------------------
kibana
https://www.elastic.co/guide/en/kibana/current/docker.html
docker pull docker.elastic.co/kibana/kibana:7.4.2
docker inspect docker.elastic.co/kibana/kibana:7.4.2
mkdir /opt/kibana -p
vim /opt/kibana/Dockerfile
FROM docker.elastic.co/kibana/kibana:7.4.2
EXPOSE 5601
vim /opt/kibana/docker-compose.yml
version: '2.2'
services:
  kibana:
    image: v-kibana
    restart: always
    container_name: kibana
    build:
      context: .
      dockerfile: Dockerfile
    environment:
      - SERVER_NAME=kibana
      - SERVER_HOST=0.0.0.0
      - ELASTICSEARCH_HOSTS=http://192.168.1.101:9200
      - KIBANA_DEFAULTAPPID=discover
      - I18N_LOCALE=zh-CN
    network_mode: host
    healthcheck:
      test: ["CMD", "curl", "-f", "http://127.0.0.1:5601"]
      retries: 300
      interval: 1s
    ports:
      - 5601:5601
Alternatively, the configuration file can be written in advance and mounted:
vim /opt/kibana/docker-compose.yml
version: '2.2'
services:
  kibana:
    image: v-kibana
    restart: always
    container_name: kibana
    build:
      context: .
      dockerfile: Dockerfile
    network_mode: host
    healthcheck:
      test: ["CMD", "curl", "-f", "http://127.0.0.1:5601"]
      retries: 300
      interval: 1s
    ports:
      - 5601:5601
    volumes:
      - ./usr/share/kibana/config/kibana.yml:/usr/share/kibana/config/kibana.yml
mkdir -p /opt/kibana/usr/share/kibana/config/
vim /opt/kibana/usr/share/kibana/config/kibana.yml
server.name: kibana
server.host: "0.0.0.0"
elasticsearch.hosts: [ "http://192.168.1.101:9200" ]
kibana.defaultAppId: discover
i18n.locale: zh-CN
xpack.monitoring.ui.container.elasticsearch.enabled: true
cd /opt/kibana
docker-compose build
docker-compose up -d --force-recreate
docker-compose down
docker-compose restart
View the logs
docker logs --tail="500" kibana
Check the status
curl http://192.168.1.101:5601/api/status
netstat -anltp|grep 5601
Enter the container
docker exec -it kibana /bin/bash
Check the container
docker exec -it kibana /bin/bash /usr/share/kibana/bin/kibana --help
docker exec -it kibana /bin/bash /usr/share/kibana/bin/kibana --version
Copy the configuration file
docker cp kibana:/usr/share/kibana/config/kibana.yml /opt/kibana/kibana_bak.yml
------------------------
logstash
Reference
https://www.elastic.co/guide/en/logstash/current/docker.html
https://www.elastic.co/guide/en/logstash/current/docker-config.html
docker pull docker.elastic.co/logstash/logstash:7.4.2
docker inspect docker.elastic.co/logstash/logstash:7.4.2
mkdir /opt/logstash -p
vim /opt/logstash/Dockerfile
With JDK 8:
FROM openjdk:8 AS jdk
FROM docker.elastic.co/logstash/logstash:7.4.2
COPY --from=jdk /usr/local/openjdk-8 /usr/local/openjdk-8
ENV JAVA_HOME=/usr/local/openjdk-8
ENV PATH=$JAVA_HOME/bin:$PATH
ENV CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
EXPOSE 9600
EXPOSE 5044
The bundled JDK throws warnings; to use it anyway, the Dockerfile is simply:
FROM docker.elastic.co/logstash/logstash:7.4.2
EXPOSE 9600
EXPOSE 5044
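If the JDK 8 variant is used, the Java version the container actually picks up can be double-checked once it is running (container name as defined in the compose file below):
docker exec -it logstash java -version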
vim /opt/logstash/docker-compose.yml
version: '2.2'
services:
  logstash:
    image: v-logstash
    restart: always
    container_name: logstash
    build:
      context: .
      dockerfile: Dockerfile
    environment:
      - PATH_DATA=/usr/share/logstash/data
      - XPACK_MONITORING_ELASTICSEARCH_HOSTS=http://192.168.1.101:9200
      - XPACK_MONITORING_ENABLED=false
    ports:
      - 9600:9600
      - 5044:5044
    network_mode: host
    healthcheck:
      test: ["CMD", "curl", "-f", "http://127.0.0.1:9600/_node/stats"]
      retries: 300
      interval: 1s
    volumes:
      - ./usr/share/logstash/pipeline/logstash1.conf:/usr/share/logstash/pipeline/logstash1.conf
command: ./bin/logstash -f ./pipeline/logstash1.conf --path.data=./data1
Or run multiple pipelines (the first is backgrounded so that the second can start):
command:
  - /bin/sh
  - -c
  - |
    ./bin/logstash -f ./pipeline/logstash1.conf --path.data=./data1 &
    ./bin/logstash -f ./pipeline/logstash2.conf --path.data=./data2
Note: mounting the data directory will cause errors:
volumes:
  - ./usr/share/logstash/data1/:/usr/share/logstash/data1/
Alternatively, the configuration file can be written in advance and mounted:
vim /opt/logstash/docker-compose.yml
version: '2.2'
services:
  logstash:
    image: v-logstash
    restart: always
    container_name: logstash
    build:
      context: .
      dockerfile: Dockerfile
    ports:
      - 9600:9600
      - 5044:5044
    network_mode: host
    healthcheck:
      test: ["CMD", "curl", "-f", "http://127.0.0.1:9600/_node/stats"]
      retries: 300
      interval: 1s
    volumes:
      - ./usr/share/logstash/pipeline/logstash1.conf:/usr/share/logstash/pipeline/logstash1.conf
      - ./usr/share/logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml
command: ./bin/logstash -f ./pipeline/logstash1.conf --path.data=./data1
Or run multiple pipelines (the first is backgrounded so that the second can start):
command:
  - /bin/sh
  - -c
  - |
    ./bin/logstash -f ./pipeline/logstash1.conf --path.data=./data1 &
    ./bin/logstash -f ./pipeline/logstash2.conf --path.data=./data2
Note: mounting the data directory will cause errors:
volumes:
  - ./usr/share/logstash/data1:/usr/share/logstash/data1
mkdir -p /opt/logstash/usr/share/logstash/config/
vim /opt/logstash/usr/share/logstash/config/logstash.yml
http.host: 0.0.0.0
path.data: /usr/share/logstash/data1
xpack.monitoring.elasticsearch.hosts: [ "http://192.168.1.101:9200" ]
xpack.monitoring.enabled: false
vim /opt/logstash/usr/share/logstash/pipeline/logstash1.conf
input{ stdin{ } } output { elasticsearch { hosts => ["192.168.1.101:9200"] } }
input{ stdin{ } } output { elasticsearch { hosts => ["192.168.1.101:9200"] user => "elastic" password => "changeme" } }
input{stdin{}}output{stdout{codec=>rubydebug}}
input{stdin{}}output{stdout{codec=>json_lines}}
input { beats { port => 5044 } } output { stdout { codec => rubydebug } }
## host:port is the destination configured in the logback appender
logback->logstash->es
input {
  tcp {
    port => "9601"
    mode => "server"
    tags => "tags_test"
    codec => json_lines
  }
}
output {
  elasticsearch {
    hosts => "192.168.0.101:9200"
    index => "log-demo1"
  }
}
The value of tags can be set as you like.
If the JSON is long and needs line breaks, the codec => "json_lines" plugin is recommended.
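For reference, a logback appender matching this tcp input might look roughly like the following (a sketch assuming the logstash-logback-encoder dependency; LogstashEncoder emits JSON lines, and the destination must match the tcp input's host:port):
<appender name="LOGSTASH" class="net.logstash.logback.appender.LogstashTcpSocketAppender">
    <destination>192.168.0.101:9601</destination>
    <encoder class="net.logstash.logback.encoder.LogstashEncoder"/>
</appender>
<root level="INFO">
    <appender-ref ref="LOGSTASH"/>
</root>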
logback->kafka->logstash->es
input {
  kafka {
    topics => "log"
    bootstrap_servers => "192.168.0.101:9011,192.168.0.101:9012,192.168.0.101:9013"
    tags => "tags_test"
    codec => "json"
  }
}
filter {
}
output {
  stdout {
    codec => rubydebug
  }
  elasticsearch {
    hosts => "192.168.0.101:9200"
    index => "log-kafka-demo1"
  }
}
If the logback Kafka output pattern is not JSON, logstash must be set to codec => "plain".
If the logback Kafka output pattern is JSON, logstash must be set to codec => "json", not "json_lines".
An example pattern:
<appender name="KAFKA" class="com.github.danielwegener.logback.kafka.KafkaAppender">
    <filter class="com.wintersoft.spali.demo.user2.filter.LogFilter"/>
    <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
        <pattern>{"logger_name":"%logger","thread_name":"%thread","level":"%-5level","message":"%msg"}</pattern>
        <charset>utf8</charset>
    </encoder>
    <topic>log</topic>
    <keyingStrategy class="com.github.danielwegener.logback.kafka.keying.NoKeyKeyingStrategy"/>
    <deliveryStrategy class="com.github.danielwegener.logback.kafka.delivery.AsynchronousDeliveryStrategy"/>
    <producerConfig>bootstrap.servers=${Kafka_Log_Servers}</producerConfig>
</appender>
Do not use the ["xxx","yyy"] format for parameters whose keys end in s; use the "xxx,yyy" format instead.
cd /opt/logstash
docker-compose build
docker-compose up -d --force-recreate
docker-compose down
docker-compose restart
View the logs
docker logs --tail="500" logstash
docker logs -f logstash
Enter the container
docker exec -it logstash /bin/bash
Check the container
docker exec -it logstash /bin/bash /usr/share/logstash/bin/logstash --help
docker exec -it logstash /bin/bash /usr/share/logstash/bin/logstash --version
netstat -anltp|grep 9600
netstat -anltp|grep 5044
Copy the configuration files
docker cp logstash:/usr/share/logstash/config/logstash.yml /opt/logstash/logstash_bak.yml
docker cp logstash:/usr/share/logstash/pipeline/logstash1.conf /opt/logstash/logstash1_bak.conf
Test
Put the data under data1 and run as a separate instance:
docker exec -it logstash /bin/bash /usr/share/logstash/bin/logstash --path.data=./data1 -e 'input{stdin{}}output{stdout{codec=>rubydebug}}'
docker exec -it logstash /bin/bash /usr/share/logstash/bin/logstash --path.data=./data1 -e 'input{stdin{}}output{stdout{codec=>json_lines}}'
docker exec -it logstash /bin/bash /usr/share/logstash/bin/logstash --path.data=./data1 -e 'input{stdin{}}output{elasticsearch{hosts=>"192.168.1.101:9200"}}'
docker exec -it logstash /bin/bash /usr/share/logstash/bin/logstash -f /usr/share/logstash/pipeline/logstash2.conf --path.data=./data2
----------------------------
Filebeat
docker pull docker.elastic.co/beats/filebeat:7.4.2
docker inspect docker.elastic.co/beats/filebeat:7.4.2
Reference
https://www.elastic.co/guide/en/beats/filebeat/current/running-on-docker.html
mkdir /opt/filebeat -p
vim /opt/filebeat/Dockerfile
FROM docker.elastic.co/beats/filebeat:7.4.2
vim /opt/filebeat/docker-compose.yml
version: '2.2'
services:
  filebeat:
    image: v-filebeat
    restart: always
    container_name: filebeat
    build:
      context: .
      dockerfile: Dockerfile
    network_mode: host
    volumes:
      - ./usr/share/filebeat/data/:/usr/share/filebeat/data/
      - ./usr/share/filebeat/filebeat.yml:/usr/share/filebeat/filebeat.yml
mkdir /opt/filebeat/usr/share/filebeat -p
vim /opt/filebeat/usr/share/filebeat/filebeat.yml
filebeat.config:
  modules:
    path: ${path.config}/modules.d/*.yml
    reload.enabled: false

processors:
  - add_cloud_metadata: ~

output.elasticsearch:
  hosts: '${ELASTICSEARCH_HOSTS:192.168.1.101:9200}'
  username: '${ELASTICSEARCH_USERNAME:}'
  password: '${ELASTICSEARCH_PASSWORD:}'
The output can point either directly at Elasticsearch:
output.elasticsearch:
  hosts: ["192.168.1.101:9200"]
or at Logstash (only one output may be enabled at a time):
output.logstash:
  hosts: ["192.168.1.101:5044"]
cd /opt/filebeat
docker-compose build
docker-compose up -d --force-recreate
docker-compose down
docker-compose restart
View the logs
docker logs --tail="500" filebeat
docker logs -f filebeat
Enter the container
docker exec -it filebeat /bin/bash
Check the container
docker exec -it filebeat /usr/share/filebeat/filebeat --help
docker exec -it filebeat /usr/share/filebeat/filebeat version
Copy the configuration file
docker cp filebeat:/usr/share/filebeat/filebeat.yml /opt/filebeat/filebeat_bak.yml
---------------------------------------
apm-server
docker pull docker.elastic.co/apm/apm-server:7.4.2
docker inspect docker.elastic.co/apm/apm-server:7.4.2
Reference
https://www.elastic.co/guide/en/apm/server/current/running-on-docker.html
mkdir /opt/apm-server -p
vim /opt/apm-server/Dockerfile
FROM docker.elastic.co/apm/apm-server:7.4.2
EXPOSE 8200
docker-compose reference:
https://github.com/elastic/apm-server/blob/master/tests/docker-compose.yml
vim /opt/apm-server/docker-compose.yml
version: '2.2'
services:
  apm-server:
    image: v-apm-server
    restart: always
    container_name: apm-server
    build:
      context: .
      dockerfile: Dockerfile
    environment:
      - output.elasticsearch.hosts=['http://192.168.1.101:9200']
      - apm-server.host="0.0.0.0:8200"
      - setup.kibana.host="192.168.1.101:5601"
    network_mode: host
    ports:
      - 8200:8200
    healthcheck:
      test: ["CMD", "curl", "-f", "http://127.0.0.1:8200/"]
command: apm-server -e -d "*" -E apm-server.host="0.0.0.0:8200" -E apm-server.expvar.enabled=true -E output.elasticsearch.hosts=['http://192.168.1.101:9200']
volumes:
- ./usr/share/apm-server/apm-server.yml:/usr/share/apm-server/apm-server.yml
volumes:
- ./usr/share/apm-server/data:/usr/share/apm-server/data:ro
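If apm-server.yml is mounted as above, a minimal configuration might look like the following (a sketch; the hosts follow the addresses used elsewhere in this walkthrough):
apm-server:
  host: "0.0.0.0:8200"
output.elasticsearch:
  hosts: ["http://192.168.1.101:9200"]
setup.kibana:
  host: "192.168.1.101:5601"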
cd /opt/apm-server
docker-compose build
docker-compose up -d --force-recreate
docker-compose down
docker-compose restart
View the logs
docker logs --tail="500" apm-server
docker logs -f apm-server
Enter the container
docker exec -it apm-server /bin/bash
Check the container
docker exec -it apm-server /usr/share/apm-server/apm-server --help
docker exec -it apm-server /usr/share/apm-server/apm-server version
Copy the configuration file
docker cp apm-server:/usr/share/apm-server/apm-server.yml /opt/apm-server/apm-server_bak.yml