http://blog.sina.com.cn/s/blog_8ea8e9d50102wwik.html
This experiment uses a cluster of different node types (client x1, master x3, data x2).
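For reference, the node roles and addresses used throughout this post (taken from the configs below):
client   192.168.8.10    (node.master: false, node.data: false)
master1  192.168.8.101   (node.master: true,  node.data: false)
master2  192.168.8.102   (node.master: true,  node.data: false)
master3  192.168.8.103   (node.master: true,  node.data: false)
data1    192.168.8.201   (node.master: false, node.data: true)
data2    192.168.8.202   (node.master: false, node.data: true)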
docker run -d --restart=always -p 9200:9200 -p 9300:9300 --name=elasticsearch-client --oom-kill-disable=true --memory-swappiness=1 -v /opt/elasticsearch/data:/usr/share/elasticsearch/data -v /opt/elasticsearch/logs:/usr/share/elasticsearch/logs elasticsearch:2.3.3
cat >elasticsearch.yml <<HERE
cluster.name: elasticsearch_cluster
node.name: ${HOSTNAME}
node.master: false
node.data: false
path.data: /usr/share/elasticsearch/data
path.logs: /usr/share/elasticsearch/logs
bootstrap.mlockall: true
network.host: 0.0.0.0
network.publish_host: 192.168.8.10
transport.tcp.port: 9300
http.port: 9200
index.refresh_interval: 5s
script.inline: true
script.indexed: true
HERE
docker restart elasticsearch-client
Simply cp the modified configuration file to the corresponding location inside the container, then restart the container.
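A minimal sketch of that copy-and-restart step, assuming the official image's default config path:
docker cp elasticsearch.yml elasticsearch-client:/usr/share/elasticsearch/config/elasticsearch.yml
docker restart elasticsearch-client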
man docker-run
--net="bridge"
Set the Network mode for the container
'bridge': create a network stack on the default Docker bridge
'none': no networking
'container:<name|id>': reuse another container's network stack
'host': use the Docker host network stack. Note: the host mode gives the container full access to local system services such as D-bus and is therefore considered insecure.
Note: Docker's default network mode is bridge, which automatically assigns each container a private address. For cluster communication across multiple hosts, you need a service-discovery / auto-registration component such as Consul, Etcd, or Doozer to coordinate; see the post Docker Clustering with Swarm+Consul+Shipyard.
You must use network.publish_host: 192.168.8.10 to specify the address the Elasticsearch node advertises to the outside. This is critical: without it, cluster nodes cannot communicate with each other, failing with errors like the following:
[2016-06-21 05:50:19,123][INFO ][discovery.zen ] [consul-s2.example.com] failed to send join request to master[{consul-s1.example.com}{DeKixlVMS2yoynzX8Y-gdA}{172.17.0.1}{172.17.0.1:9300}{data=false, master=true}], reason [RemoteTransportException[[consul-s2.example.com][172.17.0.1:9300]
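A quick way to check which address a node is actually advertising is to query the nodes transport info (available in ES 2.x):
curl -s 'localhost:9200/_nodes/transport?pretty' | grep publish_address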
The simplest alternative is to set the network to host mode with --net=host, which borrows the host's network interfaces directly; in other words, no separate container network layer is created.
You can also disable the OOM killer for the container, and use the -m flag (default 0, unlimited) to cap container memory based on the host's available memory.
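A sketch of such a run combining host networking with the OOM and memory flags (the 4g cap is illustrative; with --net=host the -p port mappings are unnecessary):
docker run -d --restart=always --net=host --name=elasticsearch-client --oom-kill-disable=true --memory-swappiness=1 -m 4g -v /opt/elasticsearch/data:/usr/share/elasticsearch/data -v /opt/elasticsearch/logs:/usr/share/elasticsearch/logs elasticsearch:2.3.3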
[root@ela-client ~]# docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
762e4d21aaf8 elasticsearch:2.3.3 "/docker-entrypoint.s" 2 minutes ago Up 2 minutes elasticsearch-client
[root@ela-client ~]# netstat -tunlp|grep java
tcp 0 0 0.0.0.0:9200 0.0.0.0:* LISTEN 18952/java
tcp 0 0 0.0.0.0:9300 0.0.0.0:* LISTEN 18952/java
[root@ela-client ~]# ls /opt/elasticsearch/
data logs
[root@ela-client ~]# docker logs $(docker ps -q)
[2016-06-13 16:09:51,308][INFO ][node ] [Sunfire] version[2.3.3], pid[1], build[218bdf1/2016-05-17T15:40:04Z]
[2016-06-13 16:09:51,311][INFO ][node ] [Sunfire] initializing ...
... ...
[2016-06-13 16:09:56,408][INFO ][node ] [Sunfire] started
[2016-06-13 16:09:56,417][INFO ][gateway ] [Sunfire] recovered [0] indices into cluster_state
Client node (192.168.8.10):
cat >/opt/elasticsearch/config/elasticsearch.yml <<HERE
cluster.name: elasticsearch_cluster
node.name: ${HOSTNAME}
node.master: false
node.data: false
path.data: /usr/share/elasticsearch/data
path.logs: /usr/share/elasticsearch/logs
bootstrap.mlockall: true
network.host: 0.0.0.0
network.publish_host: 192.168.8.10
transport.tcp.port: 9300
http.port: 9200
index.refresh_interval: 5s
script.inline: true
script.indexed: true
HERE
Master node 1 (192.168.8.101):
cat >/opt/elasticsearch/config/elasticsearch.yml <<HERE
cluster.name: elasticsearch_cluster
node.name: ${HOSTNAME}
node.master: true
node.data: false
path.data: /usr/share/elasticsearch/data
path.logs: /usr/share/elasticsearch/logs
bootstrap.mlockall: true
network.host: 0.0.0.0
network.publish_host: 192.168.8.101
transport.tcp.port: 9300
http.port: 9200
index.refresh_interval: 5s
script.inline: true
script.indexed: true
HERE
Master node 2 (192.168.8.102):
cat >/opt/elasticsearch/config/elasticsearch.yml <<HERE
cluster.name: elasticsearch_cluster
node.name: ${HOSTNAME}
node.master: true
node.data: false
path.data: /usr/share/elasticsearch/data
path.logs: /usr/share/elasticsearch/logs
bootstrap.mlockall: true
network.host: 0.0.0.0
network.publish_host: 192.168.8.102
transport.tcp.port: 9300
http.port: 9200
index.refresh_interval: 5s
script.inline: true
script.indexed: true
HERE
Master node 3 (192.168.8.103):
cat >/opt/elasticsearch/config/elasticsearch.yml <<HERE
cluster.name: elasticsearch_cluster
node.name: ${HOSTNAME}
node.master: true
node.data: false
path.data: /usr/share/elasticsearch/data
path.logs: /usr/share/elasticsearch/logs
bootstrap.mlockall: true
network.host: 0.0.0.0
network.publish_host: 192.168.8.103
transport.tcp.port: 9300
http.port: 9200
index.refresh_interval: 5s
script.inline: true
script.indexed: true
HERE
Data node 1 (192.168.8.201):
cat >/opt/elasticsearch/config/elasticsearch.yml <<HERE
cluster.name: elasticsearch_cluster
node.name: ${HOSTNAME}
node.master: false
node.data: true
path.data: /usr/share/elasticsearch/data
path.logs: /usr/share/elasticsearch/logs
bootstrap.mlockall: true
network.host: 0.0.0.0
network.publish_host: 192.168.8.201
transport.tcp.port: 9300
http.port: 9200
index.refresh_interval: 5s
script.inline: true
script.indexed: true
HERE
Data node 2 (192.168.8.202):
cat >/opt/elasticsearch/config/elasticsearch.yml <<HERE
cluster.name: elasticsearch_cluster
node.name: ${HOSTNAME}
node.master: false
node.data: true
path.data: /usr/share/elasticsearch/data
path.logs: /usr/share/elasticsearch/logs
bootstrap.mlockall: true
network.host: 0.0.0.0
network.publish_host: 192.168.8.202
transport.tcp.port: 9300
http.port: 9200
index.refresh_interval: 5s
script.inline: true
script.indexed: true
HERE
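Each of these nodes is started with the same docker run command as the client at the top; a sketch for master1 (the container name elasticsearch-master1 is illustrative; only the name and the host it runs on change per node):
docker run -d --restart=always -p 9200:9200 -p 9300:9300 --name=elasticsearch-master1 --oom-kill-disable=true --memory-swappiness=1 -v /opt/elasticsearch/data:/usr/share/elasticsearch/data -v /opt/elasticsearch/logs:/usr/share/elasticsearch/logs elasticsearch:2.3.3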
Joining the nodes into the cluster via the discovery module
Append the following lines to /opt/elasticsearch/config/elasticsearch.yml on each of the nodes above, then restart:
cat >>/opt/elasticsearch/config/elasticsearch.yml <<HERE
discovery.zen.ping.timeout: 100s
discovery.zen.fd.ping_timeout: 100s
discovery.zen.ping.multicast.enabled: false
discovery.zen.ping.unicast.hosts: ["192.168.8.101:9300", "192.168.8.102:9300", "192.168.8.103:9300", "192.168.8.201:9300", "192.168.8.202:9300","192.168.8.10:9300"]
discovery.zen.minimum_master_nodes: 2
gateway.recover_after_nodes: 2
HERE
docker restart $(docker ps -a|grep elasticsearch|awk '{print $1}')
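For reference, discovery.zen.minimum_master_nodes follows the usual quorum formula (master_eligible_nodes / 2) + 1; with the three master nodes here that is 3 / 2 + 1 = 2, matching the value above. gateway.recover_after_nodes: 2 likewise delays cluster state recovery until at least two nodes have joined.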
5. Verify the cluster
Wait about 30s for the cluster nodes to join automatically. Output similar to the following on any cluster node indicates the cluster is running normally.
For REST API usage, see the post Elasticsearch REST API Notes.
https://www.elastic.co/guide/en/elasticsearch/reference/current/_cluster_health.html
[root@ela-client ~]#curl 'http://localhost:9200/_cat/health?v'
epoch timestamp cluster status node.total node.data shards pri relo init unassign pending_tasks max_task_wait_time active_shards_percent
1465843145 18:39:05 elasticsearch_cluster green 6 2 0 0 0 0 0 0 - 100.0%
[root@ela-client ~]#curl 'localhost:9200/_cat/nodes?v'
host ip heap.percent ram.percent load node.role master name
192.168.8.102 192.168.8.102 14 99 0.00 - * ela-master2.example.com
192.168.8.103 192.168.8.103 4 99 0.14 - m ela-master3.example.com
192.168.8.202 192.168.8.202 11 99 0.00 d - ela-data2.example.com
192.168.8.10 192.168.8.10 10 98 0.17 - - ela-client.example.com
192.168.8.201 192.168.8.201 11 99 0.00 d - ela-data1.example.com
192.168.8.101 192.168.8.101 12 99 0.01 - m ela-master1.example.com
[root@ela-master2 ~]#curl 'http://localhost:9200/_nodes/process?pretty'
{
"cluster_name" : "elasticsearch_cluster",
"nodes" : {
"naMz_y4uRRO-FzyxRfTNjw" : {
"name" : "ela-data2.example.com",
"transport_address" : "192.168.8.202:9300",
"host" : "192.168.8.202",
"ip" : "192.168.8.202",
"version" : "2.3.3",
"build" : "218bdf1",
"http_address" : "192.168.8.202:9200",
"attributes" : {
"master" : "false"
},
"process" : {
"refresh_interval_in_millis" : 1000,
"id" : 1,
"mlockall" : false
}
},
"7FwFY20ESZaRtIWhYMfDAg" : {
"name" : "ela-data1.example.com",
"transport_address" : "192.168.8.201:9300",
"host" : "192.168.8.201",
"ip" : "192.168.8.201",
"version" : "2.3.3",
"build" : "218bdf1",
"http_address" : "192.168.8.201:9200",
"attributes" : {
"master" : "false"
},
"process" : {
"refresh_interval_in_millis" : 1000,
"id" : 1,
"mlockall" : false
}
},
"X0psLpQyR42A4ThiP8ilhA" : {
"name" : "ela-master3.example.com",
"transport_address" : "192.168.8.103:9300",
"host" : "192.168.8.103",
"ip" : "192.168.8.103",
"version" : "2.3.3",
"build" : "218bdf1",
"http_address" : "192.168.8.103:9200",
"attributes" : {
"data" : "false",
"master" : "true"
},
"process" : {
"refresh_interval_in_millis" : 1000,
"id" : 1,
"mlockall" : false
}
},
"MG_GlSAZRkqLq8gMqaZITw" : {
"name" : "ela-master1.example.com",
"transport_address" : "192.168.8.101:9300",
"host" : "192.168.8.101",
"ip" : "192.168.8.101",
"version" : "2.3.3",
"build" : "218bdf1",
"http_address" : "192.168.8.101:9200",
"attributes" : {
"data" : "false",
"master" : "true"
},
"process" : {
"refresh_interval_in_millis" : 1000,
"id" : 1,
"mlockall" : false
}
},
"YxNHUPqVRNK3Liilw_hU9A" : {
"name" : "ela-master2.example.com",
"transport_address" : "192.168.8.102:9300",
"host" : "192.168.8.102",
"ip" : "192.168.8.102",
"version" : "2.3.3",
"build" : "218bdf1",
"http_address" : "192.168.8.102:9200",
"attributes" : {
"data" : "false",
"master" : "true"
},
"process" : {
"refresh_interval_in_millis" : 1000,
"id" : 1,
"mlockall" : false
}
},
"zTKJJ4ipQg6xAcwy1aE-9g" : {
"name" : "ela-client.example.com",
"transport_address" : "192.168.8.10:9300",
"host" : "192.168.8.10",
"ip" : "192.168.8.10",
"version" : "2.3.3",
"build" : "218bdf1",
"http_address" : "192.168.8.10:9200",
"attributes" : {
"data" : "false",
"master" : "true"
},
"process" : {
"refresh_interval_in_millis" : 1000,
"id" : 1,
"mlockall" : false
}
}
}
}
Issue:
Modifying the JVM heap; the initial values are -Xms256m -Xmx1g
https://hub.docker.com/r/itzg/elasticsearch/
https://hub.docker.com/_/elasticsearch/
Tested: changes to JAVA_OPTS or ES_JAVA_OPTS are only appended to the existing options and cannot override them; the heap can only be overridden via
-e ES_HEAP_SIZE="32g"
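For example, re-running the client node with the heap overridden (the 32g value is from above; size it to your host):
docker run -d --restart=always -p 9200:9200 -p 9300:9300 --name=elasticsearch-client -e ES_HEAP_SIZE="32g" --oom-kill-disable=true --memory-swappiness=1 -v /opt/elasticsearch/data:/usr/share/elasticsearch/data -v /opt/elasticsearch/logs:/usr/share/elasticsearch/logs elasticsearch:2.3.3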