https://www.elastic.co/guide/en/elasticsearch/reference/5.0/modules-snapshots.html
https://www.elastic.co/guide/en/elasticsearch/guide/current/_rolling_restarts.html
https://blog.csdn.net/u014431852/article/details/52905821
Source: Alibaba Cloud Elasticsearch cluster, version 5.0
Target: Microsoft Azure Elasticsearch cluster, version 5.6
Both old and new data need to move from the Alibaba Cloud cluster to the Azure cluster.
New data is the easy part: point the data sources at the new Kafka cluster on Azure and let the Azure Logstash consume it into the new Elasticsearch cluster. Migrating the old data is trickier, but the official docs offer a mature solution: snapshot backup and restore. What follows is a record of that process.
1. Modify elasticsearch.yml and add the following:
path.repo: /storage/esdata
This sets the repository path for index snapshots.
Note: every master node and every data node needs this setting.
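Once the nodes have been restarted (steps 2-4 below), one way to confirm the setting is live on every node is the node-settings API; a minimal sketch, assuming the cluster answers on 10.10.88.86:9200 as in the commands below:
# Show the path.repo setting reported by each node.
curl -s 'http://10.10.88.86:9200/_nodes?pretty&filter_path=nodes.*.settings.path.repo'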
2. Disable automatic shard allocation:
curl -XPUT http://10.10.88.86:9200/_cluster/settings -d'
{
"transient" : {
"cluster.routing.allocation.enable" : "none"
}
}'
The Elasticsearch log confirms the change:
[root@elk-es01 storage]# tail -f es-cluster.log
[2018-05-11T15:24:15,605][INFO ][o.e.c.s.ClusterSettings ] [elk-es01] updating [cluster.routing.allocation.enable] from [ALL] to [none]
3. Restart Elasticsearch.
4. Re-enable automatic allocation:
curl -XPUT http://10.10.88.86:9200/_cluster/settings -d'
{
"transient" : {
"cluster.routing.allocation.enable" : "all"
}
}'
5. Repeat steps 1-4 on each of the remaining nodes. Check /_cat/health and wait until recovery reaches 100% before moving on to the next node. If you restart without disabling allocation, cluster recovery takes a long time because it starts from 0%; with allocation disabled it starts from roughly (nodes - 1)/nodes and finishes much faster.
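A minimal sketch of that wait, polling /_cat/health between node restarts (same host as above):
# Block until the cluster reports green again before touching the next node.
until curl -s 'http://10.10.88.86:9200/_cat/health?h=status' | grep -q green; do
    echo 'waiting for cluster recovery...'
    sleep 10
done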
Next, register the snapshot repository:
curl -XPUT http://10.10.88.86:9200/_snapshot/client_statistics -d'
{
"type": "fs",
"settings": {
"location": "/storage/esdata",
"compress": true,
"max_snapshot_bytes_per_sec" : "50mb",
"max_restore_bytes_per_sec" : "50mb"
}
}'
Watch out for this error, a permission failure when creating the repository:
store location [/storage/esdata] is not accessible on the node [{elk-es04}
{"error":{"root_cause":[{"type":"repository_verification_exception","reason":"[client_statistics] [[TgGhv7V1QGagb_PNDyXM-w, 'RemoteTransportException[[elk-es04][/10.10.88.89:9300][internal:admin/repository/verify]]; nested: RepositoryVerificationException[[client_statistics] store location [/storage/esdata] is not accessible on the node [{elk-es04}{TgGhv7V1QGagb_PNDyXM-w}{7dxKWcF3QreKMZOKhTFgeg}{/10.10.88.89}{/10.10.88.89:9300}]]; nested: AccessDeniedException[/storage/esdata/tests-_u6h8XrJQ32dYPaC7zFC_w/data-TgGhv7V1QGagb_PNDyXM-w.dat];']]"}],"type":"repository_verification_exception","reason":"[client_statistics] [[TgGhv7V1QGagb_PNDyXM-w, 'RemoteTransportException[[elk-es04][10.51.57.54:9300][internal:admin/repository/verify]]; nested: RepositoryVerificationException[[client_statistics] store location [/storage/esdata] is not accessible on the node [{elk-es04}{TgGhv7V1QGagb_PNDyXM-w}{7dxKWcF3QreKMZOKhTFgeg}{/10.10.88.89}{/10.10.88.89:9300}]]; nested: AccessDeniedException[/storage/esdata/tests-_u6h8XrJQ32dYPaC7zFC_w/data-TgGhv7V1QGagb_PNDyXM-w.dat];']]"},"status":500}
Solution:
The es user's UID/GID is 500 on node1, node2 and node3, but 501 on node4. Lately every small job seems to run into some unexpected snag; a quick search turned up https://discuss.elastic.co/t/why-does-creating-a-repository-fail/22697/16, which confirms it is a permissions problem. The NFS side has its own policies in place, and weighing the options the quickest fix was
to change the es UID and GID on node4 to 500 so they match the other three nodes:
http://www.javashuo.com/article/p-uwlrcxay-gz.html
usermod -u 500 es
groupmod -g 500 es
Note that usermod only works while no process owned by es is running, i.e. the Elasticsearch process must be stopped first; wrap the restart in the same none/all allocation steps as above. Also, if another user already occupies UID 500, change that user's ID as well and watch out for its processes.
If in doubt you can also fix ownership manually; the paths that matter are /home/es and the Elasticsearch data directories:
find / -user 501 -exec chown -h es {} \;
find / -group 501 -exec chgrp -h es {} \;
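Before and after the change it is worth confirming that the es user resolves to the same IDs everywhere; a sketch (elk-es01 and elk-es04 appear in the logs above, the middle two names are assumed):
# Compare the UID/GID of the es user across the four nodes.
for h in elk-es01 elk-es02 elk-es03 elk-es04; do ssh "$h" 'hostname; id es'; done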
Re-running the repository creation now succeeds:
[root@elk-es01 ~]# curl -XPUT http://10.10.88.86:9200/_snapshot/client_statistics -d'
{
"type": "fs",
"settings": {
"location": "/storage/esdata",
"compress" : "true",
"max_snapshot_bytes_per_sec" : "50mb",
"max_restore_bytes_per_sec" : "50mb"
}
}'
{"acknowledged":true}[root@elk-es01 ~]#
Check the repository configuration:
[root@elk-es01 ~]# curl -XGET http://10.10.88.86:9200/_snapshot/client_statistics?pretty
{
"client_statistics" : {
"type" : "fs",
"settings" : {
"location" : "/storage/esdata",
"max_restore_bytes_per_sec" : "50mb",
"compress" : "true",
"max_snapshot_bytes_per_sec" : "50mb"
}
}
}
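The repository can also be re-verified on demand, which makes every node check that it can actually write to the location; a sketch against the same repository:
# Ask all nodes to verify access to the repository path.
curl -XPOST 'http://10.10.88.86:9200/_snapshot/client_statistics/_verify?pretty'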
Note: when there are many indices but not much data, you can use wildcards to bundle several indices into one snapshot. Keep each migration batch reasonably small, say under 100 GB, so that a network problem or other interruption does not cost too much progress.
[root@elk-es01 ~]# curl -XPUT http://10.10.88.86:9200/_snapshot/client_statistics/logstash-nginx-accesslog-2018.05.11 -d'
{
"indices": "logstash-nginx-accesslog-2018.05.11"
}'
{"accepted":true}[root@elk-es01 ~]#
May 2018 only covers the first ten days so far:
curl -XPUT http://10.10.88.86:9200/_snapshot/client_statistics/logstash-nginx-accesslog-2018.05 -d'
{
"indices": "logstash-nginx-accesslog-2018.05.0"
}'
curl -XPUT http://10.10.88.86:9200/_snapshot/client_statistics/logstash-nginx-accesslog-2018.04 -d'
{
"indices": "logstash-nginx-accesslog-2018.04"
}'
curl -XPUT http://10.10.88.86:9200/_snapshot/client_statistics/logstash-nginx-accesslog-2018.03 -d'
{
"indices": "logstash-nginx-accesslog-2018.03"
}'curl -XPUT http://10.10.88.86:9200/_snapshot/client_statistics/logstash-nginx-accesslog-2018.02 -d'
{
"indices": "logstash-nginx-accesslog-2018.02"
}'
curl -XPUT http://10.10.88.86:9200/_snapshot/client_statistics/logstash-nginx-accesslog-2018.01 -d'
{
"indices": "logstash-nginx-accesslog-2018.01"
}'
curl -XPUT http://10.10.88.86:9200/_snapshot/client_statistics/logstash-nginx-accesslog-2017.12 -d'
{
"indices": "logstash-nginx-accesslog-2017.12"
}'
curl -XPUT http://10.10.88.86:9200/_snapshot/client_statistics/logstash-nginx-accesslog-2017.11 -d'
{
"indices": "logstash-nginx-accesslog-2017.11*"
}'
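The monthly snapshots above could equally be driven by a small loop that waits for each one to finish; a sketch under the same naming scheme:
# One snapshot per month; wait_for_completion makes each call block until done.
for m in 2018.04 2018.03 2018.02 2018.01 2017.12 2017.11; do
    curl -XPUT "http://10.10.88.86:9200/_snapshot/client_statistics/logstash-nginx-accesslog-$m?wait_for_completion=true" \
         -d"{ \"indices\": \"logstash-nginx-accesslog-$m*\" }"
done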
Example:
[root@elk-es01 ~]# curl -XPUT http://10.10.88.86:9200/_snapshot/client_statistics/logstash-nginx-accesslog-2017.11 -d'
{
"indices": "logstash-nginx-accesslog-2017.11*"
}'
{"accepted":true}[root@elk-es01 ~]#
[root@elk-es01 ~]# curl -XGET http://10.10.88.86:9200/_snapshot/client_statistics/logstash-nginx-accesslog-2017.11?pretty
{
"snapshots" : [
{
"snapshot" : "logstash-nginx-accesslog-2017.11",
"uuid" : "PwPlyCbQQliZY3saog45LA",
"version_id" : 5000299,
"version" : "5.0.2",
"indices" : [
"logstash-nginx-accesslog-2017.11.20",
"logstash-nginx-accesslog-2017.11.17",
"logstash-nginx-accesslog-2017.11.24",
"logstash-nginx-accesslog-2017.11.30",
"logstash-nginx-accesslog-2017.11.22",
"logstash-nginx-accesslog-2017.11.18",
"logstash-nginx-accesslog-2017.11.15",
"logstash-nginx-accesslog-2017.11.16",
"logstash-nginx-accesslog-2017.11.27",
"logstash-nginx-accesslog-2017.11.26",
"logstash-nginx-accesslog-2017.11.19",
"logstash-nginx-accesslog-2017.11.21",
"logstash-nginx-accesslog-2017.11.28",
"logstash-nginx-accesslog-2017.11.23",
"logstash-nginx-accesslog-2017.11.25",
"logstash-nginx-accesslog-2017.11.29"
],
"state" : "IN_PROGRESS",
"start_time" : "2018-05-14T02:31:58.900Z",
"start_time_in_millis" : 1526265118900,
"failures" : [ ],
"shards" : {
"total" : 0,
"failed" : 0,
"successful" : 0
}
}
]
}
Note the state field: the snapshot is still running while it reads IN_PROGRESS and is finished when it turns SUCCESS. Wait for SUCCESS before starting the next snapshot. If there are only a few indices you can also snapshot them all in one go instead of splitting them up.
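A sketch of that polling, using the 2017.11 snapshot from the example above:
# Poll until the snapshot state flips from IN_PROGRESS to SUCCESS.
until curl -s 'http://10.10.88.86:9200/_snapshot/client_statistics/logstash-nginx-accesslog-2017.11' | grep -q '"state":"SUCCESS"'; do
    sleep 30
done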
On the Microsoft Azure side, install the NFS utilities and mount the shared backup directory:
yum -y install nfs-utils
mkdir -p /storage/esdata
mount -t nfs 192.168.88.20:/data/es-data01/backup /storage/esdata/
df -h
All master and data nodes must mount the NFS export.
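To keep the mount across reboots it can also go into /etc/fstab; a sketch with the same export:
# /etc/fstab entry; _netdev delays mounting until the network is up.
192.168.88.20:/data/es-data01/backup  /storage/esdata  nfs  defaults,_netdev  0  0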
scp the data from Alibaba Cloud to the Azure NFS server. Tar the repository first and extract it after copying; the open*** link between the two clouds has to be up before running scp.
scp test@10.10.88.89:/storage/esdata/es20180514.tar.gz ./
scp test@10.10.88.89:/storage/esdata/indices20180514.tar.gz ./
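The tar and extract steps around the copy might look like this (a sketch; archive names and export paths follow the ones used above):
# On the Alibaba Cloud NFS side: pack the repository (skip the archive itself).
cd /storage/esdata && tar --exclude=es20180514.tar.gz -czf es20180514.tar.gz .
# After scp, on the Azure NFS server: unpack into the shared export.
tar -xzf es20180514.tar.gz -C /data/es-data01/backup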
1) Modify elasticsearch.yml on the Azure cluster and add the following:
path.repo: /storage/esdata
#snapshot repository path
Note: every master node and every data node needs this setting.
2) Disable automatic allocation and do the rolling restart, same as on the source cluster:
curl -XPUT http://192.168.88.24:9200/_cluster/settings -d'
{
"transient" : {
"cluster.routing.allocation.enable" : "none"
}
}'
sleep 3
/etc/init.d/elasticsearch restart
curl -XPUT http://192.168.88.24:9200/_cluster/settings -d'
{
"transient" : {
"cluster.routing.allocation.enable" : "all"
}
}'
curl http://192.168.88.20:9200/_cluster/health?pretty
1) Create the snapshot repository first:
curl -XPUT http://192.168.88.20:9200/_snapshot/client_statistics -d'
{
"type": "fs",
"settings": {
"location": "/storage/esdata",
"compress" : "true",
"max_snapshot_bytes_per_sec" : "50mb",
"max_restore_bytes_per_sec" : "50mb"
}
}'
2) Then restore the indices:
[root@prod-elasticsearch-master-01 es]# curl -XPOST http://192.168.88.20:9200/_snapshot/client_statistics/logstash-nginx-accesslog-2018.05/_restore
{"accepted":true}
Run these one at a time: only when the current restore has finished should you start the next one. Restored indices start out yellow and turn green once recovery completes:
curl -XPOST http://192.168.88.20:9200/_snapshot/client_statistics/logstash-nginx-accesslog-2018.04/_restore
curl -XPOST http://192.168.88.20:9200/_snapshot/client_statistics/logstash-nginx-accesslog-2018.03/_restore
curl -XPOST http://192.168.88.20:9200/_snapshot/client_statistics/logstash-nginx-accesslog-2018.02/_restore
curl -XPOST http://192.168.88.20:9200/_snapshot/client_statistics/logstash-nginx-accesslog-2018.01/_restore
curl -XPOST http://192.168.88.20:9200/_snapshot/client_statistics/logstash-nginx-accesslog-2017.12/_restore
curl -XPOST http://192.168.88.20:9200/_snapshot/client_statistics/logstash-nginx-accesslog-2017.11/_restore
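A sketch of the wait between restores, polling cluster health on the Azure side:
# Block until all restored indices are green before kicking off the next restore.
until curl -s 'http://192.168.88.20:9200/_cat/health?h=status' | grep -q green; do
    sleep 30
done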
3) Finally, check the status:
[root@prod-elasticsearch-master-01 es]# curl -XGET http://192.168.88.20:9200/_snapshot/client_statistics/logstash-nginx-accesslog-2018.05/_status?pretty
{
  "snapshots" : [ {
    "snapshot" : "logstash-nginx-accesslog-2018.05",
    "repository" : "client_statistics",
    "uuid" : "VL9LHHUKTNCHx-xsVJD_eA",
    "state" : "SUCCESS",
    "shards_stats" : { "initializing" : 0, "started" : 0, "finalizing" : 0, "done" : 45, "failed" : 0, "total" : 45 },
    "stats" : { "number_of_files" : 4278, "processed_files" : 4278, "total_size_in_bytes" : 22892376668, "processed_size_in_bytes" : 22892376668, "start_time_in_millis" : 1526280514416, "time_in_millis" : 505655 },
    "indices" : {
      "logstash-nginx-accesslog-2018.05.08" : {
        "shards_stats" : { "initializing" : 0, "started" : 0, "finalizing" : 0, "done" : 5, "failed" : 0, "total" : 5 },
        "stats" : { "number_of_files" : 524, "processed_files" : 524, "total_size_in_bytes" : 2617117488, "processed_size_in_bytes" : 2617117488, "start_time_in_millis" : 1526280514420, "time_in_millis" : 260276 },
        "shards" : {
          "0" : { "stage" : "DONE", "stats" : { "number_of_files" : 67, "processed_files" : 67, "total_size_in_bytes" : 569057817, "processed_size_in_bytes" : 569057817, "start_time_in_millis" : 1526280514420, "time_in_millis" : 68086 } },
          "1" : { "stage" : "DONE", "stats" : { "number_of_files" : 124, "processed_files" : 124, "total_size_in_bytes" : 499182013, "processed_size_in_bytes" : 499182013, "start_time_in_millis" : 1526280514446, "time_in_millis" : 62925 } },
          "2" : { "stage" : "DONE", "stats" : { "number_of_files" : 109, "processed_files" : 109, "total_size_in_bytes" : 478469125, "processed_size_in_bytes" : 478469125, "start_time_in_millis" : 1526280698072, "time_in_millis" : 76624 } },
          "3" : { "stage" : "DONE", "stats" : { "number_of_files" : 124, "processed_files" : 124, "total_size_in_bytes" : 546347244, "processed_size_in_bytes" : 546347244, "start_time_in_millis" : 1526280653094, "time_in_millis" : 103590 } },
          "4" : { "stage" : "DONE", "stats" : { "number_of_files" : 100, "processed_files" : 100, "total_size_in_bytes" : 524061289, "processed_size_in_bytes" : 524061289, "start_time_in_millis" : 1526280514456, "time_in_millis" : 69113 } }
        }
      },
      "logstash-nginx-accesslog-2018.05.09" : {
        "shards_stats" : { "initializing" : 0, "started" : 0, "finalizing" : 0, "done" : 5, "failed" : 0, "total" : 5 },
        "stats" : { "number_of_files" : 425, "processed_files" : 425, "total_size_in_bytes" : 2436583034, "processed_size_in_bytes" : 2436583034, "start_time_in_millis" : 1526280514425, "time_in_millis" : 505646 },
        "shards" : {
          "0" : { "stage" : "DONE", "stats" : { "number_of_files" : 94, "processed_files" : 94, "total_size_in_bytes" : 462380313, "processed_size_in_bytes" : 462380313, "start_time_in_millis" : 1526280971948, "time_in_millis" : 48123 } },
          "1" : { "stage" : "DONE", "stats" : { "number_of_files" : 103, "processed_files" : 103, "total_size_in_bytes" : 506505727, "processed_size_in_bytes" : 506505727, "start_time_in_millis" : 1526280851761, "time_in_millis" : 69562 } },
          "2" : { "stage" : "DONE", "stats" : { "number_of_files" : 73, "processed_files" : 73, "total_size_in_bytes" : 506830214, "processed_size_in_bytes" : 506830214, "start_time_in_millis" : 1526280514425, "time_in_millis" : 60508 } },
          "3" : { "stage" : "DONE", "stats" : { "number_of_files" : 52, "processed_files" : 52, "total_size_in_bytes" : 494390868, "processed_size_in_bytes" : 494390868, "start_time_in_millis" : 1526280593311, "time_in_millis" : 52673 } },
          "4" : { "stage" : "DONE", "stats" : { "number_of_files" : 103, "processed_files" : 103, "total_size_in_bytes" : 466475912, "processed_size_in_bytes" : 466475912, "start_time_in_millis" : 1526280583835, "time_in_millis" : 64169 } }
        }
      }
    }
  } ]
}