HAproxy
:當其中一臺ElasticSearch Master
宕掉時,ElasticSearch集羣
會自動將運行正常的節點提高爲Master
,但HAproxy
不會將失敗的請求重新分發到新的Master Node
。ElasticSearch
:單search load balancer(外層負載均衡節點)
、雙coordinator(調度節點)
、若干workhorse(數據節點)
。先後在200併發Index、200併發Update測試下(跑在虛擬機下,線程太多就卡爆了),並先後分別測試了Down掉一臺主coordinator
、Down掉一臺workhorse
,都沒有引發數據異常,集羣工做正常
。HAproxy
搭建集羣失敗的配置吧:#全局配置 global daemon nbproc 4 pidfile /tmp/haproxy.pid #默認配置 defaults mode http #默認的模式mode { tcp|http|health },tcp是4層,http是7層,health只會返回OK retries 2 #兩次鏈接失敗就認爲是服務器不可用,也能夠經過後面設置 option redispatch #當serverId對應的服務器掛掉後,強制定向到其餘健康的服務器 option httpclose #HAProxy會針對客戶端的第一條請求的返回添加cookie並返回給客戶端,客戶端發送後續請求時會發送此cookie到HAProxy #option abortonclose #當服務器負載很高的時候,自動結束掉當前隊列處理比較久的連接 maxconn 4096 #默認的最大鏈接數 timeout connect 5000ms #鏈接超時 timeout client 30000ms #客戶端超時 timeout server 30000ms #服務器超時 timeout check 2000 #心跳檢測超時 log 127.0.0.1 local0 err #[err warning info debug] #統計頁面配置 listen admin_stats bind 0.0.0.0:8888 #監聽端口 mode http #http的7層模式 option httplog #採用http日誌格式 #log 127.0.0.1 local0 err maxconn 10 stats refresh 30s #統計頁面自動刷新時間 stats uri / #統計頁面url stats realm XingCloud\ Haproxy #統計頁面密碼框上提示文本 stats auth admin:admin #統計頁面用戶名和密碼設置 #stats hide-version #隱藏統計頁面上HAProxy的版本信息 #ElasticSearch Frontend frontend eshttp bind 0.0.0.0:9200 mode tcp use_backend eshttp_server #ElasticSearch Backend backend eshttp_server server eshttp1 vm12:9200 cookie 1 check inter 2000 rise 3 fall 3 weight 2 server eshttp2 vm13:9200 cookie 2 check inter 2000 rise 3 fall 3 weight 1 server eshttp3_bk vm14:9200 cookie 3 check inter 1000 rise 3 fall 3 backup
ElasticSearch
搭建集羣的關鍵幾個配置:cluster.name: harold #集羣名稱 node.name: "harold_lb" #節點名稱 # 3. You want this node to be neither master nor data node, but # to act as a "search load balancer" (fetching data from nodes, # aggregating results, etc.) # node.master: false node.data: false discovery.zen.ping.unicast.hosts: ["vm11", "vm12", "vm13", "vm14", "vm15", "vm16"] #自動發現節點hosts
cluster.name: harold #集羣名稱 node.name: "harold_coordinator_1" #節點名稱 # 2. You want this node to only serve as a master: to not store any data and # to have free resources. This will be the "coordinator" of your cluster. # node.master: true node.data: false discovery.zen.ping.unicast.hosts: ["vm11", "vm12", "vm13", "vm14", "vm15", "vm16"] #自動發現節點hosts
cluster.name: harold #集羣名稱 node.name: "harold_data_1" #節點名稱 # 1. You want this node to never become a master node, only to hold data. # This will be the "workhorse" of your cluster. # node.master: false node.data: true discovery.zen.ping.unicast.hosts: ["vm11", "vm12", "vm13", "vm14", "vm15", "vm16"] #自動發現節點hosts
主分片
與複製分片
:curl -XPUT -d'{"settings":{"number_of_shards":6, "number_of_replicas":1}}' http://vm11:9200/app1
php
Tip:node
number_of_shards
主分片在集羣中的總數量
number_of_replicas
每一個主分片的複製分片數量redis
#複製分片在今後的分佈式集羣變化過程當中,隨時均可以根據業務進行新增或減小: curl -XPUT -d'{"number_of_replicas":2}' http://vm11:9200/app1/_settings
#另外,ElasticSearch在沒有任何索引的狀況下新增一個文檔,便自動建立了索引,爲避免發生這種狀況,能夠在配置文件中添加: action.auto_create_index: false
curl -XDELETE http://vm11:9200/app1
服務器
當單獨把Master Coordinator
Down掉後,/_plugin/head/插件頁面會是這個樣子:
但可喜的是,並不影響集羣與集羣客戶端之間數據的CRUD操做。
數據有所改變並且較長一段時間後(大約10幾分鐘?),/_plugin/head/插件頁面會恢復正常。swoole
<?php

// NOTE(review): no namespace or `use` statements are visible in this chunk;
// `Command` (Laravel console), `ClientBuilder` (elasticsearch-php) and
// `\swoole_process` (Swoole extension) must be resolved by the project's
// autoloader/extensions — confirm the missing `use` lines exist upstream.

/**
 * Artisan console command that load-tests an Elasticsearch cluster by forking
 * Swoole worker processes which concurrently index, update and read documents.
 */
class es extends Command {
    /**
     * The name and signature of the console command.
     *
     * @var string
     */
    protected $signature = 'es:test';

    /**
     * The console command description.
     *
     * @var string
     */
    protected $description = 'Command description.';

    // Elasticsearch seed hosts passed to ClientBuilder::setHosts().
    private $hosts = ["vm11:9200"];
    // Target index name.
    private $index = "app1";
    // Target document type (pre-7.x Elasticsearch mapping types).
    private $type = "users1";
    // Number of worker processes to fork per phase.
    private $process = 200;
    // Total number of documents across all workers.
    private $sum = 10000;
    // Documents handled per worker: $sum / $process (set in the constructor).
    private $num_per_proc;

    /**
     * Create a new command instance.
     *
     * Aborts when $sum is not evenly divisible by $process, because each
     * worker is assigned a fixed-size contiguous id range.
     *
     * @return void
     */
    public function __construct() {
        parent::__construct();
        // NOTE(review): die() inside a constructor kills the whole artisan
        // process; throwing an exception would be cleaner — confirm intent.
        $this->sum % $this->process !== 0 && die("invalid num. \n");
        $this->num_per_proc = $this->sum / $this->process;
    }

    /**
     * Fork $this->process workers; each indexes its own contiguous slice of
     * document ids [num_per_proc*p, num_per_proc*(p+1)) with random demo data.
     */
    private function insert() {
        $es = new ClientBuilder();
        $es->setHosts($this->hosts);
        // NOTE(review): the client is built BEFORE forking and shared by every
        // child via `use`; building it inside each worker after fork would
        // avoid sharing underlying connections across processes — verify.
        $client = $es->build();
        $words = str_split("abcdefghijklmnopqrstuvwxyz");
        $birth_year = [];
        // Candidate birth years 1961..2010.
        for ($i = 1; $i <= 50; $i++) {
            $birth_year[] = 1960 + $i;
        }
        $type = ['1', '2', '3', '4'];
        $process = [];
        for ($p = 0; $p < $this->process; $p++) {
            // $p is captured by value at closure creation, so each worker
            // keeps its own slice index.
            $process[] = new \swoole_process(function () use ($client, $birth_year, $type, $words, $p) {
                for ($i = $this->num_per_proc * $p; $i < $this->num_per_proc * ($p + 1); $i++) {
                    $client->index([
                        'index' => $this->index,
                        'type' => $this->type,
                        'id' => $i,
                        'body' => [
                            'birth_year' => $birth_year[array_rand($birth_year)],
                            'type' => $type[array_rand($type)],
                            // Random 4-letter lowercase name.
                            'name' => $words[mt_rand(0, 25)] . $words[mt_rand(0, 25)] . $words[mt_rand(0, 25)] . $words[mt_rand(0, 25)],
                            'height' => mt_rand(150, 200),
                            'weight' => mt_rand(40, 200),
                            'test' => 1, // counter later incremented by update()
                            'userid' => $i
                        ]
                    ]);
                }
            });
        }
        // Start every worker and print its pid.
        foreach ($process as $p) {
            $pid = $p->start();
            echo $pid . "\n";
        }
    }

    /**
     * Fork $this->process workers; each pages through its slice of documents
     * (sorted by userid) and increments the 'test' counter on every hit,
     * appending each new counter value to /tmp/s for later inspection.
     */
    private function update() {
        $es = new ClientBuilder();
        $es->setHosts($this->hosts);
        // NOTE(review): same pre-fork shared client pattern as insert().
        $client = $es->build();
        $process = [];
        for ($i = 0; $i < $this->process; $i++) {
            $process[] = new \swoole_process(function () use ($client, $i) {
                // Fetch this worker's page: offset = num_per_proc * i.
                $response = $client->search([
                    'index' => $this->index,
                    'type' => $this->type,
                    'size' => $this->num_per_proc,
                    'from' => $this->num_per_proc * $i,
                    'sort' => "userid:asc"
                ]);
                foreach ($response['hits']['hits'] as $v) {
                    $id = $v['_id'];
                    $test = $v['_source']['test'];
                    $test++;
                    file_put_contents("/tmp/s", $test . "\n", FILE_APPEND);
                    // Partial update: only the 'test' field is rewritten.
                    $client->update([
                        'index' => $this->index,
                        'type' => $this->type,
                        'id' => $id,
                        'body' => [
                            'doc' => [
                                'test' => $test
                            ]
                        ]
                    ]);
                }
            });
        }
        foreach ($process as $p) {
            $pid = $p->start();
            echo $pid . "\n";
        }
    }

    /**
     * Single-process read check: fetch 5000 documents starting at offset 500
     * (sorted by userid) and dump each document's 'test' counter.
     */
    private function gets() {
        $es = new ClientBuilder();
        $es->setHosts($this->hosts);
        $client = $es->build();
        $response = $client->search([
            'index' => $this->index,
            'type' => $this->type,
            'size' => 5000,
            'from' => 500,
            'sort' => "userid:asc"
        ]);
        foreach ($response['hits']['hits'] as $v) {
            $id = $v['_id'];
            $test = $v['_source']['test'];
            // file_put_contents("/tmp/s", $test . "\n", FILE_APPEND);
            var_dump($test);
        }
    }

    /**
     * Execute the console command.
     *
     * Only the insert phase runs here; update()/gets() are switched in
     * manually for the other test phases.
     *
     * @return mixed
     */
    public function handle() {
        $this->insert();
    }
}