Redis notes, part 4 (cluster setup, plus adding and removing nodes)


16. Redis Cluster

Reference: https://redis.io/topics/cluster-tutorial

16.1 Cluster Setup

#The following simulates six nodes on a single machine: 3 masters and 3 slaves

[root@redis ~]# cd /application/
[root@redis application]# cd redis
[root@redis redis]# mkdir redis-cluster
[root@redis redis]# cd redis-cluster
[root@redis redis-cluster]# mkdir 700{1..6}    #(the cd and mkdir steps are implied by the listing below)
[root@redis redis-cluster]# ls -l
total 24
drwxr-xr-x 2 root root 4096 Dec 10 17:51 7001
drwxr-xr-x 2 root root 4096 Dec 10 17:51 7002
drwxr-xr-x 2 root root 4096 Dec 10 17:51 7003
drwxr-xr-x 2 root root 4096 Dec 10 17:51 7004
drwxr-xr-x 2 root root 4096 Dec 10 17:51 7005
drwxr-xr-x 2 root root 4096 Dec 10 17:51 7006
 
[root@redis redis-cluster]# cp /application/redis/conf/redis.conf /application/redis/conf/redis.conf.ori2
 
[root@redis redis-cluster]# cd 7001


 

[root@redis 7001]# vim redis.conf

#The settings below need to be configured

bind 172.16.1.11    #Each node must bind its own IP. Since a single machine is simulating the cluster here, every node uses the same IP; with multiple machines each node binds its own address.

port 7001   #Each node listens on its own port

daemonize yes

pidfile /var/run/redis_7001.pid #This file disappears once the Redis service is stopped

loglevel debug

logfile "/application/redis/logs/redis-7001.log"

dir "/application/redis/redis-cluster/7001" #Data file location. Each node must use a different directory, otherwise data will be lost!

appendonly yes

appendfsync always

 

cluster-enabled yes #Enable cluster mode

cluster-config-file nodes-7001.conf #The 700X here should match the port, and every node file must be distinct. If you ever rebuild the cluster, remember to delete all the nodes-700*.conf files first, otherwise creation fails; the file is regenerated when the cluster is created (see the sketch after this config block).

cluster-node-timeout 5000
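#A minimal sketch of that cleanup before rebuilding a cluster (assumes the directory layout used above and that the instances are already stopped):
[root@redis redis-cluster]# for((i=1;i<=6;i++));do rm -f /application/redis/redis-cluster/700$i/nodes-700$i.conf;done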

 

#For the other five node directories, simply take the redis.conf from 7001 and change every 7001 to the corresponding port.

#Copy the redis.conf from 7001 into each directory

[root@redis redis-cluster]# for((i=2;i<=6;i++));do cp /application/redis/redis-cluster/7001/redis.conf /application/redis/redis-cluster/700$i;done
[root@redis redis-cluster]# ls 700{2..6}/*
7002/redis.conf 7003/redis.conf 7004/redis.conf 7005/redis.conf  7006/redis.conf


#Replace 7001 with the matching port in each directory, backing up redis.conf

[root@redis redis-cluster]# for((i=2;i<=6;i++));do sed -i.ori "s#7001#700$i#g" /application/redis/redis-cluster/700$i/redis.conf;done


#Verify the replacement succeeded in each directory

[root@redis redis-cluster]# for((i=2;i<=6;i++));do grep 700$i /application/redis/redis-cluster/700$i/redis.conf;done 
port 7002
pidfile "/var/run/redis_7002.pid"
logfile "/application/redis/logs/redis-7002.log"
dir "/application/redis/redis-cluster/7002"
cluster-config-file nodes-7002.conf
 
port 7003
pidfile "/var/run/redis_7003.pid"
logfile "/application/redis/logs/redis-7003.log"
dir "/application/redis/redis-cluster/7003"
cluster-config-file nodes-7003.conf
 
port 7004
pidfile "/var/run/redis_7004.pid"
logfile "/application/redis/logs/redis-7004.log"
dir "/application/redis/redis-cluster/7004"
cluster-config-file nodes-7004.conf
 
port 7005
pidfile "/var/run/redis_7005.pid"
logfile "/application/redis/logs/redis-7005.log"
dir "/application/redis/redis-cluster/7005"
cluster-config-file nodes-7005.conf
 
port 7006
pidfile "/var/run/redis_7006.pid"
logfile "/application/redis/logs/redis-7006.log"
dir "/application/redis/redis-cluster/7006"
cluster-config-file nodes-7006.conf


 

#The cache is distributed: each node stores different data. With the 6 nodes above, every node must know about the other five.

 

#Redis cluster setup needs Ruby, because the redis-trib.rb tool used below to create the cluster is written in Ruby; see "16.4 redis-trib.rb explained".

[root@redis run]# rpm -qa ruby
[root@redis run]# rpm -qa rubygems
[root@redis run]# yum install ruby rubygems -y
[root@redis run]# rpm -qa ruby rubygems
rubygems-1.3.7-5.el6.noarch
ruby-1.8.7.374-4.el6_6.x86_64
[root@redis run]# gem install redis #Install the Ruby client library for Redis
Successfully installed redis-3.3.2
1 gem installed
Installing ri documentation for redis-3.3.2...
Installing RDoc documentation for redis-3.3.2...
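#Note: redis-trib.rb only needs the redis gem. If a plain gem install ever pulls a version that does not work with this Ruby 1.8.7, the version shown above can be pinned explicitly (a hedged sketch):
[root@redis run]# gem install redis -v 3.3.2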


 

#Start the six Redis instances

[root@redis redis-cluster]# for((i=1;i<=6;i++));do redis-server /application/redis/redis-cluster/700$i/redis.conf;done
[root@redis redis-cluster]# netstat -lntup|grep redis
tcp        0      0 172.16.1.11:7005            0.0.0.0:*                   LISTEN      4843/redis-server 1 
tcp        0      0 172.16.1.11:7006            0.0.0.0:*                   LISTEN      4845/redis-server 1 
tcp        0      0 172.16.1.11:17001           0.0.0.0:*                   LISTEN      4833/redis-server 1 
tcp        0      0 172.16.1.11:17002           0.0.0.0:*                   LISTEN      4837/redis-server 1 
tcp        0      0 172.16.1.11:17003           0.0.0.0:*                   LISTEN      4839/redis-server 1 
tcp        0      0 172.16.1.11:17004           0.0.0.0:*                   LISTEN      4841/redis-server 1 
tcp        0      0 172.16.1.11:17005           0.0.0.0:*                   LISTEN      4843/redis-server 1 
tcp        0      0 172.16.1.11:17006           0.0.0.0:*                   LISTEN      4845/redis-server 1 
tcp        0      0 172.16.1.11:7001            0.0.0.0:*                   LISTEN      4833/redis-server 1 
tcp        0      0 172.16.1.11:7002            0.0.0.0:*                   LISTEN      4837/redis-server 1 
tcp        0      0 172.16.1.11:7003            0.0.0.0:*                   LISTEN      4839/redis-server 1 
tcp        0      0 172.16.1.11:7004            0.0.0.0:*                   LISTEN      4841/redis-server 1 
#Ports 17001 to 17006 above are the cluster bus ports: every cluster node also listens on its client port + 10000 for node-to-node communication.
 
[root@redis redis-cluster]# ps -ef|grep 700|grep -v grep 
root      4833      1  0 19:51 ?        00:00:00 redis-server 172.16.1.11:7001[cluster]                      
root      4837      1  0 19:51 ?        00:00:00 redis-server 172.16.1.11:7002[cluster]                      
root      4839      1  0 19:51 ?        00:00:00 redis-server 172.16.1.11:7003[cluster]                      
root      4841      1  0 19:51 ?        00:00:00 redis-server 172.16.1.11:7004[cluster]                      
root      4843      1  0 19:51 ?        00:00:00 redis-server 172.16.1.11:7005[cluster]                      
root      4845      1  0 19:51 ?        00:00:00 redis-server 172.16.1.11:7006[cluster]


 

#Go to the Redis source directory and run redis-trib.rb

[root@redis src]# pwd

/home/lly/tools/redis-3.2.5/src

 

#In the command below, --replicas 1 means one replica per master: 7001-7003 become the three masters, 7004-7006 the three slaves, and 7001/7004 form one master/slave pair, and so on.

#Other master/replica ratios are also possible.

#Important: the IPs used when creating the cluster must match the addresses bound with bind in each node's redis.conf!

#Tested with a Java cluster client: the IP addresses specified in the Java code must also match the addresses used when the cluster was created;

#internal and external IPs are not interchangeable!

[root@redis src]# ./redis-trib.rb create --replicas 1 172.16.1.11:7001 172.16.1.11:7002 172.16.1.11:7003 172.16.1.11:7004 172.16.1.11:7005 172.16.1.11:7006
>>> Creating cluster
>>> Performing hash slots allocation on 6 nodes...
Using 3 masters:
172.16.1.11:7001
172.16.1.11:7002
172.16.1.11:7003
Adding replica 172.16.1.11:7004 to 172.16.1.11:7001
Adding replica 172.16.1.11:7005 to 172.16.1.11:7002
Adding replica 172.16.1.11:7006 to 172.16.1.11:7003


 

#M below means master; 68e9252e2e2404a0ced500a98085acaa5754c7a2 is the node's unique ID

M: 68e9252e2e2404a0ced500a98085acaa5754c7a2 172.16.1.11:7001

   slots:0-5460 (5461 slots) master     #5461 slots in total. A slot can be thought of as a data shard: the keyspace is split into many pieces, which is more efficient. Only master nodes own slots; the slave nodes below own none, which is why slaves only serve reads, not writes.

M: f314dda271d135634d6849cdb649192b58b08d7f 172.16.1.11:7002
   slots:5461-10922 (5462 slots) master
M: 6385ebf9ea346525671b8c339614de4cb2a118cc 172.16.1.11:7003
   slots:10923-16383 (5461 slots) master

#S means slave

S: 37384b8db7e9e5462d2da236d0104207eac26060 172.16.1.11:7004
   replicates 68e9252e2e2404a0ced500a98085acaa5754c7a2
S: 00371b626a2d42654a0cebb985311aee56578142 172.16.1.11:7005
   replicates f314dda271d135634d6849cdb649192b58b08d7f
S: 8dc598b1ceb81e397903973eeba02e3e53770b0c 172.16.1.11:7006
   replicates 6385ebf9ea346525671b8c339614de4cb2a118cc

Can I set the above configuration? (type 'yes' to accept): yes  #type yes

>>> Nodes configuration updated

>>> Assign a different config epoch to each node

>>> Sending CLUSTER MEET messages to join the cluster

Waiting for the cluster to join...

>>> Performing Cluster Check (using node 172.16.1.11:7001)

M: 68e9252e2e2404a0ced500a98085acaa5754c7a2 172.16.1.11:7001
   slots:0-5460 (5461 slots) master
   1 additional replica(s)
M: f314dda271d135634d6849cdb649192b58b08d7f 172.16.1.11:7002
   slots:5461-10922 (5462 slots) master
   1 additional replica(s)
S: 8dc598b1ceb81e397903973eeba02e3e53770b0c 172.16.1.11:7006
   slots: (0 slots) slave
   replicates 6385ebf9ea346525671b8c339614de4cb2a118cc
S: 37384b8db7e9e5462d2da236d0104207eac26060 172.16.1.11:7004
   slots: (0 slots) slave
   replicates 68e9252e2e2404a0ced500a98085acaa5754c7a2
M: 6385ebf9ea346525671b8c339614de4cb2a118cc 172.16.1.11:7003
   slots:10923-16383 (5461 slots) master
   1 additional replica(s)
S: 00371b626a2d42654a0cebb985311aee56578142 172.16.1.11:7005
   slots: (0 slots) slave
   replicates f314dda271d135634d6849cdb649192b58b08d7f

[OK] All nodes agree about slots configuration.

>>> Check for open slots...

>>> Check slots coverage...

[OK] All 16384 slots covered.

 

 

#Stopping the cluster

#My understanding is that the slaves should be stopped first, then the masters

[root@redis ~]# for((i=6;i>=1;i--));do redis-cli -c -h 172.16.1.11 -p 700$i shutdown;done
[root@redis ~]# netstat -lntup|grep redis|grep -v grep
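#The next section assumes the six instances are running again; they can be restarted with the same loop used above:
[root@redis ~]# for((i=1;i<=6;i++));do redis-server /application/redis/redis-cluster/700$i/redis.conf;done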


 

 

16.2 Viewing information about each node in the cluster

#For details of the cluster commands, see "16.5 Redis cluster commands"

-c means cluster mode, -h the host IP, -p the port

[root@redis ~]# redis-cli -c -h 172.16.1.11 -p 7001
172.16.1.11:7001> cluster info
cluster_state:ok
cluster_slots_assigned:16384
cluster_slots_ok:16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:6
cluster_size:3
cluster_current_epoch:6
cluster_my_epoch:1      #The masters have epochs 1 to 3, and the slaves follow on in the same order.
cluster_stats_messages_sent:47331
cluster_stats_messages_received:47331


 

#The node you are connected to is marked with myself

172.16.1.11:7001> cluster nodes
f314dda271d135634d6849cdb649192b58b08d7f 172.16.1.11:7002master - 0 1481381814328 2 connected 5461-10922
8dc598b1ceb81e397903973eeba02e3e53770b0c 172.16.1.11:7006slave 6385ebf9ea346525671b8c339614de4cb2a118cc 0 1481381813826 6 connected
37384b8db7e9e5462d2da236d0104207eac26060 172.16.1.11:7004slave 68e9252e2e2404a0ced500a98085acaa5754c7a2 0 1481381813218 4 connected
68e9252e2e2404a0ced500a98085acaa5754c7a2 172.16.1.11:7001myself,master - 0 0 1 connected 0-5460
6385ebf9ea346525671b8c339614de4cb2a118cc 172.16.1.11:7003master - 0 1481381814328 3 connected 10923-16383
00371b626a2d42654a0cebb985311aee56578142 172.16.1.11:7005slave f314dda271d135634d6849cdb649192b58b08d7f 0 1481381813321 5 connected
 
[root@redis ~]# redis-cli -c -h 172.16.1.11 -p 7002
172.16.1.11:7002> cluster info
cluster_state:ok
cluster_slots_assigned:16384
cluster_slots_ok:16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:6
cluster_size:3
cluster_current_epoch:6
cluster_my_epoch:2
cluster_stats_messages_sent:47456
cluster_stats_messages_received:47456
 
172.16.1.11:7002> cluster nodes
8dc598b1ceb81e397903973eeba02e3e53770b0c 172.16.1.11:7006slave 6385ebf9ea346525671b8c339614de4cb2a118cc 0 1481381818884 6 connected
68e9252e2e2404a0ced500a98085acaa5754c7a2 172.16.1.11:7001master - 0 1481381818380 1 connected 0-5460
37384b8db7e9e5462d2da236d0104207eac26060 172.16.1.11:7004slave 68e9252e2e2404a0ced500a98085acaa5754c7a2 0 1481381817372 4 connected
6385ebf9ea346525671b8c339614de4cb2a118cc 172.16.1.11:7003master - 0 1481381817372 3 connected 10923-16383
f314dda271d135634d6849cdb649192b58b08d7f 172.16.1.11:7002myself,master - 0 0 2 connected 5461-10922
00371b626a2d42654a0cebb985311aee56578142 172.16.1.11:7005slave f314dda271d135634d6849cdb649192b58b08d7f 0 1481381817374 5 connected
 
 
[root@redis ~]# redis-cli -c -h 172.16.1.11 -p 7003
172.16.1.11:7003> cluster info
cluster_state:ok
cluster_slots_assigned:16384
cluster_slots_ok:16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:6
cluster_size:3
cluster_current_epoch:6
cluster_my_epoch:3
cluster_stats_messages_sent:47279
cluster_stats_messages_received:47279
 
172.16.1.11:7003> cluster nodes
f314dda271d135634d6849cdb649192b58b08d7f 172.16.1.11:7002master - 0 1481381827476 2 connected 5461-10922
68e9252e2e2404a0ced500a98085acaa5754c7a2 172.16.1.11:7001master - 0 1481381826470 1 connected 0-5460
00371b626a2d42654a0cebb985311aee56578142 172.16.1.11:7005slave f314dda271d135634d6849cdb649192b58b08d7f 0 1481381828484 5 connected
8dc598b1ceb81e397903973eeba02e3e53770b0c 172.16.1.11:7006slave 6385ebf9ea346525671b8c339614de4cb2a118cc 0 1481381826972 6 connected
37384b8db7e9e5462d2da236d0104207eac26060 172.16.1.11:7004slave 68e9252e2e2404a0ced500a98085acaa5754c7a2 0 1481381828482 4 connected
6385ebf9ea346525671b8c339614de4cb2a118cc 172.16.1.11:7003myself,master - 0 0 3 connected 10923-16383
 
[root@redis ~]# redis-cli -c -h 172.16.1.11 -p 7004
172.16.1.11:7004> cluster info
cluster_state:ok
cluster_slots_assigned:16384
cluster_slots_ok:16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:6
cluster_size:3
cluster_current_epoch:6
cluster_my_epoch:1
cluster_stats_messages_sent:47229
cluster_stats_messages_received:47229
 
172.16.1.11:7004> cluster nodes
00371b626a2d42654a0cebb985311aee56578142 172.16.1.11:7005slave f314dda271d135634d6849cdb649192b58b08d7f 0 1481381832005 5 connected
f314dda271d135634d6849cdb649192b58b08d7f 172.16.1.11:7002master - 0 1481381832511 2 connected 5461-10922
8dc598b1ceb81e397903973eeba02e3e53770b0c 172.16.1.11:7006slave 6385ebf9ea346525671b8c339614de4cb2a118cc 0 1481381831503 6 connected
6385ebf9ea346525671b8c339614de4cb2a118cc 172.16.1.11:7003master - 0 1481381832006 3 connected 10923-16383
37384b8db7e9e5462d2da236d0104207eac26060 172.16.1.11:7004myself,slave 68e9252e2e2404a0ced500a98085acaa5754c7a2 0 0 4 connected
68e9252e2e2404a0ced500a98085acaa5754c7a2 172.16.1.11:7001master - 0 1481381832513 1 connected 0-5460
 
 
[root@redis ~]# redis-cli -c -h 172.16.1.11 -p 7005
172.16.1.11:7005> cluster info
cluster_state:ok
cluster_slots_assigned:16384
cluster_slots_ok:16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:6
cluster_size:3
cluster_current_epoch:6
cluster_my_epoch:2
cluster_stats_messages_sent:47928
cluster_stats_messages_received:47928
 
172.16.1.11:7005> cluster nodes
f314dda271d135634d6849cdb649192b58b08d7f 172.16.1.11:7002master - 0 1481381836745 2 connected 5461-10922
00371b626a2d42654a0cebb985311aee56578142 172.16.1.11:7005myself,slave f314dda271d135634d6849cdb649192b58b08d7f 0 0 5 connected
6385ebf9ea346525671b8c339614de4cb2a118cc 172.16.1.11:7003master - 0 1481381837752 3 connected 10923-16383
37384b8db7e9e5462d2da236d0104207eac26060 172.16.1.11:7004slave 68e9252e2e2404a0ced500a98085acaa5754c7a2 0 1481381837753 4 connected
8dc598b1ceb81e397903973eeba02e3e53770b0c 172.16.1.11:7006slave 6385ebf9ea346525671b8c339614de4cb2a118cc 0 1481381838259 6 connected
68e9252e2e2404a0ced500a98085acaa5754c7a2 172.16.1.11:7001master - 0 1481381837250 1 connected 0-5460
 
 
[root@redis ~]# redis-cli -c -h 172.16.1.11 -p 7006
172.16.1.11:7006> cluster info
cluster_state:ok
cluster_slots_assigned:16384
cluster_slots_ok:16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:6
cluster_size:3
cluster_current_epoch:6
cluster_my_epoch:3
cluster_stats_messages_sent:47525
cluster_stats_messages_received:47525
 
172.16.1.11:7006> cluster nodes
8dc598b1ceb81e397903973eeba02e3e53770b0c 172.16.1.11:7006myself,slave 6385ebf9ea346525671b8c339614de4cb2a118cc 0 0 6 connected
6385ebf9ea346525671b8c339614de4cb2a118cc 172.16.1.11:7003master - 0 1481381856221 3 connected 10923-16383
f314dda271d135634d6849cdb649192b58b08d7f 172.16.1.11:7002master - 0 1481381857761 2 connected 5461-10922
37384b8db7e9e5462d2da236d0104207eac26060 172.16.1.11:7004slave 68e9252e2e2404a0ced500a98085acaa5754c7a2 0 1481381858237 1 connected
68e9252e2e2404a0ced500a98085acaa5754c7a2 172.16.1.11:7001master - 0 1481381857762 1 connected 0-5460
00371b626a2d42654a0cebb985311aee56578142 172.16.1.11:7005slave f314dda271d135634d6849cdb649192b58b08d7f 0 1481381856726 5 connected


 

 

16.3 Testing the cluster nodes

 

#First, keys * on every node shows no data.

#Set a key on 7001

172.16.1.11:7001> set name11 name11val1
-> Redirected to slot [11311] located at 172.16.1.11:7003
OK


#The client is redirected to 7003, the node that holds the value

172.16.1.11:7003> keys *
1) "name11"
172.16.1.11:7003> get name11
"name11val1"


 

#Query on 7002: keys * shows nothing, but the value can still be fetched

172.16.1.11:7002> keys *
(empty list or set)
172.16.1.11:7002> get name11
-> Redirected to slot [11311] located at 172.16.1.11:7003
"name11val1"


 

#Query on 7003

172.16.1.11:7003> keys *
1) "name11"
172.16.1.11:7003> get name11
"name11val1"


 

#7004, 7005 and 7006 behave the same way as 7002

 

#In other words, a value set on one node is not necessarily assigned to that node's own slots; it may land in another node's slot, and any node in the cluster can get a value set through any other node.
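#You can check the key-to-slot mapping directly with CLUSTER KEYSLOT (a quick sketch; run against any node):
[root@redis ~]# redis-cli -c -h 172.16.1.11 -p 7001 cluster keyslot name11
(integer) 11311     #11311 falls in 7003's range 10923-16383, which is why the set above was redirected there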

 

 

16.4 redis-trib.rb explained

[root@redis src]# ./redis-trib.rb   #the cluster management tool, written in Ruby

Usage: redis-trib <command> <options> <arguments ...>

  info            host:port                                     #show cluster information
  set-timeout     host:port milliseconds                        #set the heartbeat timeout between cluster nodes
  help            (show this help)
  check           host:port                                     #check the cluster
  call            host:port command arg arg .. arg              #run a command on every node in the cluster
  import          host:port                                     #import data from an external Redis into the cluster
                  --copy
                  --from <arg>
                  --replace
  rebalance       host:port                                     #balance the slot counts across nodes
                  --threshold <arg>
                  --simulate
                  --pipeline <arg>
                  --auto-weights
                  --timeout <arg>
                  --use-empty-masters
                  --weight <arg>
  del-node        host:port node_id                             #remove a node from the cluster
  fix             host:port                                     #repair the cluster
                  --timeout <arg>
  add-node        new_host:new_port existing_host:existing_port #add a new node to the cluster
                  --master-id <arg>
                  --slave
  reshard         host:port                                     #migrate slots online
                  --pipeline <arg>
                  --timeout <arg>
                  --from <arg>
                  --yes
                  --slots <arg>
                  --to <arg>
  create          host1:port1 ... hostN:portN                   #create a cluster from the given IP1:PORT1 ... IPN:PORTN list
                  --replicas <arg>
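#For example, to sanity-check the cluster built in 16.1 (a sketch; output omitted):
[root@redis src]# ./redis-trib.rb check 172.16.1.11:7001
[root@redis src]# ./redis-trib.rb info 172.16.1.11:7001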

 

 

16.5 Redis cluster commands

//Cluster
CLUSTER INFO #print information about the cluster.
CLUSTER NODES #list all nodes currently known to the cluster, along with their details.

//Nodes
CLUSTER MEET <ip> <port> #add the node at ip:port to the cluster, making it part of the cluster.
CLUSTER FORGET <node_id> #remove the node identified by node_id from the cluster.
CLUSTER REPLICATE <node_id> #make the current node a slave of the node identified by node_id.
CLUSTER SAVECONFIG #save the node's cluster configuration file to disk.

//Slots
CLUSTER ADDSLOTS <slot> [slot ...] #assign one or more slots to the current node.
CLUSTER DELSLOTS <slot> [slot ...] #remove the assignment of one or more slots from the current node.
CLUSTER FLUSHSLOTS #remove every slot assigned to the current node, leaving it with none.
CLUSTER SETSLOT <slot> NODE <node_id> #assign the slot to the node identified by node_id; if the slot is already assigned to another node, that node drops it first.
CLUSTER SETSLOT <slot> MIGRATING <node_id> #migrate this node's slot to the node identified by node_id.
CLUSTER SETSLOT <slot> IMPORTING <node_id> #import the slot into this node from the node identified by node_id.
CLUSTER SETSLOT <slot> STABLE #cancel an in-progress import or migration of the slot.

//Keys
CLUSTER KEYSLOT <key> #compute which slot the key should be placed in.
CLUSTER COUNTKEYSINSLOT <slot> #return the number of key-value pairs currently in the slot.
CLUSTER GETKEYSINSLOT <slot> <count> #return up to count keys from the slot.

#The commands above are specific to Redis Cluster; log in to a node first before running them.

#For example:

[root@redis ~]# redis-cli -c -h 172.16.1.11 -p 7001
172.16.1.11:7001> cluster info
cluster_state:ok
cluster_slots_assigned:16384
cluster_slots_ok:16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:6
cluster_size:3
cluster_current_epoch:6
cluster_my_epoch:1      
cluster_stats_messages_sent:47331
cluster_stats_messages_received:47331
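#A quick sketch of the slot/key commands above, run against the node that owns slot 11311 (COUNTKEYSINSLOT and GETKEYSINSLOT only report slots served by the node you are connected to; replies depend on the data currently stored):
172.16.1.11:7003> cluster keyslot name11          #which slot the key hashes to
172.16.1.11:7003> cluster countkeysinslot 11311   #how many keys this node holds in that slot
172.16.1.11:7003> cluster getkeysinslot 11311 10  #list up to 10 keys from that slot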


 

 

16.6 Adding/removing cluster nodes

Reference: http://blog.51yip.com/nosql/1726.html/comment-page-1

 

16.6.1 Setting up the test nodes

    Create two new instances the same way the cluster was built: one master and one slave, 7007 and 7008.
    For the node configuration details, see "16.1 Cluster Setup".

 

#Create the new nodes
[root@redis ~]# cd /application/redis/redis-cluster/
[root@redis redis-cluster]# cp -R 7001 7007
[root@redis redis-cluster]# cp -R 7001 7008
[root@redis redis-cluster]# sed -i.ori "s#7001#7007#g" 7007/redis.conf 
[root@redis redis-cluster]# sed -i.ori "s#7001#7008#g" 7008/redis.conf  
[root@redis redis-cluster]# grep 7007 7007/redis.conf 
port 7007
pidfile /var/run/redis_7007.pid
logfile"/application/redis/logs/redis-7007.log"
dir /application/redis/redis-cluster/7007
cluster-config-file nodes-7007.conf
[root@redis redis-cluster]# grep 7008 7008/redis.conf   
port 7008
pidfile /var/run/redis_7008.pid
logfile"/application/redis/logs/redis-7008.log"
dir /application/redis/redis-cluster/7008
cluster-config-file nodes-7008.conf
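#(The ps output below implies the two new instances were started; presumably with something like:)
[root@redis redis-cluster]# redis-server /application/redis/redis-cluster/7007/redis.conf
[root@redis redis-cluster]# redis-server /application/redis/redis-cluster/7008/redis.conf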
 
 
#Verify the new instances are running
[root@redis redis-cluster]# ps -ef|grep redis|grep -v grep
root      5372      1  0 00:21 ?        00:00:00 redis-server 172.16.1.11:7007[cluster]                      
root      5376      1  0 00:21 ?        00:00:00 redis-server 172.16.1.11:7008[cluster]

 

16.6.2 Adding nodes

16.6.2.1 Adding master node 7007

#First start all the existing nodes

[root@redis redis-cluster]# for((i=1;i<=6;i++));do redis-server /application/redis/redis-cluster/700$i/redis.conf;done


#Add the master node: 172.16.1.11:7007 is the new node, 172.16.1.11:7001 is any existing node

[root@redis redis-cluster]# /home/lly/tools/redis-3.2.5/src/redis-trib.rb add-node 172.16.1.11:7007 172.16.1.11:7001  
>>> Adding node 172.16.1.11:7007 to cluster 172.16.1.11:7001
>>> Performing Cluster Check (using node 172.16.1.11:7001)
M: 68e9252e2e2404a0ced500a98085acaa5754c7a2172.16.1.11:7001
   slots:0-5460(5461 slots) master
   1 additionalreplica(s)
S: 00371b626a2d42654a0cebb985311aee56578142172.16.1.11:7005
   slots: (0 slots)slave
   replicatesf314dda271d135634d6849cdb649192b58b08d7f
M: 6385ebf9ea346525671b8c339614de4cb2a118cc172.16.1.11:7003
  slots:10923-16383 (5461 slots) master
   1 additionalreplica(s)
S: 37384b8db7e9e5462d2da236d0104207eac26060172.16.1.11:7004
   slots: (0 slots)slave
   replicates68e9252e2e2404a0ced500a98085acaa5754c7a2
M: f314dda271d135634d6849cdb649192b58b08d7f172.16.1.11:7002
   slots:5461-10922(5462 slots) master
   1 additionalreplica(s)
S: 8dc598b1ceb81e397903973eeba02e3e53770b0c172.16.1.11:7006
   slots: (0 slots)slave
   replicates6385ebf9ea346525671b8c339614de4cb2a118cc
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
>>> Send CLUSTER MEET to node 172.16.1.11:7007 to make it join the cluster.
[OK] New node added correctly.


#Check the cluster state

[root@redis redis-cluster]# redis-cli -c -h 172.16.1.11 -p 7001
172.16.1.11:7001> cluster info
cluster_state:ok
cluster_slots_assigned:16384
cluster_slots_ok:16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:7
cluster_size:3
cluster_current_epoch:6
cluster_my_epoch:1
cluster_stats_messages_sent:975
cluster_stats_messages_received:970
172.16.1.11:7001> cluster nodes
 
00371b626a2d42654a0cebb985311aee56578142 172.16.1.11:7005slave f314dda271d135634d6849cdb649192b58b08d7f 0 1481387384680 5 connected
 
6385ebf9ea346525671b8c339614de4cb2a118cc 172.16.1.11:7003master - 0 1481387383128 3 connected 10923-16383
 
37384b8db7e9e5462d2da236d0104207eac26060 172.16.1.11:7004slave 68e9252e2e2404a0ced500a98085acaa5754c7a2 0 1481387382626 4 connected


 

#Note: after the 7007 master is added, the new node holds no data at all, because it has not been assigned any slots (hash slots), so slots must be assigned to it manually. For the master to accept writes it must own slots, which can be taken from the existing nodes. Adding nodes to scale out while the Redis cluster is running does not affect performance.

4a174a6203795d7f57f4be52534d41f01e164f84 172.16.1.11:7007 master - 0 1481387382626 0 connected

 

68e9252e2e2404a0ced500a98085acaa5754c7a2 172.16.1.11:7001myself,master - 0 0 1 connected 0-5460

 

f314dda271d135634d6849cdb649192b58b08d7f 172.16.1.11:7002master - 0 1481387383629 2 connected 5461-10922

 

8dc598b1ceb81e397903973eeba02e3e53770b0c 172.16.1.11:7006slave 6385ebf9ea346525671b8c339614de4cb2a118cc 0 1481387384135 6 connected
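#Optionally, before resharding, the new master can be confirmed to own 0 slots with the info subcommand from 16.4 (a sketch; output omitted):
[root@redis redis-cluster]# /home/lly/tools/redis-3.2.5/src/redis-trib.rb info 172.16.1.11:7001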

 

#Assign slots to 7007 (pick any master node in the cluster and reshard from it)

[root@redis redis-cluster]# /home/lly/tools/redis-3.2.5/src/redis-trib.rb reshard 172.16.1.11:7001
>>> Performing Cluster Check (using node 172.16.1.11:7001)
M: 68e9252e2e2404a0ced500a98085acaa5754c7a2172.16.1.11:7001
   slots:0-5460(5461 slots) master
   1 additionalreplica(s)
S: 00371b626a2d42654a0cebb985311aee56578142172.16.1.11:7005
   slots: (0 slots)slave
   replicatesf314dda271d135634d6849cdb649192b58b08d7f
M: 6385ebf9ea346525671b8c339614de4cb2a118cc172.16.1.11:7003
  slots:10923-16383 (5461 slots) master
   1 additionalreplica(s)
S: 37384b8db7e9e5462d2da236d0104207eac26060172.16.1.11:7004
   slots: (0 slots)slave
   replicates68e9252e2e2404a0ced500a98085acaa5754c7a2
M: 4a174a6203795d7f57f4be52534d41f01e164f84172.16.1.11:7007
   slots: (0 slots)master
   0 additionalreplica(s)
M: f314dda271d135634d6849cdb649192b58b08d7f172.16.1.11:7002
   slots:5461-10922(5462 slots) master
   1 additional replica(s)
S: 8dc598b1ceb81e397903973eeba02e3e53770b0c172.16.1.11:7006
   slots: (0 slots)slave
   replicates6385ebf9ea346525671b8c339614de4cb2a118cc
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.


How many slots do you want to move (from 1 to 16384)? 200  

#200 is how many slots to move to the new node; you can choose the number yourself, e.g. 200 here. A master that owns no slots will never be selected when data is stored or read.

 

What is the receiving node ID? 4a174a6203795d7f57f4be52534d41f01e164f84

#4a174a6203795d7f57f4be52534d41f01e164f84 is 7007's ID, i.e. the node the 200 slots will be moved to (the node ID must be given). At the next prompt, entering all takes the corresponding number of slots from each existing master (7001, 7002, 7003), 200 slots in total, into the specified new node, and prints the resharding plan.

Please enter all the source node IDs.

  Type 'all' to use all the nodes as source nodes for the hash slots.

  Type 'done' once you entered all the source nodes IDs.

Source node #1:all

#You can picture the allocation as a card game: all means every master reshuffles its slots back into the deck, and the receiving node ID entered above is the player who then draws from it, even though it has only just joined the game.

 

Ready to move 200 slots.

  Source nodes:

    M:68e9252e2e2404a0ced500a98085acaa5754c7a2 172.16.1.11:7001

   slots:0-5460(5461 slots) master

   1 additionalreplica(s)

    M:6385ebf9ea346525671b8c339614de4cb2a118cc 172.16.1.11:7003

  slots:10923-16383 (5461 slots) master

   1 additionalreplica(s)

    M:f314dda271d135634d6849cdb649192b58b08d7f 172.16.1.11:7002

   slots:5461-10922(5462 slots) master

   1 additionalreplica(s)

  Destination node:

    M:4a174a6203795d7f57f4be52534d41f01e164f84 172.16.1.11:7007

   slots: (0 slots)master

   0 additionalreplica(s)

 Resharding plan:   #the resharding plan

Moving slot 5461 from f314dda271d135634d6849cdb649192b58b08d7f

... (more slots taken from this node)

Moving slot 10923 from 6385ebf9ea346525671b8c339614de4cb2a118cc

Moving slot 10924 from 6385ebf9ea346525671b8c339614de4cb2a118cc

... (more slots taken from this node)

Moving slot 0 from 68e9252e2e2404a0ced500a98085acaa5754c7a2

... (more slots taken from this node)

Do you want to proceed with the proposed reshard plan (yes/no)? yes

#Type yes to confirm and start the resharding.

Moving slot 5461 from 172.16.1.11:7002 to 172.16.1.11:7007:

......

Moving slot 10923 from 172.16.1.11:7003 to 172.16.1.11:7007:

......

Moving slot 0 from 172.16.1.11:7001 to 172.16.1.11:7007:

......

 

#Check the cluster state

[root@redis redis-cluster]# redis-cli -c -h 172.16.1.11 -p 7007
172.16.1.11:7007> cluster info
cluster_state:ok
cluster_slots_assigned:16384
cluster_slots_ok:16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:7
cluster_size:4
cluster_current_epoch:7
cluster_my_epoch:7
cluster_stats_messages_sent:7616
cluster_stats_messages_received:7611
172.16.1.11:7007> cluster nodes
00371b626a2d42654a0cebb985311aee56578142 172.16.1.11:7005slave f314dda271d135634d6849cdb649192b58b08d7f 0 1481388706933 2 connected
 
6385ebf9ea346525671b8c339614de4cb2a118cc 172.16.1.11:7003master - 0 1481388706427 3 connected 10989-16383
 
4a174a6203795d7f57f4be52534d41f01e164f84 172.16.1.11:7007 myself,master - 0 0 7 connected 0-65 5461-5527 10923-10988
 
8dc598b1ceb81e397903973eeba02e3e53770b0c 172.16.1.11:7006slave 6385ebf9ea346525671b8c339614de4cb2a118cc 0 1481388707436 3 connected
 
37384b8db7e9e5462d2da236d0104207eac26060 172.16.1.11:7004slave 68e9252e2e2404a0ced500a98085acaa5754c7a2 0 1481388706932 1 connected
 
68e9252e2e2404a0ced500a98085acaa5754c7a2 172.16.1.11:7001master - 0 1481388705922 1 connected 66-5460
 
f314dda271d135634d6849cdb649192b58b08d7f 172.16.1.11:7002master - 0 1481388706428 2 connected 5528-10922


 

#Test reads and writes

#Connected via 7007
172.16.1.11:7007> set name17 name17val1
-> Redirected to slot [3305] located at 172.16.1.11:7001
OK
#Redirected to 7001
172.16.1.11:7001> keys *
1) "name17"
172.16.1.11:7001> get name17
"name17val1"
 
#Connected via 7001
#(because a value on 7003 was fetched earlier, the prompt is still on 7003)
172.16.1.11:7003> keys *
1) "name14"
2) "name11"
172.16.1.11:7003> get name17
-> Redirected to slot [3305] located at 172.16.1.11:7001
"name17val1"
#Notice that fetching a value redirects you to whichever node actually holds it
172.16.1.11:7001> keys *
1) "name17"
172.16.1.11:7001> get name17
"name17val1"


 

 

16.6.2.2 Adding slave node 7008
[root@redis ~]# /home/lly/tools/redis-3.2.5/src/redis-trib.rb add-node 172.16.1.11:7008 172.16.1.11:7001


#172.16.1.11:7008 is the new node; 172.16.1.11:7001 is any node already in the cluster

(
You can also run

redis-trib.rb add-node --slave --master-id 4a174a6203795d7f57f4be52534d41f01e164f84 172.16.1.11:7008 172.16.1.11:7001

which specifies the master directly, so there is no need to set the master again afterwards with the replicate command.
)

 

>>> Adding node 172.16.1.11:7008 to cluster 172.16.1.11:7001
>>> Performing Cluster Check (using node 172.16.1.11:7001)
M: 68e9252e2e2404a0ced500a98085acaa5754c7a2172.16.1.11:7001
   slots:66-5460(5395 slots) master
   1 additionalreplica(s)
S: 00371b626a2d42654a0cebb985311aee56578142172.16.1.11:7005
   slots: (0 slots)slave
   replicatesf314dda271d135634d6849cdb649192b58b08d7f
M: 6385ebf9ea346525671b8c339614de4cb2a118cc172.16.1.11:7003
  slots:10989-16383 (5395 slots) master
   1 additionalreplica(s)
S: 37384b8db7e9e5462d2da236d0104207eac26060172.16.1.11:7004
   slots: (0 slots)slave
   replicates68e9252e2e2404a0ced500a98085acaa5754c7a2
M: 4a174a6203795d7f57f4be52534d41f01e164f84172.16.1.11:7007
  slots:0-65,5461-5527,10923-10988 (199 slots) master
   0 additionalreplica(s)
M: f314dda271d135634d6849cdb649192b58b08d7f172.16.1.11:7002
   slots:5528-10922(5395 slots) master
   1 additionalreplica(s)
S: 8dc598b1ceb81e397903973eeba02e3e53770b0c172.16.1.11:7006
   slots: (0 slots)slave
   replicates6385ebf9ea346525671b8c339614de4cb2a118cc
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
>>> Send CLUSTER MEET to node 172.16.1.11:7008 to make it join the cluster.
[OK] New node added correctly.


 

#Check the cluster state

[root@redis ~]# redis-cli -c -h 172.16.1.11 -p 7008
172.16.1.11:7008> cluster info
cluster_state:ok
cluster_slots_assigned:16384
cluster_slots_ok:16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:8
cluster_size:4
cluster_current_epoch:7
cluster_my_epoch:0
cluster_stats_messages_sent:367
cluster_stats_messages_received:367
172.16.1.11:7008> cluster nodes
f314dda271d135634d6849cdb649192b58b08d7f 172.16.1.11:7002master - 0 1481389544668 2 connected 5528-10922
 
37384b8db7e9e5462d2da236d0104207eac26060 172.16.1.11:7004slave 68e9252e2e2404a0ced500a98085acaa5754c7a2 0 1481389544163 1 connected
 
4a174a6203795d7f57f4be52534d41f01e164f84 172.16.1.11:7007master - 0 1481389545171 7 connected 0-65 5461-5527 10923-10988
 
6385ebf9ea346525671b8c339614de4cb2a118cc 172.16.1.11:7003master - 0 1481389543657 3 connected 10989-16383
 
68e9252e2e2404a0ced500a98085acaa5754c7a2 172.16.1.11:7001master - 0 1481389544667 1 connected 66-5460
 
8dc598b1ceb81e397903973eeba02e3e53770b0c 172.16.1.11:7006slave 6385ebf9ea346525671b8c339614de4cb2a118cc 0 1481389545673 3 connected


 

#The entry below is the newly added node; it has not been assigned a master yet

769cee49ada729901b9e9270467aeceff494be13 172.16.1.11:7008 myself,master - 0 0 0 connected

 

00371b626a2d42654a0cebb985311aee56578142 172.16.1.11:7005slave f314dda271d135634d6849cdb649192b58b08d7f 0 1481389545674 2 connected

 

 

#Assign a master to the 7008 slave

#Run the cluster replicate command to tell the current (slave) node which master ID to replicate.

First log in to the newly added 7008 node with the client, then use the cluster command to attach the current 7008 (slave) node to a master (here the 7007 master created earlier, 4a174a6203795d7f57f4be52534d41f01e164f84).

[root@redis ~]# redis-cli -c -h 172.16.1.11 -p 7008
172.16.1.11:7008> cluster replicate 4a174a6203795d7f57f4be52534d41f01e164f84
OK


#Check the cluster information again

[root@redis ~]# redis-cli -c -h 172.16.1.11 -p 7008                                                    
172.16.1.11:7008> cluster info
cluster_state:ok
cluster_slots_assigned:16384
cluster_slots_ok:16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:8
cluster_size:4
cluster_current_epoch:7
cluster_my_epoch:7
cluster_stats_messages_sent:3199
cluster_stats_messages_received:3199
172.16.1.11:7008> cluster nodes
f314dda271d135634d6849cdb649192b58b08d7f 172.16.1.11:7002master - 0 1481390061799 2 connected 5528-10922
 
37384b8db7e9e5462d2da236d0104207eac26060 172.16.1.11:7004slave 68e9252e2e2404a0ced500a98085acaa5754c7a2 0 1481390061800 1 connected
 
4a174a6203795d7f57f4be52534d41f01e164f84 172.16.1.11:7007master - 0 1481390059784 7 connected 0-65 5461-5527 10923-10988
 
6385ebf9ea346525671b8c339614de4cb2a118cc 172.16.1.11:7003master - 0 1481390060289 3 connected 10989-16383
 
68e9252e2e2404a0ced500a98085acaa5754c7a2 172.16.1.11:7001master - 0 1481390059784 1 connected 66-5460
 
8dc598b1ceb81e397903973eeba02e3e53770b0c 172.16.1.11:7006slave 6385ebf9ea346525671b8c339614de4cb2a118cc 0 1481390060791 3 connected
 
#You can see that 7008 is now a slave of the 7007 master
769cee49ada729901b9e9270467aeceff494be13 172.16.1.11:7008 myself,slave 4a174a6203795d7f57f4be52534d41f01e164f84 0 0 0 connected
 
00371b626a2d42654a0cebb985311aee56578142 172.16.1.11:7005slave f314dda271d135634d6849cdb649192b58b08d7f 0 1481390061294 2 connected


 

#Test the new node

172.16.1.11:7008> keys *
(empty list or set)
172.16.1.11:7008> get name17
-> Redirected to slot [3305] located at 172.16.1.11:7001
"name17val1"
172.16.1.11:7001>
172.16.1.11:7008> set name18 name18val1
-> Redirected to slot [15622] located at 172.16.1.11:7003
OK
172.16.1.11:7003> get name18
"name18val1"
172.16.1.11:7003> keys *
1) "name14"
2) "name11"
3) "name18"


 

 

#If an error occurs while adding the new slave node, fix it as follows:

[root@redis ~]# /home/lly/tools/redis-3.2.5/src/redis-trib.rb add-node --slave --master-id 4a174a6203795d7f57f4be52534d41f01e164f84 172.16.1.11:7008 172.16.1.11:7001

The error reported:

[ERR] Node 172.16.1.11:7008 is not empty. Either the node already knows other nodes (check with CLUSTER NODES) or contains some key in database 0.

1. Stop the corresponding Redis node, delete appendonly.aof, dump.rdb and the nodes-700*.conf file from that node's directory, restart redis-server, and then retry the add-node operation (a sketch of this follows below).
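#A sketch of that cleanup for the 7008 node (paths assumed from the layout used above):
[root@redis ~]# redis-cli -c -h 172.16.1.11 -p 7008 shutdown
[root@redis ~]# cd /application/redis/redis-cluster/7008
[root@redis 7008]# rm -f appendonly.aof dump.rdb nodes-7008.conf
[root@redis 7008]# redis-server redis.conf
#then rerun the add-node command above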

 

16.6.3 Removing nodes

16.6.3.1 Removing slave node 7008

#769cee49ada729901b9e9270467aeceff494be13 is 7008's ID

[root@redis ~]# /home/lly/tools/redis-3.2.5/src/redis-trib.rb del-node 172.16.1.11:7008 769cee49ada729901b9e9270467aeceff494be13
>>> Removing node 769cee49ada729901b9e9270467aeceff494be13 from cluster 172.16.1.11:7008
>>> Sending CLUSTER FORGET messages to the cluster...
>>> SHUTDOWN the node.


 

#Check whether the 7008 Redis instance can still be connected to

[root@redis ~]# redis-cli -c -h 172.16.1.11 -p 7008
Could not connect to Redis at 172.16.1.11:7008: Connection refused
Could not connect to Redis at 172.16.1.11:7008: Connection refused


 

#Check whether a 7008 process exists: removing the node also shut its service down

[root@redis ~]# ps -ef|grep redis|grep -v grep
root      5372      1  0 00:21 ?        00:00:15 redis-server 172.16.1.11:7007[cluster]                      
root      5394      1  0 00:26 ?        00:00:15 redis-server 172.16.1.11:7001[cluster]                      
root      5398      1  0 00:26 ?        00:00:15 redis-server 172.16.1.11:7002[cluster]                      
root      5402      1  0 00:26 ?        00:00:15 redis-server 172.16.1.11:7003[cluster]                      
root      5406      1  0 00:26 ?        00:00:14 redis-server 172.16.1.11:7004[cluster]                      
root      5410      1  0 00:26 ?        00:00:14 redis-server 172.16.1.11:7005[cluster]                      
root      5414      1  0 00:26 ?        00:00:14 redis-server 172.16.1.11:7006[cluster]


 

#Check the cluster information

172.16.1.11:7001> cluster info
cluster_state:ok
cluster_slots_assigned:16384
cluster_slots_ok:16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:7
cluster_size:4
cluster_current_epoch:7
cluster_my_epoch:1
cluster_stats_messages_sent:21816
cluster_stats_messages_received:21807
172.16.1.11:7001> cluster nodes
00371b626a2d42654a0cebb985311aee56578142 172.16.1.11:7005slave f314dda271d135634d6849cdb649192b58b08d7f 0 1481391195529 5 connected
 
6385ebf9ea346525671b8c339614de4cb2a118cc 172.16.1.11:7003master - 0 1481391194014 3 connected 10989-16383
 
37384b8db7e9e5462d2da236d0104207eac26060 172.16.1.11:7004slave 68e9252e2e2404a0ced500a98085acaa5754c7a2 0 1481391194519 4 connected
 
4a174a6203795d7f57f4be52534d41f01e164f84 172.16.1.11:7007master - 0 1481391194519 7 connected 0-65 5461-5527 10923-10988
 
68e9252e2e2404a0ced500a98085acaa5754c7a2 172.16.1.11:7001myself,master - 0 0 1 connected 66-5460
 
f314dda271d135634d6849cdb649192b58b08d7f 172.16.1.11:7002master - 0 1481391196028 2 connected 5528-10922
 
8dc598b1ceb81e397903973eeba02e3e53770b0c 172.16.1.11:7006slave 6385ebf9ea346525671b8c339614de4cb2a118cc 0 1481391195022 6 connected


 

#Check whether the value set via 7008 still exists (the name18 key from "16.6.2.2 Adding slave node 7008")

#Test on 7003

172.16.1.11:7003> keys *
1) "name14"
2) "name11"
3) "name18"
# Even though the slave node has been removed, the name18 key that was set through it still exists
172.16.1.11:7003> get name18
"name18val1"


 

 

 

 

16.6.3.2 Removing master node 7007

  #Before removing the 7007 master, all of its data (slots) must first be moved to other nodes, and only then can the node be removed, otherwise data will be lost. (Currently the data can only be migrated to a single node; there is no even redistribution yet.)

 

16.6.3.2.1 The master has no slave

#Taking back the assigned slots

[root@redis ~]# /home/lly/tools/redis-3.2.5/src/redis-trib.rb reshard 172.16.1.11:7007
>>> Performing Cluster Check (using node 172.16.1.11:7007)


M:4a174a6203795d7f57f4be52534d41f01e164f84 172.16.1.11:7007

  slots:0-65,5461-5527,10923-10988 (199 slots)master

  0 additional replica(s)

M: 68e9252e2e2404a0ced500a98085acaa5754c7a2172.16.1.11:7001
   slots:66-5460(5395 slots) master
   1 additionalreplica(s)
S: 8dc598b1ceb81e397903973eeba02e3e53770b0c172.16.1.11:7006
   slots: (0 slots)slave
   replicates6385ebf9ea346525671b8c339614de4cb2a118cc
M: f314dda271d135634d6849cdb649192b58b08d7f 172.16.1.11:7002
   slots:5528-10922(5395 slots) master
   1 additionalreplica(s)
S: 37384b8db7e9e5462d2da236d0104207eac26060172.16.1.11:7004
   slots: (0 slots)slave
   replicates68e9252e2e2404a0ced500a98085acaa5754c7a2
M: 6385ebf9ea346525671b8c339614de4cb2a118cc172.16.1.11:7003
  slots:10989-16383 (5395 slots) master
   1 additionalreplica(s)
S: 00371b626a2d42654a0cebb985311aee56578142172.16.1.11:7005
   slots: (0 slots)slave
   replicatesf314dda271d135634d6849cdb649192b58b08d7f
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.


How many slots do you want to move (from 1 to 16384)? 199

#Note the slot count here is 199, not exactly 200: the earlier "200-slot" reshard actually assigned 199 slots (0-65, 5461-5527 and 10923-10988, i.e. 66+67+66), apparently because of rounding when the total was split across the three source masters.

What is the receiving node ID? 68e9252e2e2404a0ced500a98085acaa5754c7a2

#Enter 7001's ID here: the slots are moved to 7001

Please enter all the source node IDs.

  Type 'all' to use all the nodes as source nodes for the hash slots.

  Type 'done' once you entered all the source nodes IDs.

Source node #1:4a174a6203795d7f57f4be52534d41f01e164f84

#This is the ID of the 7007 master that is being removed

Source node #2:done

#Type done to generate the migration plan

Ready to move 199 slots.

  Source nodes:

    M:4a174a6203795d7f57f4be52534d41f01e164f84 172.16.1.11:7007

  slots:0-65,5461-5527,10923-10988 (199 slots) master

   0 additionalreplica(s)

  Destination node:

    M:68e9252e2e2404a0ced500a98085acaa5754c7a2 172.16.1.11:7001

   slots:66-5460(5395 slots) master

   1 additionalreplica(s)

  Resharding plan:

    Moving slot 0 from 4a174a6203795d7f57f4be52534d41f01e164f84

........ (the slots being moved)

    Moving slot 10988 from 4a174a6203795d7f57f4be52534d41f01e164f84

Do you want to proceed with the proposed reshard plan (yes/no)? yes

#Type yes to start the migration

Moving slot 0 from 172.16.1.11:7007 to 172.16.1.11:7001:

........ (the slots being moved)

Moving slot 10988 from 172.16.1.11:7007 to 172.16.1.11:7001:

 

 

#Check the cluster information

172.16.1.11:7001> cluster info

cluster_state:ok

cluster_slots_assigned:16384

cluster_slots_ok:16384

cluster_slots_pfail:0

cluster_slots_fail:0

cluster_known_nodes:7

cluster_size:3

cluster_current_epoch:8

cluster_my_epoch:8

cluster_stats_messages_sent:7598

cluster_stats_messages_received:7592

172.16.1.11:7001> cluster nodes


4a174a6203795d7f57f4be52534d41f01e164f84 172.16.1.11:7007 master - 0 1481393144924 7 connected

 

00371b626a2d42654a0cebb985311aee56578142 172.16.1.11:7005slave f314dda271d135634d6849cdb649192b58b08d7f 0 1481393144412 5 connected

 

8dc598b1ceb81e397903973eeba02e3e53770b0c 172.16.1.11:7006slave 6385ebf9ea346525671b8c339614de4cb2a118cc 0 1481393143402 6 connected

 

f314dda271d135634d6849cdb649192b58b08d7f 172.16.1.11:7002master - 0 1481393143908 2 connected 5528-10922

 

#You can see that 7007's slots have now been assigned to 7001

68e9252e2e2404a0ced500a98085acaa5754c7a2 172.16.1.11:7001 myself,master - 0 0 8 connected 0-5527 10923-10988

#Before 7007's slots were moved over, 7001's entry looked like this:

68e9252e2e2404a0ced500a98085acaa5754c7a2 172.16.1.11:7001 myself,master - 0 0 1 connected 66-5460

 

6385ebf9ea346525671b8c339614de4cb2a118cc 172.16.1.11:7003master - 0 1481393142899 3 connected 10989-16383

 

37384b8db7e9e5462d2da236d0104207eac26060 172.16.1.11:7004slave 68e9252e2e2404a0ced500a98085acaa5754c7a2 0 1481393143403 8 connected

 

#Check the cluster processes

[root@redis ~]# ps -ef|grep redis|grep -v grep
root      5613      1  0 01:41 ?        00:00:06 redis-server 172.16.1.11:7001[cluster]                      
root      5615      1  0 01:41 ?        00:00:06 redis-server 172.16.1.11:7002[cluster]                      
root      5621      1  0 01:41 ?        00:00:06 redis-server 172.16.1.11:7003[cluster]                      
root      5625      1  0 01:41 ?        00:00:06 redis-server 172.16.1.11:7004[cluster]                      
root      5629      1  0 01:41 ?        00:00:06 redis-server 172.16.1.11:7005[cluster]                      
root      5633      1  0 01:41 ?        00:00:06 redis-server 172.16.1.11:7006[cluster]

           

#At this point the 7007 process still exists

root       5635     1  0 01:41 ?        00:00:06 redis-server 172.16.1.11:7007[cluster]  

 

#Remove the 7007 master

[root@redis ~]# /home/lly/tools/redis-3.2.5/src/redis-trib.rb del-node 172.16.1.11:7007 4a174a6203795d7f57f4be52534d41f01e164f84

>>> Removing node 4a174a6203795d7f57f4be52534d41f01e164f84 from cluster 172.16.1.11:7007

>>> Sending CLUSTER FORGET messages to the cluster...

>>> SHUTDOWN the node.

 

#Check the cluster information: the 7007 node is gone

172.16.1.11:7001> cluster info

cluster_state:ok

cluster_slots_assigned:16384

cluster_slots_ok:16384

cluster_slots_pfail:0

cluster_slots_fail:0

cluster_known_nodes:6

cluster_size:3

cluster_current_epoch:8

cluster_my_epoch:8

cluster_stats_messages_sent:9384

cluster_stats_messages_received:9378

172.16.1.11:7001> cluster nodes

00371b626a2d42654a0cebb985311aee56578142 172.16.1.11:7005slave f314dda271d135634d6849cdb649192b58b08d7f 0 1481393501468 5 connected

8dc598b1ceb81e397903973eeba02e3e53770b0c 172.16.1.11:7006slave 6385ebf9ea346525671b8c339614de4cb2a118cc 0 1481393500456 6 connected

f314dda271d135634d6849cdb649192b58b08d7f 172.16.1.11:7002master - 0 1481393500965 2 connected 5528-10922

68e9252e2e2404a0ced500a98085acaa5754c7a2 172.16.1.11:7001myself,master - 0 0 8 connected 0-5527 10923-10988

6385ebf9ea346525671b8c339614de4cb2a118cc 172.16.1.11:7003master - 0 1481393501468 3 connected 10989-16383

37384b8db7e9e5462d2da236d0104207eac26060 172.16.1.11:7004slave 68e9252e2e2404a0ced500a98085acaa5754c7a2 0 1481393499450 8 connected

 

#Check the Redis processes: the 7007 process was stopped automatically

[root@redis ~]# ps -ef|grep redis|grep -v grep
root      5613      1  0 01:41 ?        00:00:07 redis-server 172.16.1.11:7001[cluster]                      
root      5615      1  0 01:41 ?        00:00:06 redis-server 172.16.1.11:7002[cluster]                      
root      5621      1  0 01:41 ?        00:00:07 redis-server 172.16.1.11:7003[cluster]                      
root      5625      1  0 01:41 ?        00:00:07 redis-server 172.16.1.11:7004[cluster]                      
root      5629      1  0 01:41 ?        00:00:06 redis-server 172.16.1.11:7005[cluster]                      
root      5633      1  0 01:41 ?        00:00:06 redis-server 172.16.1.11:7006[cluster]


 

 

 

 

16.6.3.2.2 The master has a slave

#First re-add the 7007 master and the 7008 slave, following "16.6.2 Adding nodes"

 

#Node information before the removal

[root@redis ~]# redis-cli -c -h 172.16.1.11 -p 7001

172.16.1.11:7008> cluster nodes

37384b8db7e9e5462d2da236d0104207eac26060 172.16.1.11:7004slave 68e9252e2e2404a0ced500a98085acaa5754c7a2 0 1481393974867 8 connected

 

8c965e903668a7947727edd8ba9288205b0190cc172.16.1.11:7008 myself,slave 3724b337e95391596114f25f855f97b973994ca3 0 0 0connected

 

68e9252e2e2404a0ced500a98085acaa5754c7a2 172.16.1.11:7001master - 0 1481393974364 8 connected 69-5527 10923-10988

 

3724b337e95391596114f25f855f97b973994ca3 172.16.1.11:7007 master - 0 1481393973859 9 connected 0-68 5528-5592 10989-11053

 

00371b626a2d42654a0cebb985311aee56578142 172.16.1.11:7005slave f314dda271d135634d6849cdb649192b58b08d7f 0 1481393973864 2 connected

 

f314dda271d135634d6849cdb649192b58b08d7f 172.16.1.11:7002master - 0 1481393974364 2 connected 5593-10922

 

8dc598b1ceb81e397903973eeba02e3e53770b0c 172.16.1.11:7006slave 6385ebf9ea346525671b8c339614de4cb2a118cc 0 1481393972856 3 connected

 

6385ebf9ea346525671b8c339614de4cb2a118cc 172.16.1.11:7003master - 0 1481393973355 3 connected 11054-16383

 

#Take back the assigned slots

[root@redis ~]# /home/lly/tools/redis-3.2.5/src/redis-trib.rb reshard 172.16.1.11:7007


>>> Performing Cluster Check (using node 172.16.1.11:7007)

M:3724b337e95391596114f25f855f97b973994ca3 172.16.1.11:7007

  slots:0-68,5528-5592,10989-11053 (199 slots)master

  1 additional replica(s)

M: 6385ebf9ea346525671b8c339614de4cb2a118cc172.16.1.11:7003

  slots:11054-16383 (5330 slots) master

   1 additionalreplica(s)

S: 00371b626a2d42654a0cebb985311aee56578142 172.16.1.11:7005

   slots: (0 slots)slave

   replicatesf314dda271d135634d6849cdb649192b58b08d7f

M: 68e9252e2e2404a0ced500a98085acaa5754c7a2172.16.1.11:7001

  slots:69-5527,10923-10988 (5525 slots) master

   1 additionalreplica(s)

S: 8c965e903668a7947727edd8ba9288205b0190cc172.16.1.11:7008

   slots: (0 slots)slave

   replicates3724b337e95391596114f25f855f97b973994ca3

S: 8dc598b1ceb81e397903973eeba02e3e53770b0c172.16.1.11:7006

   slots: (0 slots)slave

   replicates6385ebf9ea346525671b8c339614de4cb2a118cc

S: 37384b8db7e9e5462d2da236d0104207eac26060172.16.1.11:7004

   slots: (0 slots)slave

   replicates68e9252e2e2404a0ced500a98085acaa5754c7a2

M: f314dda271d135634d6849cdb649192b58b08d7f172.16.1.11:7002

   slots:5593-10922(5330 slots) master

   1 additionalreplica(s)

[OK] All nodes agree about slots configuration.

>>> Check for open slots...

>>> Check slots coverage...

[OK] All 16384 slots covered.

How many slots do you want to move (from 1 to 16384)? 199

What is the receiving node ID? 68e9252e2e2404a0ced500a98085acaa5754c7a2

Please enter all the source node IDs.

  Type 'all' to use all the nodes as source nodes for the hash slots.

  Type 'done' once you entered all the source nodes IDs.

Source node #1:3724b337e95391596114f25f855f97b973994ca3

Source node #2:done

Ready to move 199 slots.

  Source nodes:

    M:3724b337e95391596114f25f855f97b973994ca3 172.16.1.11:7007

  slots:0-68,5528-5592,10989-11053 (199 slots) master

   1 additionalreplica(s)

  Destination node:

    M:68e9252e2e2404a0ced500a98085acaa5754c7a2 172.16.1.11:7001

  slots:69-5527,10923-10988 (5525 slots) master

   1 additionalreplica(s)

  Resharding plan:

Moving slot 0 from3724b337e95391596114f25f855f97b973994ca3

.. (the rest of the slots being moved)

Moving slot 11053 from 3724b337e95391596114f25f855f97b973994ca3

Do you want to proceed with the proposed reshard plan (yes/no)? yes

Moving slot 0 from 172.16.1.11:7007 to 172.16.1.11:7001:

.. (the rest of the slots being moved)

Moving slot 11053 from 172.16.1.11:7007 to 172.16.1.11:7001:

 

#Remove the 7007 node by its ID

[root@redis ~]# /home/lly/tools/redis-3.2.5/src/redis-trib.rb del-node 172.16.1.11:7007 3724b337e95391596114f25f855f97b973994ca3
>>> Removing node 3724b337e95391596114f25f855f97b973994ca3 from cluster 172.16.1.11:7007
>>> Sending CLUSTER FORGET messages to the cluster...
>>> SHUTDOWN the node.


 

#Check the cluster node information

[root@redis ~]# redis-cli -c -h 172.16.1.11 -p 7001


172.16.1.11:7001> cluster nodes

00371b626a2d42654a0cebb985311aee56578142 172.16.1.11:7005slave f314dda271d135634d6849cdb649192b58b08d7f 0 1481394169450 5 connected

 

#You can see that 7008's master has become 7001

8c965e903668a7947727edd8ba9288205b0190cc 172.16.1.11:7008 slave 68e9252e2e2404a0ced500a98085acaa5754c7a2 0 1481394169952 10 connected

 

8dc598b1ceb81e397903973eeba02e3e53770b0c 172.16.1.11:7006slave 6385ebf9ea346525671b8c339614de4cb2a118cc 0 1481394170457 6 connected

 

f314dda271d135634d6849cdb649192b58b08d7f 172.16.1.11:7002master - 0 1481394170457 2 connected 5593-10922

 

#7001 now has two slaves (7004 and 7008)

68e9252e2e2404a0ced500a98085acaa5754c7a2 172.16.1.11:7001 myself,master - 0 0 10 connected 0-5592 10923-11053

 

6385ebf9ea346525671b8c339614de4cb2a118cc 172.16.1.11:7003master - 0 1481394171466 3 connected 11054-16383

 

37384b8db7e9e5462d2da236d0104207eac26060 172.16.1.11:7004 slave 68e9252e2e2404a0ced500a98085acaa5754c7a2 0 1481394170963 10 connected

172.16.1.11:7001> cluster info
cluster_state:ok
cluster_slots_assigned:16384
cluster_slots_ok:16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:7
cluster_size:3
cluster_current_epoch:10
cluster_my_epoch:10
cluster_stats_messages_sent:12873
cluster_stats_messages_received:12860


 

#The 7007 process has been stopped

[root@redis ~]# ps -ef|grep redis|grep -v grep
root      5613      1  0 01:41 ?        00:00:11 redis-server 172.16.1.11:7001[cluster]                      
root      5615      1  0 01:41 ?        00:00:10 redis-server 172.16.1.11:7002[cluster]                      
root      5621      1  0 01:41 ?        00:00:10 redis-server 172.16.1.11:7003[cluster]                      
root      5625      1  0 01:41 ?        00:00:10 redis-server 172.16.1.11:7004[cluster]                      
root      5629      1  0 01:41 ?        00:00:10 redis-server 172.16.1.11:7005[cluster]                      
root      5633      1  0 01:41 ?        00:00:10 redis-server 172.16.1.11:7006[cluster]                      
root      5752      1  0 02:16 ?        00:00:02 redis-server 172.16.1.11:7008[cluster]