複製集是額外的數據副本,是跨多個服務器同步數據的過程,複製集提供了冗餘並增長了數據可用性,經過複製集能夠對硬件故障和中斷的服務進行恢復。
1.配置複製集
(1)建立數據文件和日誌文件存儲路徑
[root@localhost ~]# mkdir -p /data/mongodb/mongodb{2,3,4} [root@localhost ~]# cd /data/mongodb/ [root@localhost mongodb]# mkdir logs [root@localhost mongodb]# touch logs/mongodb{2,3,4}.log [root@localhost mongodb]# cd logs/ [root@localhost logs]# ls mongodb2.log mongodb3.log mongodb4.log [root@localhost logs]# chmod 777 *.log
(2)編輯4個MongoDB實例的配置文件
先編輯Mongodb的配置文件,配置replSet參數值都爲kgcrs,並複製3份,具體操作以下:
[root@localhost etc]# vim mongod.conf path: /var/log/mongodb/mongod.log # Where and how to store data. storage: dbPath: /var/lib/mongo journal: enabled: true # engine: # mmapv1: # wiredTiger: # how the process runs processManagement: fork: true # fork and run in background pidFilePath: /var/run/mongodb/mongod.pid # location of pidfile timeZoneInfo: /usr/share/zoneinfo # network interfaces net: port: 27017 bindIp: 0.0.0.0 # Listen to local interface only, comment to listen on all interfaces. #security: #operationProfiling: replication: replSetName: kgcrs #sharding: ## Enterprise-Only Options #auditLog: #snmp:
而後將mongodb2.conf中的port參數配置爲27018,mongodb3.conf中的port參數配置爲27019,mongodb4.conf中的port參數配置爲27020。一樣也將dbpath和logpath參數修改爲對應的路徑值。
(3)啓動4個MongoDB節點實例並查看進程信息
[root@localhost etc]# mongod -f /etc/mongod.conf --shutdown //先關閉// [root@localhost etc]# mongod -f /etc/mongod.conf //再開啓// [root@localhost etc]# mongod -f /etc/mongod2.conf [root@localhost etc]# mongod -f /etc/mongod3.conf [root@localhost etc]# mongod -f /etc/mongod4.conf [root@localhost etc]# netstat -ntap | grep mongod tcp 0 0 0.0.0.0:27019 0.0.0.0:* LISTEN 17868/mongod tcp 0 0 0.0.0.0:27020 0.0.0.0:* LISTEN 17896/mongod tcp 0 0 0.0.0.0:27017 0.0.0.0:* LISTEN 17116/mongod tcp 0 0 0.0.0.0:27018 0.0.0.0:* LISTEN 17413/mongod
(4)配置三個節點的複製集
[root@localhost etc]# mongo > rs.status() //查看複製集// { "info" : "run rs.initiate(...) if not yet done for the set", "ok" : 0, "errmsg" : "no replset config has been received", "code" : 94, "codeName" : "NotYetInitialized", "$clusterTime" : { "clusterTime" : Timestamp(0, 0), "signature" : { "hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="), "keyId" : NumberLong(0) } } } > cfg={"_id":"kgcrs","members":[{"_id":0,"host":"192.168.126.132:27017"},{"_id":1,"host":"192.168.126.132:27018"},{"_id":2,"host":"192.168.126.132:27019"}]} //添加複製集// { "_id" : "kgcrs", "members" : [ { "_id" : 0, "host" : "192.168.126.132:27017" }, { "_id" : 1, "host" : "192.168.126.132:27018" }, { "_id" : 2, "host" : "192.168.126.132:27019" } ] } > rs.initiate(cfg) //初始化配置時保證從節點沒有數據//
(5)查看複製集狀態
啓動複製集後,再次經過rs.status()命令查看複製集的完整狀態信息
kgcrs:SECONDARY> rs.status() { "set" : "kgcrs", "date" : ISODate("2018-07-17T07:18:52.047Z"), "myState" : 1, "term" : NumberLong(1), "syncingTo" : "", "syncSourceHost" : "", "syncSourceId" : -1, "heartbeatIntervalMillis" : NumberLong(2000), "optimes" : { "lastCommittedOpTime" : { "ts" : Timestamp(1531811928, 1), "t" : NumberLong(1) }, "readConcernMajorityOpTime" : { "ts" : Timestamp(1531811928, 1), "t" : NumberLong(1) }, "appliedOpTime" : { "ts" : Timestamp(1531811928, 1), "t" : NumberLong(1) }, "durableOpTime" : { "ts" : Timestamp(1531811928, 1), "t" : NumberLong(1) } }, "members" : [ { "_id" : 0, "name" : "192.168.126.132:27017", "health" : 1, "state" : 1, "stateStr" : "PRIMARY", //主節點// "uptime" : 2855, "optime" : { "ts" : Timestamp(1531811928, 1), "t" : NumberLong(1) }, "optimeDate" : ISODate("2018-07-17T07:18:48Z"), "syncingTo" : "", "syncSourceHost" : "", "syncSourceId" : -1, "infoMessage" : "could not find member to sync from", "electionTime" : Timestamp(1531811847, 1), "electionDate" : ISODate("2018-07-17T07:17:27Z"), "configVersion" : 1, "self" : true, "lastHeartbeatMessage" : "" }, { "_id" : 1, "name" : "192.168.126.132:27018", "health" : 1, "state" : 2, "stateStr" : "SECONDARY", //從節點// "uptime" : 95, "optime" : { "ts" : Timestamp(1531811928, 1), "t" : NumberLong(1) }, "optimeDurable" : { "ts" : Timestamp(1531811928, 1), "t" : NumberLong(1) }, "optimeDate" : ISODate("2018-07-17T07:18:48Z"), "optimeDurableDate" : ISODate("2018-07-17T07:18:48Z"), "lastHeartbeat" : ISODate("2018-07-17T07:18:51.208Z"), "lastHeartbeatRecv" : ISODate("2018-07-17T07:18:51.720Z"), "pingMs" : NumberLong(0), "lastHeartbeatMessage" : "", "syncingTo" : "192.168.126.132:27017", "syncSourceHost" : "192.168.126.132:27017", "syncSourceId" : 0, "infoMessage" : "", "configVersion" : 1 }, { "_id" : 2, "name" : "192.168.126.132:27019", "health" : 1, "state" : 2, "stateStr" : "SECONDARY", //從節點// "uptime" : 95, "optime" : { "ts" : Timestamp(1531811928, 1), "t" : NumberLong(1) }, 
"optimeDurable" : { "ts" : Timestamp(1531811928, 1), "t" : NumberLong(1) }, "optimeDate" : ISODate("2018-07-17T07:18:48Z"), "optimeDurableDate" : ISODate("2018-07-17T07:18:48Z"), "lastHeartbeat" : ISODate("2018-07-17T07:18:51.208Z"), "lastHeartbeatRecv" : ISODate("2018-07-17T07:18:51.822Z"), "pingMs" : NumberLong(0), "lastHeartbeatMessage" : "", "syncingTo" : "192.168.126.132:27017", "syncSourceHost" : "192.168.126.132:27017", "syncSourceId" : 0, "infoMessage" : "", "configVersion" : 1 } ], "ok" : 1, "operationTime" : Timestamp(1531811928, 1), "$clusterTime" : { "clusterTime" : Timestamp(1531811928, 1), "signature" : { "hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="), "keyId" : NumberLong(0) } } }
其中,health爲1表明健康,0表明宕機。state爲1表明主節點,爲2表明從節點。
在複製集初始化配置時要保證從節點上沒有數據
MongoDB複製集能夠實現羣集的高可用,當其中主節點出現故障時會自動切換到其餘節點。也可手動進行複製集的主從切換。
1.故障轉移切換
[root@localhost etc]# ps aux | grep mongod //查看進程// root 17116 1.2 5.8 1546916 58140 ? Sl 14:31 0:51 mongod -f /etc/mongod.conf root 17413 1.0 5.7 1445624 57444 ? Sl 14:34 0:39 mongod -f /etc/mongod2.conf root 17868 1.2 5.5 1446752 55032 ? Sl 15:05 0:23 mongod -f /etc/mongod3.conf root 17896 0.8 4.7 1037208 47552 ? Sl 15:05 0:16 mongod -f /etc/mongod4.conf root 18836 0.0 0.0 112676 980 pts/1 S+ 15:38 0:00 grep --color=auto mongod [root@localhost etc]# kill -9 17116 ///殺死27017進程// [root@localhost etc]# ps aux | grep mongod root 17413 1.0 5.7 1453820 57456 ? Sl 14:34 0:40 mongod -f /etc/mongod2.conf root 17868 1.2 5.5 1454948 55056 ? Sl 15:05 0:24 mongod -f /etc/mongod3.conf root 17896 0.8 4.7 1037208 47552 ? Sl 15:05 0:16 mongod -f /etc/mongod4.conf root 18843 0.0 0.0 112676 976 pts/1 R+ 15:38 0:00 grep --color=auto mongod [root@localhost etc]# mongo --port 27019 kgcrs:PRIMARY> rs.status() "members" : [ { "_id" : 0, "name" : "192.168.126.132:27017", "health" : 0, //宕機狀態// "state" : 8, "stateStr" : "(not reachable/healthy)", "uptime" : 0, "optime" : { "ts" : Timestamp(0, 0), "t" : NumberLong(-1) { "_id" : 1, "name" : "192.168.126.132:27018", "health" : 1, "state" : 2, "stateStr" : "SECONDARY", //從節點// "uptime" : 1467, "optime" : { "ts" : Timestamp(1531813296, 1), "t" : NumberLong(2) }, "optimeDurable" : { "ts" : Timestamp(1531813296, 1), "t" : NumberLong(2) }, { "_id" : 2, "name" : "192.168.126.132:27019", "health" : 1, "state" : 1, "stateStr" : "PRIMARY", //主節點// "uptime" : 2178, "optime" : { "ts" : Timestamp(1531813296, 1), "t" : NumberLong(2) }
2.手動進行主從切換
kgcrs:PRIMARY> rs.freeze(30) //暫停30s不參與選舉 kgcrs:PRIMARY> rs.stepDown(60,30) //交出主節點位置,維持從節點狀態很多於60秒,等待30秒使主節點和從節點日誌同步 2018-07-17T15:46:19.079+0800 E QUERY [thread1] Error: error doing query: failed: network error while attempting to run command 'replSetStepDown' on host '127.0.0.1:27019' : DB.prototype.runCommand@src/mongo/shell/db.js:168:1 DB.prototype.adminCommand@src/mongo/shell/db.js:186:16 rs.stepDown@src/mongo/shell/utils.js:1341:12 @(shell):1:1 2018-07-17T15:46:19.082+0800 I NETWORK [thread1] trying reconnect to 127.0.0.1:27019 (127.0.0.1) failed 2018-07-17T15:46:19.085+0800 I NETWORK [thread1] reconnect 127.0.0.1:27019 (127.0.0.1) ok kgcrs:SECONDARY> //交出主節點後立馬變成從節點// kgcrs:SECONDARY> rs.status() "_id" : 0, "name" : "192.168.126.132:27017", "health" : 0, //宕機狀態// "state" : 8, "stateStr" : "(not reachable/healthy)", "uptime" : 0, "optime" : { "ts" : Timestamp(0, 0), "t" : NumberLong(-1) }, { "_id" : 1, "name" : "192.168.126.132:27018", "health" : 1, "state" : 1, "stateStr" : "PRIMARY", //主節點狀態// "uptime" : 1851, "optime" : { "ts" : Timestamp(1531813679, 1), "t" : NumberLong(3) { "_id" : 2, "name" : "192.168.126.132:27019", "health" : 1, "state" : 2, "stateStr" : "SECONDARY", //從節點狀態// "uptime" : 2563, "optime" : { "ts" : Timestamp(1531813689, 1), "t" : NumberLong(3)
節點類型分爲標準節點(host)、被動節點(passive)和仲裁節點(arbiter)。
1.配置複製集的優先級
1)從新配置4個節點的MongoDB複製集,設置兩個標準節點,一個被動節點和一個仲裁節點。
[root@localhost etc]# mongo > cfg={"_id":"kgcrs","members":[{"_id":0,"host":"192.168.126.132:27017","priority":100},{"_id":1,"host":"192.168.126.132:27018","priority":100},{"_id":2,"host":"192.168.126.132:27019","priority":0},{"_id":3,"host":"192.168.126.132:27020","arbiterOnly":true}]} > rs.initiate(cfg) //從新配置// kgcrs:SECONDARY> rs.isMaster() { "hosts" : [ //標準節點// "192.168.126.132:27017", "192.168.126.132:27018" ], "passives" : [ //被動節點// "192.168.126.132:27019" ], "arbiters" : [ //仲裁節點// "192.168.126.132:27020"
2)模擬主節點故障
若是主節點出現故障,另外一個標準節點將會選舉成爲新的主節點
[root@localhost etc]# mongod -f /etc/mongod.conf --shutdown //標準節點27017// [root@localhost etc]# mongo --port 27018 //此時會選舉第二個標準節點爲主節點// kgcrs:PRIMARY> rs.status() "_id" : 0, "name" : "192.168.126.132:27017", "health" : 0, //宕機狀態// "state" : 8, "stateStr" : "(not reachable/healthy)", "uptime" : 0, "optime" : { "ts" : Timestamp(0, 0), "t" : NumberLong(-1) "_id" : 1, "name" : "192.168.126.132:27018", "health" : 1, "state" : 1, "stateStr" : "PRIMARY", //標準節點// "uptime" : 879, "optime" : { "ts" : Timestamp(1531817473, 1), "t" : NumberLong(2) "_id" : 2, "name" : "192.168.126.132:27019", "health" : 1, "state" : 2, "stateStr" : "SECONDARY", //被動節點// "uptime" : 569, "optime" : { "ts" : Timestamp(1531817473, 1), "t" : NumberLong(2) "_id" : 3, "name" : "192.168.126.132:27020", "health" : 1, "state" : 7, "stateStr" : "ARBITER", //仲裁節點// "uptime" : 569,
3)模擬全部標準節點出現故障
全部標準節點都出現故障,被動節點也不能成爲主節點
[root@localhost etc]# mongod -f /etc/mongod2.conf --shutdown //關閉標準節點27018// [root@localhost etc]# mongo --port 27019 kgcrs:SECONDARY> rs.status() "_id" : 0, "name" : "192.168.126.132:27017", "health" : 0, //宕機狀態// "state" : 8, "stateStr" : "(not reachable/healthy)", "uptime" : 0, "_id" : 1, "name" : "192.168.126.132:27018", "health" : 0, //宕機狀態// "state" : 8, "stateStr" : "(not reachable/healthy)", "uptime" : 0, "_id" : 2, "name" : "192.168.126.132:27019", "health" : 1, "state" : 2, "stateStr" : "SECONDARY", //被動節點// "uptime" : 1403, "_id" : 3, "name" : "192.168.126.132:27020", "health" : 1, "state" : 7, "stateStr" : "ARBITER", //仲裁節點//
1.配置容許在從節點讀取數據
默認MongoDB複製集的從節點不能讀取數據,可使用rs.slaveOk()命令容許在從節點讀取數據。
[root@localhost etc]# mongo --port 27017 kgcrs:SECONDARY> show dbs //讀取不到數據庫信息// 2018-07-17T17:11:31.570+0800 E QUERY [thread1] Error: listDatabases failed:{ "operationTime" : Timestamp(1531818690, 1), "ok" : 0, "errmsg" : "not master and slaveOk=false", "code" : 13435, "codeName" : "NotMaste kgcrs:SECONDARY> rs.slaveOk() kgcrs:SECONDARY> show dbs admin 0.000GB config 0.000GB local 0.000GB
2.查看複製狀態信息
可使用 rs.printReplicationInfo()和rs.printSlaveReplicationInfo()命令查看複製集狀態。
kgcrs:SECONDARY> rs.printReplicationInfo() configured oplog size: 990MB log length start to end: 2092secs (0.58hrs) oplog first event time: Tue Jul 17 2018 16:41:48 GMT+0800 (CST) oplog last event time: Tue Jul 17 2018 17:16:40 GMT+0800 (CST) now: Tue Jul 17 2018 17:16:46 GMT+0800 (CST) kgcrs:SECONDARY> rs.printSlaveReplicationInfo() source: 192.168.126.132:27017 syncedTo: Tue Jul 17 2018 17:16:50 GMT+0800 (CST) 0 secs (0 hrs) behind the primary source: 192.168.126.132:27019 syncedTo: Tue Jul 17 2018 17:16:50 GMT+0800 (CST) 0 secs (0 hrs) behind the primary
3.部署認證複製
kgcrs:PRIMARY> use admin kgcrs:PRIMARY> db.createUser({"user":"root","pwd":"123","roles":["root"]}) [root@localhost ~]# vim /etc/mongod.conf //分別編輯四個配置文件// .... security: keyFile: /usr/bin/kgcrskey1 //驗證路徑// clusterAuthMode: keyFile //驗證類型// [root@localhost ~]# vim /etc/mongod2.conf [root@localhost ~]# vim /etc/mongod3.conf [root@localhost ~]# vim /etc/mongod4.conf [root@localhost bin]# echo "kgcrs key"> kgcrskey1 //生成4個實例的密鑰文件// [root@localhost bin]# echo "kgcrs key"> kgcrskey2 [root@localhost bin]# echo "kgcrs key"> kgcrskey3 [root@localhost bin]# echo "kgcrs key"> kgcrskey4 [root@localhost bin]# chmod 600 kgcrskey{1..4} [root@localhost bin]# mongod -f /etc/mongod.conf //重啓4個實例// [root@localhost bin]# mongod -f /etc/mongod2.conf [root@localhost bin]# mongod -f /etc/mongod3.conf [root@localhost bin]# mongod -f /etc/mongod4.conf [root@localhost bin]# mongo --port 27017 //進入標準節點中// kgcrs:PRIMARY> show dbs //沒法查看數據庫// kgcrs:PRIMARY> rs.status() //沒法查看複製集// kgcrs:PRIMARY> use admin //身份登陸驗證// kgcrs:PRIMARY> db.auth("root","123") kgcrs:PRIMARY> show dbs //能夠查看數據庫// admin 0.000GB config 0.000GB local 0.000GB kgcrs:PRIMARY> rs.status() //能夠查看複製集// "_id" : 0, "name" : "192.168.126.132:27017", "health" : 1, "state" : 1, "stateStr" : "PRIMARY", "uptime" : 411, "_id" : 1, "name" : "192.168.126.132:27018", "health" : 1, "state" : 2, "stateStr" : "SECONDARY", "uptime" : 324, "_id" : 2, "name" : "192.168.126.132:27019", "health" : 1, "state" : 2, "stateStr" : "SECONDARY", "uptime" : 305, "_id" : 3, "name" : "192.168.126.132:27020", "health" : 1, "state" : 7, "stateStr" : "ARBITER", "uptime" : 280,