MongoDB 3.4 download URL: https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-amazon-3.4.4.tgz
192.168.0.188:27017 (primary), config file /usr/src/node1/mongodb.conf:

port=27017                          # port
dbpath=/usr/src/node1/data          # data file directory
logpath=/usr/src/node1/mongodb.log  # log file path
logappend=true                      # append to the log file instead of overwriting it
fork=true                           # run as a daemon (in the background)
maxConns=500                        # maximum number of simultaneous connections
bind_ip=0.0.0.0                     # listen on all network interfaces
noauth=true                         # authentication disabled
#auth=true                          # authentication enabled
replSet=test                        # replica set name
oplogSize=200                       # oplog size, 200 MB
keyFile=/usr/src/mongodb.key

192.168.0.188:27018 (secondary), config file /usr/src/node2/mongodb.conf:

port=27018                          # port
dbpath=/usr/src/node2/data          # data file directory
logpath=/usr/src/node2/mongodb.log  # log file path
logappend=true                      # append to the log file instead of overwriting it
fork=true                           # run as a daemon (in the background)
maxConns=500                        # maximum number of simultaneous connections
bind_ip=0.0.0.0                     # listen on all network interfaces
#noauth=true                        # authentication disabled
#auth=true                          # authentication enabled
replSet=test                        # replica set name
oplogSize=200                       # oplog size, 200 MB
keyFile=/usr/src/mongodb.key

192.168.0.188:27019 (secondary), config file /usr/src/node3/mongodb.conf:

port=27019                          # port
dbpath=/usr/src/node3/data          # data file directory
logpath=/usr/src/node3/mongodb.log  # log file path
logappend=true                      # append to the log file instead of overwriting it
fork=true                           # run as a daemon (in the background)
maxConns=500                        # maximum number of simultaneous connections
bind_ip=0.0.0.0                     # listen on all network interfaces
#noauth=true                        # authentication disabled
#auth=true                          # authentication enabled
replSet=test                        # replica set name
oplogSize=200                       # oplog size, 200 MB
keyFile=/usr/src/mongodb.key
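Before starting the three mongod processes, the dbpath directories listed above must already exist; mongod does not create them on its own. A minimal preparation sketch, assuming the paths from the configs above:

mkdir -p /usr/src/node1/data /usr/src/node2/data /usr/src/node3/data    # data directories for the three members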
Start the primary node:

mongod --config /usr/src/node1/mongodb.conf    # start the primary node

Connect to the primary with the mongo shell and create an admin user (username admin, password 123456):

> use admin
> db.createUser(
... {
...     user: "admin",
...     pwd: "123456",
...     roles: [{role: "root", db: "admin"}]
... }
... );

After the user has been created, edit each node's configuration file to enable authentication (comment out noauth=true and enable auth=true), then generate a key file shared by all members:

root@ubuntu:~# openssl rand -base64 756 > /usr/src/mongodb.key    # key file content must be 6 to 1024 base64 characters
root@ubuntu:~# chmod 600 /usr/src/mongodb.key

Then (re)start all of the nodes:

mongod --config /usr/src/node1/mongodb.conf
mongod --config /usr/src/node2/mongodb.conf
mongod --config /usr/src/node3/mongodb.conf

Connect to the primary and authenticate:

> use admin
> db.auth('admin', '123456')
1

Then configure the replica set. The higher a member's priority, the more weight it has in elections to become primary:

> config = {_id: "test", members: [
...     {_id: 0, host: "192.168.0.188:27017", priority: 12},
...     {_id: 1, host: "192.168.0.188:27018", priority: 11},
...     {_id: 2, host: "192.168.0.188:27019", priority: 10}
... ]}
> rs.initiate(config)    # initialise the replica set with this configuration
> rs.status()            # check the status
{
    "members" : [
        {
            "_id" : 0,
            "name" : "192.168.0.188:27017",
            "health" : 1,
            "state" : 1,
            "stateStr" : "PRIMARY",
            "uptime" : 9807,
        },
        {
            "_id" : 1,
            "name" : "192.168.0.188:27018",
            "health" : 1,
            "state" : 2,
            "stateStr" : "SECONDARY",
            "uptime" : 9715,
        },
        {
            "_id" : 2,
            "name" : "192.168.0.188:27019",
            "health" : 1,
            "state" : 2,
            "stateStr" : "SECONDARY",
            "uptime" : 9711,
        }
    ],
    "ok" : 1
}

Configure a delayed (hidden) backup member:

cfg = rs.conf()
cfg.members[2].priority = 0
cfg.members[2].hidden = true
cfg.members[2].slaveDelay = 3600    # delay in seconds
rs.reconfig(cfg)                    # reapply the configuration so it takes effect
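To confirm that the hidden, delayed member is actually in effect, its settings and its replication lag can be checked from the primary. A minimal sketch using the standard shell helpers (the exact output will vary per deployment):

rs.conf().members[2]              // should now show priority 0, hidden true, slaveDelay 3600
rs.printSlaveReplicationInfo()    // shows how far each secondary is behind the primary's oplog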
First, stop the primary node. Find the mongod processes:

root     10049  0.6  0.5 739284  93844 ?   Sl   12:10   1:13 mongod --config /usr/src/node1/mongodb.conf
root     10137  0.6  0.6 743792 100336 ?   Sl   12:10   1:12 mongod --config /usr/src/node2/mongodb.conf
root     10221  0.5  0.3 545516  61560 ?   Sl   12:10   1:03 mongod --config /usr/src/node3/mongodb.conf

kill -9 10049    # stop the primary

Then connect to one of the secondaries and check the status:

mongo 192.168.0.188:27018
> rs.status()
{
    "_id" : 0,
    "name" : "192.168.0.188:27017",
    "health" : 0,
    "state" : 8,
    "stateStr" : "(not reachable/healthy)",
    "uptime" : 0,
},
{
    "_id" : 1,
    "name" : "192.168.0.188:27018",
    "health" : 1,
    "state" : 1,
    "stateStr" : "PRIMARY",    # the secondary has taken over as primary
    "uptime" : 11151,
},
{
    "_id" : 2,
    "name" : "192.168.0.188:27019",
    "health" : 1,
    "state" : 2,
    "stateStr" : "SECONDARY",
    "uptime" : 11140,
}

The secondary has taken over as primary. Now restart the old primary and see whether the primary role switches back automatically:

rs.status()
{
    "_id" : 0,
    "name" : "192.168.0.188:27017",
    "health" : 1,
    "state" : 1,
    "stateStr" : "PRIMARY",    # the primary role has switched back
    "uptime" : 105,
},
{
    "_id" : 1,
    "name" : "192.168.0.188:27018",
    "health" : 1,
    "state" : 2,
    "stateStr" : "SECONDARY",
    "uptime" : 104,
},
{
    "_id" : 2,
    "name" : "192.168.0.188:27019",
    "health" : 1,
    "state" : 2,
    "stateStr" : "SECONDARY",
}

As you can see, 192.168.0.188:27017 has become the primary again.
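kill -9 simulates an unplanned crash. For a controlled failover test you can instead ask the current primary to step down voluntarily; a minimal sketch, run from a mongo shell connected to the current primary:

rs.stepDown(60)    // the primary steps down and will not try to become primary again for 60 seconds
rs.status()        // one of the secondaries should now report stateStr PRIMARY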
Create two test collections, test and tb1:

db.test.save({name: "apple"});
for (var i = 1; i <= 100; i++) { db.tb1.insert({name: "hank" + i}); }

Take a full backup of the database:

mongodump -h 192.168.0.188 -o /root/backup/

Then perform some destructive operations in the database:

db.test.drop();
db.tb1.remove({});
db.tb1.insert({name: "xxxxxxxxxxxxxxxxxxxx"});

Now, how do we get back to the state before these operations? The first option is simply to restore the full backup we just took; but if we want to restore to an arbitrary point in time, we need the oplog.

Dump the oplog:

mongodump -h 192.168.0.188 -d local -c oplog.rs -o /root/backup/
2017-05-19T15:26:15.686+0800    writing local.oplog.rs to
2017-05-19T15:26:15.748+0800    done dumping local.oplog.rs (10458 documents)

Use bsondump to inspect the oplog and filter for the drop operation:

root@ubuntu:~/backup/local# bsondump oplog.rs.bson | grep drop
{"ts":{"$timestamp":{"t":1495179002,"i":1}},"t":{"$numberLong":"15"},"h":{"$numberLong":"2437177398902103200"},"v":2,"op":"c","ns":"test.$cmd","o":{"drop":"test"}}
2017-05-19T15:27:36.202+0800    10458 objects found

Once we have found this entry, note its timestamp: {"t":1495179002,"i":1}. Then replay the backup plus the oplog up to that point with mongorestore. (Note: --oplogReplay expects an oplog.bson file at the top level of the dump directory, so the dumped local/oplog.rs.bson generally has to be moved there and renamed to oplog.bson before replaying.)

mongorestore -h 192.168.0.188 --oplogReplay --oplogLimit "1495179002:1" /root/backup/
2017-05-19T15:32:30.821+0800    no indexes to restore
2017-05-19T15:32:30.821+0800    finished restoring b_tuxi_logs.users (14 documents)
2017-05-19T15:32:30.863+0800    error: E11000 duplicate key error collection: tuxi2.pending index: _id_ dup key: { : ObjectId('591e540da26f3b893f77bac6') }
2017-05-19T15:32:30.863+0800    no indexes to restore
2017-05-19T15:32:30.863+0800    finished restoring tuxi2.pending (1 document)
2017-05-19T15:32:30.863+0800    no indexes to restore
2017-05-19T15:32:30.863+0800    finished restoring test.test (1 document)
2017-05-19T15:32:30.863+0800    no indexes to restore
2017-05-19T15:32:30.863+0800    finished restoring a.b (1 document)
2017-05-19T15:32:30.863+0800    restoring users from /root/backup/admin/system.users.bson
2017-05-19T15:32:31.458+0800    replaying oplog
2017-05-19T15:32:32.684+0800    oplog  577KB
2017-05-19T15:32:34.263+0800    oplog  2.03MB
2017-05-19T15:32:34.263+0800    done

a:PRIMARY> db.tb1.find()
{ "_id" : ObjectId("591e9f03c7fc4390ee9a9fa8"), "name" : "xxxxxxxxxxxxxxxxxxxx" }
{ "_id" : ObjectId("591e9da0f7a418c28d9403b3"), "name" : "hank1" }
{ "_id" : ObjectId("591e9da0f7a418c28d9403b4"), "name" : "hank2" }
{ "_id" : ObjectId("591e9da0f7a418c28d9403b5"), "name" : "hank3" }
{ "_id" : ObjectId("591e9da0f7a418c28d9403b6"), "name" : "hank4" }
{ "_id" : ObjectId("591e9da0f7a418c28d9403b7"), "name" : "hank5" }
{ "_id" : ObjectId("591e9da0f7a418c28d9403b8"), "name" : "hank6" }
{ "_id" : ObjectId("591e9da0f7a418c28d9403b9"), "name" : "hank7" }
{ "_id" : ObjectId("591e9da0f7a418c28d9403ba"), "name" : "hank8" }
{ "_id" : ObjectId("591e9da0f7a418c28d9403bb"), "name" : "hank9" }
{ "_id" : ObjectId("591e9da0f7a418c28d9403bc"), "name" : "hank10" }
{ "_id" : ObjectId("591e9da0f7a418c28d9403bd"), "name" : "hank11" }
{ "_id" : ObjectId("591e9da0f7a418c28d9403be"), "name" : "hank12" }
{ "_id" : ObjectId("591e9da0f7a418c28d9403bf"), "name" : "hank13" }
{ "_id" : ObjectId("591e9da0f7a418c28d9403c0"), "name" : "hank14" }
{ "_id" : ObjectId("591e9da0f7a418c28d9403c1"), "name" : "hank15" }
{ "_id" : ObjectId("591e9da0f7a418c28d9403c2"), "name" : "hank16" }
{ "_id" : ObjectId("591e9da0f7a418c28d9403c3"), "name" : "hank17" }
{ "_id" : ObjectId("591e9da0f7a418c28d9403c4"), "name" : "hank18" }
{ "_id" : ObjectId("591e9da0f7a418c28d9403c5"), "name" : "hank19" }
Type "it" for more

As you can see, the data has been fully recovered.
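Instead of dumping the oplog and grepping it with bsondump, the cut-off timestamp can also be located directly from a mongo shell connected to the primary. A minimal sketch, using the same database and collection names as this example:

use local
db.oplog.rs.find({op: "c", "o.drop": "test"}).sort({$natural: -1}).limit(1)
// the ts field of the matching entry, e.g. Timestamp(1495179002, 1),
// is the value to pass to --oplogLimit as "1495179002:1"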
MongoDB backup and recovery: as long as the oplog is available, its timestamps let you restore the data to any point in time. Because the oplog keeps growing (it is a fixed-size capped collection whose oldest entries are eventually overwritten), its size needs to be set sensibly for the write volume of your workload.
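To judge whether the configured oplogSize (200 MB in the configs above) is large enough, check how much time the current oplog actually covers, i.e. your point-in-time restore window. A minimal sketch, run on the primary:

db.printReplicationInfo()
// prints the configured oplog size, the amount currently used, and the
// time of the first and last oplog events, i.e. the window you can replay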