DRBD(Distributed Replicated Block Device): 分佈式複製塊設備
DRBD: 主從
-primary: 可執行讀、寫操作
-secondary: 文件系統不能掛載
DRBD: dual primay, 雙主
-磁盤調度器(Disk Scheduler):合併讀請求,合併寫請求
Protocol:
-A: Async, 異步
-B:semi sync, 半同步
-C:sync, 同步
DRBD Resource:
-資源名稱:可以是除了空白字符外的任意ASCII碼字符;
-DRBD設備:在雙方節點上,此DRBD設備的設備文件;通常爲/dev/drbdN,其主設備號147
-磁盤:在雙方節點上,各自提供的存儲設備;
-網絡配置:雙方數據同步時所使用的網絡屬性;
drbd1: 10.11.8.145
drbd2: 10.11.8.158
前提: 時間同步,hosts解析,ssh雙機互信
kernel 2.6.32 以前包含32: 編譯安裝 drbd
kernel 2.6.33 以後包含33(內核中整合了module), 只安裝管理工具即可: 編譯 drbd-utils
root@drbd1:~# cat /etc/drbd.conf # You can find an example in /usr/share/doc/drbd.../drbd.conf.example include "drbd.d/global_common.conf"; include "drbd.d/*.res";
/etc/drbd.d/global_common.conf : 全局配置文件
/etc/drbd.d/*.res : 資源定義文件
root@drbd1:~# vim /etc/drbd.d/global_common.conf # DRBD is the result of over a decade of development by LINBIT. # In case you need professional services for DRBD or have # feature requests visit http://www.linbit.com global { usage-count yes; #參與在線使用計數器 # minor-count dialog-refresh disable-ip-verification # cmd-timeout-short 5; cmd-timeout-medium 121; cmd-timeout-long 600; } common { handlers { #處理腳本 # These are EXAMPLE handlers only. # They may have severe implications, # like hard resetting the node under certain circumstances. # Be careful when chosing your poison. pri-on-incon-degr "/usr/lib/drbd/notify-pri-on-incon-degr.sh; /usr/lib/drbd/notify-emergency-reboot.sh; echo b > /proc/sysrq-trigger ; reboot -f"; pri-lost-after-sb "/usr/lib/drbd/notify-pri-lost-after-sb.sh; /usr/lib/drbd/notify-emergency-reboot.sh; echo b > /proc/sysrq-trigger ; reboot -f"; local-io-error "/usr/lib/drbd/notify-io-error.sh; /usr/lib/drbd/notify-emergency-shutdown.sh; echo o > /proc/sysrq-trigger ; halt -f"; # fence-peer "/usr/lib/drbd/crm-fence-peer.sh"; # split-brain "/usr/lib/drbd/notify-split-brain.sh root"; # out-of-sync "/usr/lib/drbd/notify-out-of-sync.sh root"; # before-resync-target "/usr/lib/drbd/snapshot-resync-target-lvm.sh -p 15 -- -c 16k"; # after-resync-target /usr/lib/drbd/unsnapshot-resync-target-lvm.sh; } startup { # wfc-timeout degr-wfc-timeout outdated-wfc-timeout wait-after-sb } options { # cpu-mask on-no-data-accessible } disk { resync-rate 1000M; #同步速率 on-io-error detach; #當磁盤IO錯誤時的動做 # size on-io-error fencing disk-barrier disk-flushes # disk-drain md-flushes resync-rate resync-after al-extents # c-plan-ahead c-delay-target c-fill-target c-max-rate # c-min-rate disk-timeout } net { cram-hmac-alg "sha1"; #通訊時的加密算法 shared-secret "Z5yWHwfgV3Ca"; #身份驗證時所使用的祕鑰 protocol C; #通訊協議 # protocol timeout max-epoch-size max-buffers unplug-watermark # connect-int ping-int sndbuf-size rcvbuf-size ko-count # allow-two-primaries cram-hmac-alg shared-secret after-sb-0pri # 
after-sb-1pri after-sb-2pri always-asbp rr-conflict # ping-timeout data-integrity-alg tcp-cork on-congestion # congestion-fill congestion-extents csums-alg verify-alg # use-rle } }
usage-count {val}: Please participate in DRBD's online usage counter [http://usage.drbd.org]. The most convenient way to do so is to set this option to yes. Valid options are: yes, no and ask.
pri-on-incon-degr {cmd}: This handler is called if the node is primary, degraded and if the local copy of the data is inconsistent.
pri-lost-after-sb {cmd}: The node is currently primary, but lost the after-split-brain auto recovery procedure. As a consequence, it should be abandoned.
具體參數參考官方文檔: https://www.drbd.org/en/doc/tcp
lvcreate -L 4G -n data vol1
這裏我新建了一個邏輯卷/dev/dm-2
root@drbd1:~# vim /etc/drbd.d/data.res resource data { device /dev/drbd0; disk /dev/dm-2; meta-disk internal; on drbd1 { address 10.11.8.145:7789; } on drbd2 { address 10.11.8.158:7789; } }
PS: drbd 註冊使用的端口爲 7788 - 7799
root@drbd1:~# scp /etc/drbd.d/* drbd2:/etc/drbd.d/
在兩個節點上分別初始化資源: (必須先執行此步,才可啓動服務)
root@drbd1:~# drbdadm create-md data root@drbd2:~# drbdadm create-md data
root@drbd1:~# service drbd start root@drbd2:~# service drbd start
root@drbd1:~# drbd-overview 0:data/0 Connected Secondary/Secondary Inconsistent/Inconsistent
root@drbd1:~# drbdadm primary data --force root@drbd1:~# drbd-overview 0:data/0 SyncSource Primary/Secondary UpToDate/Inconsistent C r---- [============>.......] sync'ed: 66.2% (172140/505964)K delay_probe: 35 root@drbd1:~# drbd-overview 0:data/0 Connected Primary/Secondary UpToDate/UpToDate
root@drbd1:~# mke2fs -j /dev/drbd0 #執行時間可能稍長 root@drbd1:~# mount /dev/drbd0 /data/drbd/ 寫入數據
此處注意: 要在drbd2上掛載, 必須先在drbd1上卸載
而且, 由於drbd設備只能在primary節點上掛載, 所以還需要將drbd1設爲secondary, 將drbd2設爲primary後, 才可以在drbd2上掛載
root@drbd1:~# umount /dev/drbd0 #必須先卸載 root@drbd1:~# drbdadm secondary data root@drbd2:~# drbdadm primary data root@drbd2:~# mount /dev/drbd0 /data/drbd/ #drbd2上掛載 查看數據
完成. 如下爲補充
drbdadm up <resource> #啓用資源
drbdadm down <resource> #停用資源
drbdadm primary <resource> #升級資源
drbdadm secondary <resource> #降級資源
drbdadm create-md <resource> #初始化資源
drbdadm adjust <resource> #重新配置資源
drbdadm connect <resource> #啓動連接
drbdadm disconnect <resource> #關閉連接
-d, --dry-run : 只打印出命令的輸出, 並不真正執行命令
PS: DUnknown故障參考此處: DRBD 故障恢復
resource mydrbd { net { protocol C; allow-two-primaries yes; } startup { become-primary-on both; } disk { fencing resource-and-stonith; } handlers { # Make sure the other node is confirmed # dead after this! outdate-peer "/sbin/kill-other-node.sh"; } on node1 { device /dev/drbd0; disk /dev/vg0/mydrbd; address 172.16.200.11:7789; meta-disk internal; } on node2 { device /dev/drbd0; disk /dev/vg0/mydrbd; address 172.16.200.12:7789; meta-disk internal; } }