1、環境
系統: CentOS 6.4 x64 最小化安裝
node1: 192.168.1.13
node2: 192.168.1.14
vip: 192.168.1.15
nfs: 192.168.1.10
2、基礎配置
node1和node2的操做同樣
#關閉iptables和selinux [root@node1 ~]# getenforce Disabled #確保這項是正確的 [root@node1 ~]# service iptables stop #配置本地hosts解析 [root@node1 ~]# echo "192.168.1.13 node1" >>/etc/hosts [root@node1 ~]# echo "192.168.1.14 node2" >>/etc/hosts [root@node1 ~]# cat /etc/hosts 127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4 ::1 localhost localhost.localdomain localhost6 localhost6.localdomain6 192.168.1.13 node1 192.168.1.14 node2 #配置epel源 [root@node1 ~]# rpm -ivh http://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm [root@node1 ~]# sed -i 's@#b@b@g' /etc/yum.repos.d/epel.repo [root@node1 ~]# sed -i 's@mirrorlist@#mirrorlist@g' /etc/yum.repos.d/epel.repo #同步時間 [root@node1 ~]# yum install ntp -y [root@node1 ~]# echo "*/10 * * * * /usr/sbin/ntpdate asia.pool.ntp.org &>/dev/null" >/var/spool/cron/root [root@node1 ~]# ntpdate asia.pool.ntp.org 21 Jun 17:32:45 ntpdate[1561]: step time server 211.233.40.78 offset -158.552839 sec [root@node1 ~]# hwclock -w #配置ssh互信 [root@node1 ~]# ssh-keygen [root@node1 ~]# ssh-copy-id -i ~/.ssh/id_rsa.pub root@node2
3、安裝配置heartbeat
(1).安裝heartbeat
#在ha-node1和ha-node2都執行安裝操做 [root@node1 ~]# yum install heartbeat -y
(2).配置ha.cf
[root@node1 ~]# cd /usr/share/doc/heartbeat-3.0.4/ [root@node1 heartbeat-3.0.4]# cp authkeys ha.cf haresources /etc/ha.d/ [root@node1 heartbeat-3.0.4]# cd /etc/ha.d/ [root@node1 ha.d]# ls authkeys ha.cf harc haresources rc.d README.config resource.d shellfuncs [root@node1 ha.d]# egrep -v "^$|^#" /etc/ha.d/ha.cf logfile /var/log/ha-log logfacility local1 keepalive 2 deadtime 30 warntime 10 initdead 120 mcast eth0 225.0.10.1 694 1 0 auto_failback on node node1 node node2 crm no
(3).配置authkeys
[root@node1 ha.d]# dd if=/dev/random bs=512 count=1 | openssl md5 0+1 records in 0+1 records out 21 bytes (21 B) copied, 3.1278e-05 s, 671 kB/s (stdin)= 4206bd8388c16292bc03710a0c747f59 [root@node1 ha.d]# grep -v ^# /etc/ha.d/authkeys auth 1 1 md5 4206bd8388c16292bc03710a0c747f59 #將認證文件權限修改爲600 [root@node1 ~]# chmod 600 /etc/ha.d/authkeys
(4).配置haresource
[root@node1 ha.d]# grep -v ^# /etc/ha.d/haresources node1 IPaddr::192.168.1.15/24/eth0
(5).啓動heartbeat
[root@node1 ha.d]# scp authkeys haresources ha.cf node2:/etc/ha.d/ #node1啓動服務 [root@node1 ~]# /etc/init.d/heartbeat start Starting High-Availability services: INFO: Resource is stopped Done. [root@node1 ~]# chkconfig heartbeat off #說明:關閉開機自啓動,當服務器重啓時,須要人工去啓動 #node2啓動服務 [root@node2 ~]# /etc/init.d/heartbeat start #查看結果 [root@node1 ~]# ip a |grep eth0 2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000 inet 192.168.1.13/24 brd 192.168.1.255 scope global eth0 inet 192.168.1.15/24 brd 192.168.1.255 scope global secondary eth0 #vip在主節點上 [root@node2 ~]# ip a |grep eth0 2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000 inet 192.168.1.14/24 brd 192.168.1.255 scope global eth0 #備節點上沒有vip
(6).測試heartbeat
正常狀態
#node1信息 [root@node1 ~]# ip a |grep eth0 2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000 inet 192.168.1.13/24 brd 192.168.1.255 scope global eth0 inet 192.168.1.15/24 brd 192.168.1.255 scope global secondary eth0 #vip在主節點上 #node2信息 [root@node2 ~]# ip a |grep eth0 2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000 inet 192.168.1.14/24 brd 192.168.1.255 scope global eth0 #備節點上沒有vip
模擬主節點宕機後的狀態信息
#在主節點node1中止heartbeat服務 [root@node1 ~]# /etc/init.d/heartbeat stop Stopping High-Availability services: Done. [root@node1 ~]# ip a |grep eth0 #主節點的heartbeat服務中止後,vip資源被搶走 2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000 inet 192.168.1.13/24 brd 192.168.1.255 scope global eth0 #在備節點node2查看資源 [root@node2 ~]# ip a |grep eth0 2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000 inet 192.168.1.14/24 brd 192.168.1.255 scope global eth0 inet 192.168.1.15/24 brd 192.168.1.255 scope global secondary eth0
恢復主節點的heartbeat服務
[root@node1 ~]# /etc/init.d/heartbeat start Starting High-Availability services: INFO: Resource is stopped Done. #主節點的heartbeat服務恢復後,將資源接管回來了 [root@node1 ~]# ip a |grep eth0 2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000 inet 192.168.1.13/24 brd 192.168.1.255 scope global eth0 inet 192.168.1.15/24 brd 192.168.1.255 scope global secondary eth0 #查看備節點 [root@node2 ~]# ip a |grep eth0 #vip資源已移除 2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000 inet 192.168.1.14/24 brd 192.168.1.255 scope global eth0
4、安裝部署DRBD
(1).對硬盤進行分區,node1和node2的操做同樣
[root@node1 ~]# fdisk /dev/sdb #說明:/dev/sdb分紅2個分區/dev/sdb1和/dev/sdb2,/dev/sdb1=19G [root@node1 ~]# partprobe /dev/sdb #對分區進行格式化 [root@node1 ~]# mkfs.ext4 /dev/sdb1 說明:sdb2分區爲meta data分區,不須要格式化操做 [root@node1 ~]# tune2fs -c -1 /dev/sdb1 說明:設置最大掛載數爲-1,關閉強制檢查掛載次數限制
(2).安裝DRBD
因爲咱們的系統是CentOS6.4的,因此咱們還須要安裝內核模塊,版本須要和uname -r保持一致,安裝包咱們從系統安裝軟件中提取出來,過程略。node1和node2的安裝過程同樣,這裏只給出node1的安裝過程
#安裝內核文件 [root@node1 ~]# rpm -ivh kernel-devel-2.6.32-358.el6.x86_64.rpm kernel-headers-2.6.32-358.el6.x86_64.rpm [root@node1 ~]# yum install drbd84 kmod-drbd84 -y
(3).配置DRBD
a.修改全局配置文件
[root@node1 ~]# egrep -v "^$|^#|^[[:space:]]+#" /etc/drbd.d/global_common.conf global { usage-count no; } common { protocol C; handlers { } startup { } options { } disk { on-io-error detach; no-disk-flushes; no-md-flushes; rate 200M; } net { sndbuf-size 512k; max-buffers 8000; unplug-watermark 1024; max-epoch-size 8000; cram-hmac-alg "sha1"; shared-secret "weyee2014"; after-sb-0pri disconnect; after-sb-1pri disconnect; after-sb-2pri disconnect; rr-conflict disconnect; } }
b.增長資源
[root@node1 ~]# cat /etc/drbd.d/nfsdata.res resource nfsdata { on node1 { device /dev/drbd1; disk /dev/sdb1; address 192.168.1.13:7789; meta-disk /dev/sdb2 [0]; } on node2 { device /dev/drbd1; disk /dev/sdb1; address 192.168.1.14:7789; meta-disk /dev/sdb2 [0]; } }
c.將配置文件複製到node2上,重啓系統加載drbd模塊,初始化meta數據
[root@node1 ~]# scp global_common.conf nfsdata.res node2:/etc/drbd.d/ [root@node1 ~]# depmod [root@node1 ~]# modprobe drbd [root@node1 ~]# lsmod |grep drbd drbd 365931 0 libcrc32c 1246 1 drbd #在node1初始化meta數據 [root@node1 ~]# drbdadm create-md nfsdata initializing activity log NOT initializing bitmap Writing meta data... New drbd meta data block successfully created. #在node2上加載模塊,初始化meta數據 [root@node2 ~]# depmod [root@node2 ~]# modprobe drbd [root@node2 ~]# lsmod |grep drbd drbd 365931 0 libcrc32c 1246 1 drbd [root@node2 ~]# drbdadm create-md nfsdata initializing activity log NOT initializing bitmap Writing meta data... New drbd meta data block successfully created.
d.在node1和node2上啓動drbd
#node1操做