集羣的heartbeat與drbd服務
咱們用到的集羣系統主要就2種:
高可用(High Availability)HA集羣, 使用Heartbeat實現;也會稱爲「雙機熱備」、「雙機互備」、「雙機」。
負載均衡羣集(Load Balance Cluster),使用Linux Virtual Server(LVS)實現;
heartbeat (Linux-HA)的工做原理:heartbeat最核心的包括兩個部分,心跳監測部分和資源接管部分,心跳監測能夠經過網絡鏈路和串口進行,並且支持冗餘鏈路,它們之間相互發送報文來告訴對方本身當前的狀態,若是在指定的時間內未收到對方發送的報文,那麼就認爲對方失效,這時需啓動資源接管模塊來接管運行在對方主機上的資源或者服務。
須要安裝的包:
heartbeat-3.0.4-2.el6.x86_64.rpm
heartbeat-libs-3.0.4-2.el6.x86_64.rpm
heartbeat-devel-3.0.4-2.el6.x86_64.rpm
ldirectord-3.9.5-3.1.x86_64.rpm
步驟:
一。將rpm包安裝在server1和server2的一個目錄中,
在server1中切到剛纔的rpm包所在的目錄中,安裝
yum install * -y##安裝全部的rpm包
[root@server1 heartbeat]# cd /etc/ha.d/
[root@server1 ha.d]# cp /usr/share/doc/heartbeat-3.0.4/{authkeys,ha.cf,haresources} .
[root@server1 ha.d]# ls
authkeys ha.cf harc haresources rc.d README.config resource.d shellfuncs
[root@server1 ha.d]# vim ha.cf
48 keepalive 2
56 deadtime 30
71 initdead 60
76 udpport 12345
91 bcast eth0 # Linux
113 #mcast eth0 225.0.0.1 694 1 0
121 #ucast eth0 1
157 auto_failback on
211 node server1.example.com
212 node server2.example.com
220 ping 172.25.50.250
253 respawn hacluster /usr/lib64/heartbeat/ipfail
259 apiauth ipfail gid=haclient uid=hacluster
[root@server1 ha.d]# vim authkeys
23 auth 1
24 1 crc
25 #2 sha1 HI!
26 #3 md5 Hello!
[root@server1 ha.d]# vim haresources
150 server1.example.com IPaddr::172.25.50.100/24/eth0 httpd
[root@server1 ha.d]# chmod 600 authkeys
[root@server1 ha.d]# ll -d authkeys
-rw------- 1 root root 643 2月 17 15:06 authkeys
[root@server1 ha.d]# scp ha.cf haresources authkeys 172.25.50.20:/etc/ha.d/
root@172.25.50.20's password:
ha.cf 100% 10KB 10.3KB/s 00:00
haresources 100% 5961 5.8KB/s 00:00
authkeys 100% 643 0.6KB/s 00:00
[root@server1 ha.d]# /etc/init.d/heartbeat start
Starting High-Availability services: INFO: Resource is stopped
Done.
在server2上啓動heartbeat服務
[root@server2 ha.d]# /etc/init.d/heartbeat start
Starting High-Availability services: INFO: Resource is stopped
Done.
測試:
在server1的/var/www/html/目錄下:
[root@server1 html]# vim index.html
www.server1.example
在server2上
[root@server2 ha.d]# cd /var/www/html/
[root@server2 html]# ls
[root@server2 html]# vim index.html
在server1上
[root@server1 html]# ip addr show
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 52:54:00:06:13:fa brd ff:ff:ff:ff:ff:ff
inet 172.25.50.10/24 brd 172.25.50.255 scope global eth0
inet 172.25.50.100/24 brd 172.25.50.255 scope global secondary eth0
inet6 fe80::5054:ff:fe06:13fa/64 scope link
valid_lft forever preferred_lft forever
說明http服務是在server1上啓動的
在真機上
[root@real50 Desktop]# curl 172.25.50.100
##訪問的內容是在ip:100的主機的http默認發佈目錄上的內容
Server1上:關閉心跳
[root@server1 html]# /etc/init.d/heartbeat stop
Stopping High-Availability services: Done.
這時候,100這個ip就到server2這個主機上
真機:
[root@real50 Desktop]# curl 172.25.50.100
當server1上的heartbeat服務啓動後,100這個ip就從新回切到server1主機上
測試2:關閉httpd服務
(開啓heartbeat服務,)
[root@server1 ha.d]# /etc/init.d/httpd stop
Stopping httpd: [ OK ]
[root@server1 ha.d]# ip addr show
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 52:54:00:51:aa:19 brd ff:ff:ff:ff:ff:ff
inet 172.25.50.10/24 brd 172.25.50.255 scope global eth0
inet 172.25.50.100/24 brd 172.25.50.255 scope global secondary eth0
inet6 fe80::5054:ff:fe51:aa19/64 scope link
valid_lft forever preferred_lft forever
[root@real50 Desktop]# curl 172.25.50.100
curl: (7) Failed connect to 172.25.50.100:80; Connection refused
[root@real50 Desktop]# arp -an | grep 172.25.50.100
? (172.25.50.100) at 52:54:00:51:aa:19 [ether] on br0
##############drbd服務####################
首先先生成rpm包
Server1上
解壓事先發送到server1上的drbd-8.4.2.tar.gz包
[root@server1 mnt]# tar zxf drbd-8.4.2.tar.gz
[root@server1 mnt]# cd drbd-8.4.2
[root@server1 drbd-8.4.2]# yum install gcc -y
[root@server1 drbd-8.4.2]# yum install flex -y
[root@server1 drbd-8.4.2]# yum install rpm-build -y
[root@server1 drbd-8.4.2]# yum install kernel-devel -y
[root@server1 drbd-8.4.2]# ./configure --enable-spec
生成drbd.spec這個文件
[root@server1 drbd-8.4.2]# ./configure --enable-spec --with-km
生成drbd-km.spec 文件
[root@server1 drbd-8.4.2]# cp /mnt/drbd-8.4.2.tar.gz /root/rpmbuild/SOURCES/
[root@server1 drbd-8.4.2]# rpmbuild -bb drbd.spec #編譯drbd.spec文件
[root@server1 drbd-8.4.2]# rpmbuild -bb drbd-km.spec#編譯drbd-km.spec 文件
[root@server1 drbd-8.4.2]# cd /root/rpmbuild/RPMS/x86_64/
[root@server1 x86_64]# ls
drbd-8.4.2-2.el6.x86_64.rpm
drbd-bash-completion-8.4.2-2.el6.x86_64.rpm
drbd-heartbeat-8.4.2-2.el6.x86_64.rpm
drbd-km-2.6.32_431.el6.x86_64-8.4.2-2.el6.x86_64.rpm
drbd-pacemaker-8.4.2-2.el6.x86_64.rpm
drbd-udev-8.4.2-2.el6.x86_64.rpm
drbd-utils-8.4.2-2.el6.x86_64.rpm
drbd-xen-8.4.2-2.el6.x86_64.rpm
rpm -ivh *
scp * 172.25.50.20: --> 而後在 server2 上執行: cd --> rpm -ivh *
安裝以上軟件後就能夠做接下來的實驗啦
############存儲#####
給server1和server2上同時添加一塊4G虛擬硬盤
在server1 server2上執行fdisk -l 查看添加磁盤路徑,這裏是/dev/vda
[root@server1 drbd.d]# fdisk -l
Disk /dev/vda: 4294 MB, 4294967296 bytes
16 heads, 63 sectors/track, 8322 cylinders
Units = cylinders of 1008 * 512 = 516096 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk identifier: 0x00000000
切入: cd /etc/drbd.d/
[root@server1 drbd.d]# ls
global_common.conf
[root@server1 drbd.d]# vim lyitx.res
resource lyitx {
meta-disk internal;
device /dev/drbd1;
syncer {
verify-alg sha1;
}
on server1.example.com {
disk /dev/vda;
address 172.25.50.10:7789;
}
on server2.example.com {
disk /dev/vda;
address 172.25.50.20:7789;
}
}
[root@server1 drbd.d]# scp lyitx.res 172.25.50.20:/etc/drbd.d/#複製到這個文件夾
[root@server1 drbd.d]# drbdadm create-md lyitx#server1和server2上同時執行
Writing meta data...
initializing activity log
NOT initializing bitmap
New drbd meta data block successfully created.
[root@server1 drbd.d]# /etc/init.d/drbd start
Starting DRBD resources: [
create res: lyitx
prepare disk: lyitx
adjust disk: lyitx
adjust net: lyitx
]
..........
***************************************************************
DRBD's startup script waits for the peer node(s) to appear.
- In case this node was already a degraded cluster before the
reboot the timeout is 0 seconds. [degr-wfc-timeout]
- If the peer was available before the reboot the timeout will
expire after 0 seconds. [wfc-timeout]
(These values are for resource 'lyitx'; 0 sec -> wait forever)
To abort waiting enter 'yes' [ 14]:
.[root@server1 drbd.d]# cat /proc/drbd #查看該主機是否可以掛載
version: 8.4.2 (api:1/proto:86-101)
GIT-hash: 7ad5f850d711223713d6dcadc3dd48860321070c build by root@server1.example.com, 2017-02-17 16:28:52
1: cs:Connected ro:Secondary/Secondary ds:Inconsistent/Inconsistent C r-----
ns:0 nr:0 dw:0 dr:0 al:0 bm:0 lo:0 pe:0 ua:0 ap:0 ep:1 wo:f oos:4194140
[root@server1 drbd.d]# drbdadm primary lyitx --force#強制將當前主機設置primary模式
[root@server1 drbd.d]# cat /proc/drbd
cs:SyncSource ro:Primary/Secondary ds:UpToDate/Inconsistent C r---n-
[root@server1 drbd.d]# mkfs.ext4 /dev/drbd1#格式化成ext4,不然沒法掛載,
[root@server1 drbd.d]# cd /mnt/
[root@server1 mnt]# mount /dev/drbd1 /mnt#掛載
[root@server1 /]# cd /mnt
[root@server1 mnt]# ls
lost+found
[root@server1 mnt]# vim index.html#寫的一個測試頁面
[root@server1 mnt]# ls
index.html lost+found
[root@server1 ~]# cd
[root@server1 ~]# umount /mnt#卸載
[root@server1 ~]# drbdadm secondary lyitx#將當前主機設置成secondary模式,只有這樣其餘主機纔可以設置成primary
在server2上
[root@server2 drbd.d]# drbdadm primary lyitx#設置成primary模式
[root@server2 drbd.d]# cat /proc/drbd
1: cs:Connected ro:Primary/Secondary ds:UpToDate/UpToDate C r---
[root@server2 drbd.d]# mount /dev/drbd1 /mnt#掛載,這裏就無需格式化,
[root@server2 drbd.d]# cd /mnt
[root@server2 mnt]# ls
index.html lost+found
[root@server2 mnt]# cat index.html
server1.example.com
在server2上看到server1的測試頁面,說明測試成功
#######heartbeat+mysql###################
將heartbeat服務和mysql服務結合在一塊兒,經過heartbeat服務實現雙機熱備,
步驟:
關閉兩臺虛擬機的 heartbeat服務
[root@server1 mnt]# yum install mysql-server -y
[root@server1 mnt]# drbdadm primary lyitx
[root@server1 mnt]# mount /dev/drbd1 /mnt
[root@server1 mnt]# cd /var/lib/mysql/
/etc/init.d/mysqld start
/etc/init.d/mysqld stop
[root@server1 mysql]# cp -r * /mnt/
[root@server1 mysql]# cd /mnt/
[root@server1 mnt]# ls
ibdata1 ib_logfile0 ib_logfile1 index.html lost+found mysql mysql.sock test
[root@server1 mnt]# rm -fr mysql.sock #開啓mysql服務後,會生成mysql.sock緩存文件,關閉服務後刪除這個文件。(當mysql啓動時,會再次生成)
[root@server1 mnt]# cd
[root@server1 ~]# umount /mnt/ ##這時mysql服務必須關閉,不然沒法卸載
[root@server1 ~]# mount /dev/drbd1 /var/lib/mysql/
[root@server1 ~]# chown mysql.mysql /var/lib/mysql/ -R
[root@server1 mysql]# /etc/init.d/mysqld start
正在啓動 mysqld: [肯定]
[root@server1 mysql]# cd
[root@server1 ~]# /etc/init.d/mysqld stop
中止 mysqld: [肯定]
[root@server1 ~]# umount /var/lib/mysql/
[root@server1 ~]# drbdadm secondary lyitx
在server2上
[root@server2 /]# yum install mysql-server -y
[root@server2 /]# drbdadm primary lyitx
[root@server2 /]# mount /dev/drbd1 /var/lib/mysql/
[root@server2 /]# df##這時可以看到掛載的mysql
Filesystem 1K-blocks Used Available Use% Mounted on
/dev/mapper/VolGroup-lv_root 19134332 1106972 17055380 7% /
tmpfs 510200 0 510200 0% /dev/shm
/dev/sda1 495844 33458 436786 8% /boot
/dev/drbd1 4128284 95192 3823388 3% /var/lib/mysql
[root@server2 /]# /etc/init.d/mysqld start
[root@server2 /]# /etc/init.d/mysqld stop
[root@server2 /]# umount /var/lib/mysql/
[root@server2 /]# drbdadm secondary lyitx
[root@server1 ~]# vim /etc/ha.d/haresources
在最後一行修改:
server1.example.com IPaddr::172.25.50.100/24/eth0 drbddisk::lyitx Filesystem::/dev/drbd1::/var/lib/mysql::ext4 mysqld
[root@server1 ~]# scp /etc/ha.d/haresources 172.25.50.20:/etc/ha.d/
root@172.25.50.20's password:
haresources 100% 6023 5.9KB/s 00:00
[root@server1 ~]# /etc/init.d/heartbeat start
Starting High-Availability services: INFO: Resource is stopped
Done.
[root@server2 ~]# /etc/init.d/heartbeat start
Starting High-Availability services: INFO: Resource is stopped
Done.
測試:server1: /etc/init.d/heartbeat stop --> 去server2上進行 df 查看,若查看到,則表示成功