Having previously covered HAProxy fundamentals in detail, the following records the implementation of an HAProxy + Heartbeat high-availability web cluster, to deepen understanding.
The architecture sketch is as follows:
1) Basic environment preparation (CentOS 6.9)
172.16.60.208(eth0)    HA master node (ha-master)    haproxy, heartbeat
172.16.60.207(eth0)    HA backup node (ha-slave)     haproxy, heartbeat
172.16.60.229          VIP address
172.16.60.204(eth0)    backend node 1 (rs-204)       nginx/tomcat
172.16.60.205(eth0)    backend node 2 (rs-205)       nginx/tomcat
1) Disable the firewall and SELinux (on all four nodes)
[root@ha-master ~]# /etc/init.d/iptables stop
[root@ha-master ~]# setenforce 0
[root@ha-master ~]# vim /etc/sysconfig/selinux
SELINUX=disabled
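To keep the firewall disabled across reboots as well (a small optional addition, not in the original transcript), the init-script links can be switched off and the SELinux change verified:
[root@ha-master ~]# chkconfig iptables off     # stop iptables from starting at boot
[root@ha-master ~]# getenforce                 # should report Permissive now (Disabled after a reboot)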
2) Set the hostnames and bind hosts entries (on both HA nodes)
On the master node:
[root@ha-master ~]# hostname ha-master
[root@ha-master ~]# vim /etc/sysconfig/network
HOSTNAME=ha-master
[root@ha-master ~]# vim /etc/hosts
172.16.60.208 ha-master
172.16.60.207 ha-slave

On the backup node:
[root@ha-slave ~]# hostname ha-slave
[root@ha-slave ~]# vim /etc/sysconfig/network
HOSTNAME=ha-slave
[root@ha-slave ~]# vim /etc/hosts
172.16.60.208 ha-master
172.16.60.207 ha-slave
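As a quick sanity check (not part of the original transcript), each HA node should now be able to resolve the other by hostname:
[root@ha-master ~]# ping -c 2 ha-slave     # should resolve to 172.16.60.207 via /etc/hosts
[root@ha-slave ~]# ping -c 2 ha-master     # should resolve to 172.16.60.208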
2) Install the web environment on the two backend realserver nodes (i.e. install nginx on both 172.16.60.204 and 172.16.60.205)
Install nginx on both realserver nodes via yum (install the nginx yum repository first)
[root@rs-204 ~]# rpm -ivh http://nginx.org/packages/centos/6/noarch/RPMS/nginx-release-centos-6-0.el6.ngx.noarch.rpm
[root@rs-204 ~]# yum install -y nginx

nginx configuration on rs-204
[root@rs-204 ~]# cd /etc/nginx/conf.d/
[root@rs-204 conf.d]# cat default.conf
[root@rs-204 conf.d]# >/usr/share/nginx/html/index.html
[root@rs-204 conf.d]# vim /usr/share/nginx/html/index.html
this is test page of realserver01:172.16.60.204
[root@rs-204 conf.d]# /etc/init.d/nginx start
Starting nginx:                                            [  OK  ]
[root@rs-204 conf.d]# lsof -i:80
COMMAND   PID  USER   FD   TYPE DEVICE SIZE/OFF NODE NAME
nginx   31944  root    6u  IPv4  91208      0t0  TCP *:http (LISTEN)
nginx   31945 nginx    6u  IPv4  91208      0t0  TCP *:http (LISTEN)

nginx configuration on rs-205
[root@rs-205 src]# cd /etc/nginx/conf.d/
[root@rs-205 conf.d]# cat default.conf
[root@rs-205 conf.d]# >/usr/share/nginx/html/index.html
[root@rs-205 conf.d]# vim /usr/share/nginx/html/index.html
this is test page of realserver02:172.16.60.205
[root@rs-205 conf.d]# /etc/init.d/nginx start
Starting nginx:                                            [  OK  ]
[root@rs-205 conf.d]# lsof -i:80
COMMAND   PID  USER   FD   TYPE    DEVICE SIZE/OFF NODE NAME
nginx   20839  root    6u  IPv4 289527645      0t0  TCP *:http (LISTEN)
nginx   20840 nginx    6u  IPv4 289527645      0t0  TCP *:http (LISTEN)

Visiting http://172.16.60.204/ returns "this is test page of realserver01:172.16.60.204"
Visiting http://172.16.60.205/ returns "this is test page of realserver02:172.16.60.205"
3) Install and configure HAProxy (same steps on both HA nodes)
1) Install haproxy first
[root@ha-master ~]# yum install gcc gcc-c++ make openssl-devel kernel-devel
[root@ha-master ~]# cd /usr/local/src/      # download the haproxy tarball into /usr/local/src
[root@ha-master src]# ls haproxy-1.8.12.tar.gz
haproxy-1.8.12.tar.gz
[root@ha-master src]# tar -zvxf haproxy-1.8.12.tar.gz
[root@ha-master src]# cd haproxy-1.8.12
[root@ha-master haproxy-1.8.12]# make TARGET=linux26 CPU=x86_64 PREFIX=/usr/local/haproxy USE_OPENSSL=1 ADDLIB=-lz

Parameter notes:
TARGET=linux26               # check the kernel with "uname -r", e.g. 2.6.32-642.el6.x86_64, hence linux26
CPU=x86_64                   # check the architecture with "uname -m", e.g. x86_64, hence x86_64
PREFIX=/usr/local/haproxy    # haproxy installation path

[root@ha-master haproxy-1.8.12]# ldd haproxy | grep ssl
        libssl.so.10 => /usr/lib64/libssl.so.10 (0x00000031d0400000)
[root@ha-master haproxy-1.8.12]# make install PREFIX=/usr/local/haproxy
[root@ha-master haproxy-1.8.12]# mkdir -p /usr/local/haproxy/conf
[root@ha-master haproxy-1.8.12]# mkdir -p /etc/haproxy
[root@ha-master haproxy-1.8.12]# cp /usr/local/src/haproxy-1.8.12/examples/option-http_proxy.cfg /usr/local/haproxy/conf/haproxy.cfg
[root@ha-master haproxy-1.8.12]# ln -s /usr/local/haproxy/conf/haproxy.cfg /etc/haproxy/haproxy.cfg
[root@ha-master haproxy-1.8.12]# cp -r /usr/local/src/haproxy-1.8.12/examples/errorfiles /usr/local/haproxy/errorfiles
[root@ha-master haproxy-1.8.12]# ln -s /usr/local/haproxy/errorfiles /etc/haproxy/errorfiles
[root@ha-master haproxy-1.8.12]# mkdir -p /usr/local/haproxy/log
[root@ha-master haproxy-1.8.12]# touch /usr/local/haproxy/log/haproxy.log
[root@ha-master haproxy-1.8.12]# ln -s /usr/local/haproxy/log/haproxy.log /var/log/haproxy.log
[root@ha-master haproxy-1.8.12]# cp /usr/local/src/haproxy-1.8.12/examples/haproxy.init /etc/rc.d/init.d/haproxy
[root@ha-master haproxy-1.8.12]# chmod +x /etc/rc.d/init.d/haproxy
[root@ha-master haproxy-1.8.12]# chkconfig haproxy on
[root@ha-master haproxy-1.8.12]# ln -s /usr/local/haproxy/sbin/haproxy /usr/sbin
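Before moving on, the build can be sanity-checked: haproxy -v prints the compiled version, which should report 1.8.12:
[root@ha-master haproxy-1.8.12]# haproxy -v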
2) Configure load balancing in haproxy.cfg
[root@ha-master haproxy-1.8.12]# cd /usr/local/haproxy/conf/
[root@ha-master conf]# cp haproxy.cfg haproxy.cfg.bak
[root@ha-master conf]# > haproxy.cfg
[root@ha-master conf]# vim haproxy.cfg
global
    log 127.0.0.1 local3 info
    maxconn 65535
    chroot /usr/local/haproxy
    uid 99
    gid 99
    daemon

defaults
    log global
    mode http
    retries 3
    option redispatch
    stats uri /haproxy
    stats refresh 30s
    stats realm haproxy-status
    stats auth admin:dxInCtFianKtL]36
    stats hide-version
    maxconn 65535
    timeout connect 5000
    timeout client 50000
    timeout server 50000

frontend http-in
    mode http
    maxconn 65535
    bind :80
    log global
    option httplog
    option httpclose
    acl is_01 hdr_beg(host) www.kevin.com
    use_backend web-server if is_01

backend web-server
    mode http
    balance roundrobin
    cookie SERVERID insert indirect nocache
    option httpclose
    option forwardfor
    server web01 172.16.60.204:80 weight 1 cookie 3 check inter 2000 rise 2 fall 5
    server web02 172.16.60.205:80 weight 1 cookie 4 check inter 2000 rise 2 fall 5
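After editing, the configuration can be validated before any start or restart; haproxy's -c flag checks the file and exits:
[root@ha-master conf]# haproxy -c -f /etc/haproxy/haproxy.cfg
Configuration file is valid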
3) Configure HAProxy logging
[root@ha-master conf]# vim /etc/rsyslog.conf
.......
$ModLoad imudp          # uncomment this line; without it no log is written
$UDPServerRun 514       # uncomment this line; without it no log is written
.......
local3.*                /var/log/haproxy.log    # this line is required, because the local3 facility is defined in the global section of haproxy.cfg

[root@ha-master conf]# vim /etc/sysconfig/rsyslog
SYSLOGD_OPTIONS="-r -m 0"       # accept logs from remote hosts

Restart the rsyslog service:
[root@ha-master conf]# service rsyslog restart
Shutting down system logger:                               [  OK  ]
Starting system logger:                                    [  OK  ]
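To confirm that rsyslog now routes local3 messages into the haproxy log, a test message can be injected with logger (a quick check, not in the original transcript):
[root@ha-master conf]# logger -p local3.info "haproxy rsyslog test"
[root@ha-master conf]# tail -1 /var/log/haproxy.log     # the test line should appear here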
4) Set the maximum number of concurrent connections for haproxy load balancing
Check the kernel limits:
[root@ha-master conf]# sysctl -a | grep file
fs.file-nr = 992 0 386459
fs.file-max = 386459

Check what the application layer asks for:
[root@ha-master conf]# cat /usr/local/haproxy/conf/haproxy.cfg
global                  # global settings
    maxconn 65535       # maximum number of connections

Raise the limit at the system level:
[root@ha-master conf]# vim /etc/security/limits.conf      # append the following as the last line
* - nofile 65535
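The limits.conf change only applies to new login sessions; after logging in again it can be verified with ulimit:
[root@ha-master ~]# ulimit -n     # should report 65535 in a fresh session
65535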
5) Start haproxy on both HA nodes
[root@ha-master conf]# /etc/init.d/haproxy start
Starting haproxy:                                          [  OK  ]
[root@ha-master conf]# ps -ef|grep haproxy
nobody   13080     1  0 16:43 ?        00:00:00 /usr/sbin/haproxy -D -f /etc/haproxy/haproxy.cfg -p /var/run/haproxy.pid
root     13083 11940  0 16:43 pts/0    00:00:00 grep haproxy
[root@ha-master conf]# lsof -i:80
COMMAND   PID   USER   FD   TYPE DEVICE SIZE/OFF NODE NAME
haproxy 13080 nobody    4u  IPv4 428975      0t0  TCP *:http (LISTEN)
Resolve the www.kevin.com domain to the two HA nodes, i.e. 172.16.60.208 and 172.16.60.207.
Then visit http://www.kevin.com/: the result is "this is test page of realserver01:172.16.60.204", and it stays the same no matter how often the page is refreshed.
Only when nginx on 172.16.60.204 goes down does the result change to "this is test page of realserver02:172.16.60.205", i.e. requests are forwarded to the healthy realserver node.

As the haproxy.cfg file shows, although "balance roundrobin" is configured, meaning each client request should be dispatched to a different backend server, it does not take effect!
That is because "cookie SERVERID insert indirect nocache" is also configured, which keeps client sessions sticky, so a client's requests keep landing on the same realserver node until that node fails, and only then are they forwarded to another healthy node.
Remove or comment out the "cookie SERVERID insert indirect nocache" line and visit http://www.kevin.com/ again: now every refresh is forwarded to a different realserver node, i.e. "balance roundrobin" takes effect!
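With the cookie line removed, the alternation is easy to observe from any machine with curl; its -H flag sets the Host header, so no DNS change is needed for a quick test (the exact order may differ, but the two pages should alternate):
[root@ha-master ~]# for i in 1 2 3 4; do curl -s -H "Host: www.kevin.com" http://172.16.60.208/; done
this is test page of realserver01:172.16.60.204
this is test page of realserver02:172.16.60.205
this is test page of realserver01:172.16.60.204
this is test page of realserver02:172.16.60.205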
Visit http://www.kevin.com/haproxy and enter the username and password configured in haproxy.cfg (admin:dxInCtFianKtL]36) to open the haproxy monitoring page.
As the screenshot above shows, both monitored backend realserver nodes are currently OK (web01 and web02 defined in the configuration file are both green). Now stop the nginx service on rs-205 and refresh the http://www.kevin.com/haproxy monitoring page: web02 turns red, i.e. that node is in a failed state! Then restart nginx on rs-205 and refresh the monitoring page again: web02 returns to the normal green state!
4) Install and configure Heartbeat (same steps on both HA nodes)
1) Install heartbeat first (same steps on both HA master and backup nodes)
Download epel-release-latest-6.noarch.rpm
[root@ha-master ~]# ll epel-release-latest-6.noarch.rpm
-rw-rw-r-- 1 root root 14540 Nov  5  2012 epel-release-latest-6.noarch.rpm
[root@ha-master ~]# yum install -y epel-release
[root@ha-master ~]# rpm -ivh epel-release-latest-6.noarch.rpm --force
[root@ha-master ~]# yum install -y heartbeat* libnet
2) Configure heartbeat (on both HA master and backup nodes)
After heartbeat is installed, the system creates the /etc/ha.d/ directory, which holds heartbeat's configuration files.
Heartbeat's bundled configuration files carry a lot of comments, so the relevant files are written by hand here. Heartbeat commonly uses three configuration files:
ha.cf: heartbeat main configuration file
haresources: local resource file
authkeys: authentication file

[root@ha-master ~]# cd /usr/share/doc/heartbeat-3.0.4/
[root@ha-master heartbeat-3.0.4]# cp authkeys ha.cf haresources /etc/ha.d/
[root@ha-master heartbeat-3.0.4]# cd /etc/ha.d/
[root@ha-master ha.d]# ll
total 56
-rw-r--r-- 1 root root   645 Dec 24 21:37 authkeys
-rw-r--r-- 1 root root 10502 Dec 24 21:37 ha.cf
-rwxr-xr-x 1 root root   745 Dec  3  2013 harc
-rw-r--r-- 1 root root  5905 Dec 24 21:37 haresources
drwxr-xr-x 2 root root  4096 Dec 24 21:28 rc.d
-rw-r--r-- 1 root root   692 Dec  3  2013 README.config
drwxr-xr-x 2 root root  4096 Dec 24 21:28 resource.d
-rw-r--r-- 1 root root  2082 Mar 24  2017 shellfuncs
3) Configure heartbeat's main configuration file ha.cf (identical on both HA nodes, except as noted at the end)
[root@ha-master ha.d]# pwd
/etc/ha.d
[root@ha-master ha.d]# cp ha.cf ha.cf.bak
[root@ha-master ha.d]# > ha.cf
[root@ha-master ha.d]# vim ha.cf
debugfile /var/log/ha-debug
logfile /var/log/ha-log            # log file location
#crm yes                           # whether to enable the cluster resource manager
logfacility local0                 # syslog facility used for logging
keepalive 2                        # heartbeat interval, in seconds by default
deadtime 5                         # if no heartbeat is received from the peer within this interval, the peer is considered dead
warntime 3                         # if no heartbeat is received within this interval, a warning is written to the log, but no failover happens yet
initdead 10                        # on some systems the network needs a while to come up after (re)boot; this option covers that window. Must be at least twice deadtime.
udpport 694                        # port used for heartbeat communication; 694 is the default
bcast eth0                         # on Linux, send heartbeats by Ethernet broadcast on eth0. Everything after "#" on this line must be removed completely, otherwise errors occur.
ucast eth0 172.16.60.207           # send heartbeats by UDP unicast on eth0; the IP that follows must be the OTHER node's IP!!!!!
auto_failback on                   # with on, once the master node recovers it automatically takes the resources back from the backup node; with off, the recovered master becomes the backup and the backup stays primary!!!!!
#stonith_host *     baytech 10.0.0.3 mylogin mysecretpassword
#stonith_host ken3  rps10 /dev/ttyS1 kathy 0
#stonith_host kathy rps10 /dev/ttyS1 ken3 0
#watchdog /dev/watchdog
node ha-master                     # master node name, as shown by "uname -n"; the first node listed is the primary!!!!!
node ha-slave                      # backup node name, listed second; mind the order!!!!
#ping 172.16.60.207                # ping node; pick a fixed router as the ping node. Ping nodes are only used to test network connectivity. Normally this ping test alone is enough, in which case comment out the ping_group line below.
ping_group group1 172.16.60.204 172.16.60.205    # these are not the two cluster nodes' addresses; they are only used to test network connectivity. Only when neither IP can be pinged does the peer start taking over the resources.
respawn root /usr/lib64/heartbeat/ipfail    # optional; "root" is the identity the ipfail process runs as. Make sure the path /usr/lib64/heartbeat/ipfail is correct (it can be located with the find command), otherwise heartbeat fails to start.
apiauth ipfail gid=root uid=root

============================ Note ================================
In the HA backup node's ha.cf, only the ucast line above needs to change, to "ucast eth0 172.16.60.208"; everything else is exactly the same as the master node's ha.cf!
4) Configure heartbeat's authentication file authkeys (must be identical on both HA nodes)
[root@ha-master ~]# cd /etc/ha.d/
[root@ha-master ha.d]# cp authkeys authkeys.bak
[root@ha-master ha.d]# >authkeys
[root@ha-master ha.d]# vim authkeys
auth 3          # the number after "auth" must reappear as the key index on one of the lines below! There are three methods, "1" (crc), "2" (sha1) and "3" (md5); "3" is used here, but "1" or "2" would work too. Master and backup must match!
#1 crc
#2 sha1 HI!
3 md5 Hello!

The file must be given mode 600:
[root@ha-master ha.d]# chmod 600 authkeys
[root@ha-master ha.d]# ll authkeys
-rw------- 1 root root 20 Dec 25 00:16 authkeys
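"Hello!" is only a placeholder shared secret; for anything beyond a lab, a random key can be generated and pasted onto the "3 md5 ..." line in authkeys on both nodes (a hedged suggestion, not from the original):
[root@ha-master ha.d]# dd if=/dev/urandom bs=512 count=1 2>/dev/null | md5sum | awk '{print $1}'     # use the printed hash as the md5 key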
5) Modify heartbeat's resource file haresources (must be exactly the same on both HA nodes)
[root@ha-slave ha.d]# cp haresources haresources.bak
[root@ha-slave ha.d]# >haresources
[root@ha-slave ha.d]# vim haresources     # add the line below at the end of the file. Since the file is all comments by default, it can simply be emptied first and then given this single line
ha-master IPaddr::172.16.60.229/24/eth0 haproxy

Configuration notes:
The line above makes ha-master the primary node, sets the cluster VIP to 172.16.60.229, and names haproxy as the application service to be monitored.
This way, starting the heartbeat service automatically starts the haproxy service as well.
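For reference, heartbeat resolves the "haproxy" name in haresources by looking for a start/stop-capable script of that name, first under /etc/ha.d/resource.d/ and then under /etc/init.d/; the haproxy.init script installed earlier satisfies this. A quick check:
[root@ha-master ha.d]# ls -l /etc/init.d/haproxy     # must exist and be executable; heartbeat calls it with start/stop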
Start the heartbeat service on the two HA nodes:
[root@ha-master ~]# /etc/init.d/heartbeat start
/etc/init.d/heartbeat: line 55: /etc/ha.d/shellfuncs: No such file or directory

Starting the heartbeat service reports the error above! This is caused by the ClusterLabs resource-agents package not being installed.
Solution: download and install the ClusterLabs resource-agents package.
Download address: http://linux-ha.org/wiki/Downloads
Baidu download address: https://pan.baidu.com/s/1VNxpl0fUEQstVaPwE_KVbg (extraction password: wtiy)

[root@ha-master src]# pwd
/usr/local/src
[root@ha-master src]# ll resource-agents-3.9.6.tar.gz
-rw-rw-r-- 1 root root 617790 Jan  2 12:37 resource-agents-3.9.6.tar.gz
[root@ha-master src]# tar -zvxf resource-agents-3.9.6.tar.gz
[root@ha-master src]# cd resource-agents-3.9.6
[root@ha-master resource-agents-3.9.6]# ./autogen.sh
[root@ha-master resource-agents-3.9.6]# ./configure
[root@ha-master resource-agents-3.9.6]# make && make install

Check that shellfuncs now exists (in this experiment the make step above actually failed, but the shellfuncs file was still generated, and that file is all that is needed):
[root@ha-master resource-agents-3.9.6]# find / -name shellfuncs
/etc/ha.d/shellfuncs
/usr/local/src/resource-agents-3.9.6/heartbeat/shellfuncs
Start the heartbeat service on the two HA nodes:
[root@ha-master ~]# /etc/init.d/heartbeat start
Starting High-Availability services: INFO:  Resource is stopped
Done.
[root@ha-master ~]# ps -ef|grep heartbeat
root     25862     1  0 12:51 ?        00:00:00 heartbeat: master control process
root     25865 25862  0 12:51 ?        00:00:00 heartbeat: FIFO reader
root     25866 25862  0 12:51 ?        00:00:00 heartbeat: write: bcast eth0
root     25867 25862  0 12:51 ?        00:00:00 heartbeat: read: bcast eth0
root     25868 25862  0 12:51 ?        00:00:00 heartbeat: write: ucast eth0
root     25869 25862  0 12:51 ?        00:00:00 heartbeat: read: ucast eth0
root     25870 25862  0 12:51 ?        00:00:00 heartbeat: write: ping_group group1
root     25871 25862  0 12:51 ?        00:00:00 heartbeat: read: ping_group group1
root     25891 25862  0 12:51 ?        00:00:00 /usr/lib64/heartbeat/ipfail
root     26089     1  0 12:51 ?        00:00:00 /bin/sh /usr/lib/ocf/resource.d//heartbeat/IPaddr start
root     26090 26089  0 12:51 ?        00:00:00 /usr/libexec/heartbeat/send_arp -i 500 -r 10 -p /var/run/resource-agents/send_arp-172.16.60.229 eth0 172.16.60.229 auto 172.16.60.229 ffffffffffff
root     26153 18919  0 12:51 pts/0    00:00:00 grep heartbeat
[root@ha-master ~]# lsof -i:694
COMMAND     PID USER   FD   TYPE DEVICE SIZE/OFF NODE NAME
heartbeat 25866 root    7u  IPv4 572995      0t0  UDP *:ha-cluster
heartbeat 25867 root    7u  IPv4 572995      0t0  UDP *:ha-cluster
heartbeat 25868 root    7u  IPv4 573001      0t0  UDP *:ha-cluster
heartbeat 25869 root    7u  IPv4 573001      0t0  UDP *:ha-cluster
5) HA failover testing
1) When the heartbeat service is started on the HA master node, the master's haproxy service is started along with it!
This is because monitoring of the haproxy service is configured in /etc/ha.d/haresources; the master node holds the VIP resource at this point, i.e. it takes over the service!
[root@ha-master ~]# /etc/init.d/heartbeat start
Starting High-Availability services: INFO:  Resource is stopped
Done.
[root@ha-master ~]# ps -ef|grep heartbeat
root     23215     1  0 14:11 ?        00:00:00 heartbeat: master control process
root     23218 23215  0 14:11 ?        00:00:00 heartbeat: FIFO reader
root     23219 23215  0 14:11 ?        00:00:00 heartbeat: write: bcast eth0
root     23220 23215  0 14:11 ?        00:00:00 heartbeat: read: bcast eth0
root     23221 23215  0 14:11 ?        00:00:00 heartbeat: write: ucast eth0
root     23222 23215  0 14:11 ?        00:00:00 heartbeat: read: ucast eth0
root     23223 23215  0 14:11 ?        00:00:00 heartbeat: write: ping_group group1
root     23224 23215  0 14:11 ?        00:00:00 heartbeat: read: ping_group group1
root     23246 10014  0 14:11 pts/1    00:00:00 grep heartbeat
[root@ha-master ~]# lsof -i:694
COMMAND     PID USER   FD   TYPE DEVICE SIZE/OFF NODE NAME
heartbeat 23219 root    7u  IPv4 391522      0t0  UDP *:ha-cluster
heartbeat 23220 root    7u  IPv4 391522      0t0  UDP *:ha-cluster
heartbeat 23221 root    7u  IPv4 391528      0t0  UDP *:ha-cluster
heartbeat 23222 root    7u  IPv4 391528      0t0  UDP *:ha-cluster
[root@ha-master ~]# ps -ef|grep haproxy
nobody   26150     1  0 12:51 ?        00:00:00 /usr/sbin/haproxy -D -f /etc/haproxy/haproxy.cfg -p /var/run/haproxy.pid
root     26178 18919  0 12:54 pts/0    00:00:00 grep haproxy

The VIP resource is also on the HA master node at this point:
[root@ha-master ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
    link/ether 00:50:56:ac:5b:56 brd ff:ff:ff:ff:ff:ff
    inet 172.16.60.208/24 brd 172.16.60.255 scope global eth0
    inet 172.16.60.229/24 brd 172.16.60.255 scope global secondary eth0:0
    inet6 fe80::250:56ff:feac:5b56/64 scope link
       valid_lft forever preferred_lft forever
But when the heartbeat service is started on the HA backup node, the backup's haproxy service is NOT started along with it!
That is because the VIP is on the HA master node, so the backup node is not serving at this point.
[root@ha-slave ~]# /etc/init.d/heartbeat start
Starting High-Availability services: INFO:  Resource is stopped
Done.
[root@ha-slave ~]# ps -ef|grep haproxy
root     23250 10014  0 14:12 pts/1    00:00:00 grep haproxy
[root@ha-slave ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
    link/ether 00:50:56:ac:05:b5 brd ff:ff:ff:ff:ff:ff
    inet 172.16.60.207/24 brd 172.16.60.255 scope global eth0
    inet6 fe80::250:56ff:feac:5b5/64 scope link
       valid_lft forever preferred_lft forever
2) Stop the heartbeat service on the HA master node: the master's haproxy service is automatically stopped as well, and the VIP resource is transferred to the HA backup node.
The HA backup node automatically takes over the VIP resource, and its haproxy service comes up automatically.
[root@ha-master ~]# /etc/init.d/heartbeat stop     # heartbeat must be stopped this way for haproxy to be stopped automatically and the VIP resource to be transferred
Stopping High-Availability services: Done.
[root@ha-master ~]# ps -ef|grep heartbeat
root     28094 18919  0 14:16 pts/0    00:00:00 grep heartbeat
[root@ha-master ~]# lsof -i:694
[root@ha-master ~]# ps -ef|grep haproxy
root     28097 18919  0 14:16 pts/0    00:00:00 grep haproxy
[root@ha-master ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
    link/ether 00:50:56:ac:5b:56 brd ff:ff:ff:ff:ff:ff
    inet 172.16.60.208/24 brd 172.16.60.255 scope global eth0
    inet6 fe80::250:56ff:feac:5b56/64 scope link
       valid_lft forever preferred_lft forever

The HA backup node takes over the VIP resource and the service:
[root@ha-slave ~]# ps -ef|grep haproxy
nobody   24197     1  0 14:16 ?        00:00:00 /usr/sbin/haproxy -D -f /etc/haproxy/haproxy.cfg -p /var/run/haproxy.pid
root     24217 10014  0 14:17 pts/1    00:00:00 grep haproxy
[root@ha-slave ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
    link/ether 00:50:56:ac:05:b5 brd ff:ff:ff:ff:ff:ff
    inet 172.16.60.207/24 brd 172.16.60.255 scope global eth0
    inet 172.16.60.229/24 brd 172.16.60.255 scope global secondary eth0
    inet6 fe80::250:56ff:feac:5b5/64 scope link
       valid_lft forever preferred_lft forever
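Besides stopping heartbeat outright, heartbeat also ships small helper scripts for a graceful manual switchover; assuming they were installed under /usr/share/heartbeat/ (the path can vary by build), the same test can be driven with them:
[root@ha-master ~]# /usr/share/heartbeat/hb_standby      # hand all resources over to the peer node
[root@ha-master ~]# /usr/share/heartbeat/hb_takeover     # take all resources back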
3) When the heartbeat service on the HA master node is started again, the VIP resource is snatched back, because "auto_failback on" is configured in ha.cf.
[root@ha-master ~]# /etc/init.d/heartbeat start
Starting High-Availability services: INFO:  Resource is stopped
Done.
[root@ha-master ~]# ps -ef|grep haproxy
nobody   28490     1  0 14:19 ?        00:00:00 /usr/sbin/haproxy -D -f /etc/haproxy/haproxy.cfg -p /var/run/haproxy.pid
root     28493 18919  0 14:19 pts/0    00:00:00 grep haproxy
[root@ha-master ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
    link/ether 00:50:56:ac:5b:56 brd ff:ff:ff:ff:ff:ff
    inet 172.16.60.208/24 brd 172.16.60.255 scope global eth0
    inet 172.16.60.229/24 brd 172.16.60.255 scope global secondary eth0:0
    inet6 fe80::250:56ff:feac:5b56/64 scope link
       valid_lft forever preferred_lft forever

The HA backup node loses the VIP resource, and its haproxy service is automatically stopped:
[root@ha-slave ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
    link/ether 00:50:56:ac:05:b5 brd ff:ff:ff:ff:ff:ff
    inet 172.16.60.207/24 brd 172.16.60.255 scope global eth0
    inet6 fe80::250:56ff:feac:5b5/64 scope link
       valid_lft forever preferred_lft forever
[root@ha-slave ~]# ps -ef|grep haproxy
root     24460 10014  0 14:20 pts/1    00:00:00 grep haproxy

The heartbeat log is /var/log/ha-log; the ha-log messages can be watched while the VIP resource migrates during master/backup failures.
Point www.kevin.com's DNS resolution at the VIP address 172.16.60.229; during failover, client access is essentially unaffected and the switch is basically transparent to clients!
The above completes the heartbeat + haproxy failover high-availability environment.
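To see for oneself that clients really are unaffected during a switchover, a simple watch loop against the VIP can be left running on a client machine while heartbeat is stopped and started on the master (a convenience sketch, not from the original transcript):
[root@client ~]# while true; do curl -s -m 2 -H "Host: www.kevin.com" http://172.16.60.229/; sleep 1; done     # output should continue, with at most a brief pause while the VIP moves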