web1 192.168.1.137 sh keninit.sh安裝web
vip 192.168.1.254 此ip是虛擬ip
web2 192.168.1.138 sh keninit.sh安裝web
主mysqlA 192.168.1.140 主
從mysqlB 192.168.1.141 從
從mysqlC 192.168.1.145 從
Atlas1 192.168.1.146 Atlas+keepalive+lvs
Atlas2 192.168.1.144 Atlas+keepalive+lvs
實驗環境:六臺機器全部是centos6.5的環境
一、以下三臺數據庫機器140mysqlA,141mysqlB和145mysqlC操作如出一轍
yum install -y gcc* gcc-c++* autoconf* automake* zlib* libxml* ncurses-devel* libgcrypt* libtool*
wget http://www.cmake.org/files/v2.8/cmake-2.8.4.tar.gz --no-check-certificate
tar -zxvf cmake-2.8.4.tar.gz
cd cmake-2.8.4
./configure && make && make install
mkdir /opt/mysql
mkdir /opt/mysql/data
groupadd mysql
useradd -g mysql mysql
chown mysql:mysql -R /opt/mysql/data
wget http://downloads.mysql.com/archives/get/file/mysql-5.5.13.tar.gz
tar -zxvf mysql-5.5.13.tar.gz
cd mysql-5.5.13
cmake -DCMAKE_INSTALL_PREFIX=/opt/mysql \
-DSYSCONFDIR=/opt/mysql/etc \
-DMYSQL_DATADIR=/opt/mysql/data \
-DMYSQL_TCP_PORT=3306 \
-DMYSQL_UNIX_ADDR=/tmp/mysqld.sock \
-DMYSQL_USER=mysql \
-DEXTRA_CHARSETS=all \
-DWITH_READLINE=1 \
-DWITH_SSL=system \
-DWITH_EMBEDDED_SERVER=1 \
-DENABLED_LOCAL_INFILE=1 \
-DWITH_INNOBASE_STORAGE_ENGINE=1
make && make install
mkdir /opt/mysql/log
mkdir /opt/mysql/etc
cp support-files/my-medium.cnf /opt/mysql/etc/my.cnf
chmod 755 scripts/mysql_install_db
scripts/mysql_install_db --user=mysql --basedir=/opt/mysql/ --datadir=/opt/mysql/data/
mkdir /opt/mysql/init.d
cp support-files/mysql.server /opt/mysql/init.d/mysql
chmod +x /opt/mysql/init.d/mysql
/opt/mysql/init.d/mysql start
find / -name mysqld.sock
/tmp/mysqld.sock
/opt/mysql/bin/mysql -S /tmp/mysqld.sock -P 3306
/opt/mysql/bin/mysqladmin -u root password '123456' #設置mysql數據庫密碼
/opt/mysql/bin/mysql -uroot -p'123456' #登錄mysql數據庫
[root@localhost mysql-5.5.13]# grep -Ev "^#|^$" /opt/mysql/etc/my.cnf
[client]
port = 3306
socket = /tmp/mysqld.sock
[mysqld]
port = 3306
socket = /tmp/mysqld.sock
basedir = /opt/mysql/ ############報錯的解決方法
datadir = /opt/mysql/data/ ###########報錯的解決方法
skip-external-locking
key_buffer_size = 16M
max_allowed_packet = 1M
table_open_cache = 64
sort_buffer_size = 512K
net_buffer_length = 8K
read_buffer_size = 256K
read_rnd_buffer_size = 512K
myisam_sort_buffer_size = 8M
log-bin=mysql-bin
binlog_format=mixed
server-id = 1
[mysqldump]
quick
max_allowed_packet = 16M
[mysql]
no-auto-rehash
[myisamchk]
key_buffer_size = 20M
sort_buffer_size = 20M
read_buffer = 2M
write_buffer = 2M
[mysqlhotcopy]
interactive-timeout
1、在192.168.1.140主mysqlA裏
[root@localhost ~]# /opt/mysql/bin/mysql -uroot -p'123456'
mysql> GRANT REPLICATION SLAVE ON *.* to 'rep1'@'192.168.1.141' identified by "123456"; #給mysqlB受權
mysql> GRANT REPLICATION SLAVE ON *.* to 'rep1'@'192.168.1.145' identified by "123456";
mysql> grant all on *.* to per1@'192.168.1.146' identified by '123456'; #全部主從都要給Atlas數據庫受權,否則提示down的狀態
mysql> flush privileges;
mysql> show master status;
+------------------+----------+--------------+------------------+
| File | Position | Binlog_Do_DB | Binlog_Ignore_DB |
+------------------+----------+--------------+------------------+
| mysql-bin.000003 | 699 | | |
+------------------+----------+--------------+------------------+
##記錄下 FILE 及 Position 的值,在後面進行從服務器操做的時候須要用到。
2、在192.168.1.141/145mysqlB/C從裏
[root@localhost ~]# /opt/mysql/bin/mysql -uroot -p'123456'
mysql> grant all on *.* to per1@'192.168.1.146' identified by '123456'; #全部主從都要給Atlas數據庫受權,否則提示down的狀態
mysql> flush privileges;
[root@localhost ~]# grep server-id /opt/mysql/etc/my.cnf
server-id = 10 ###145的改成server-id =9
#server-id = 2
[root@localhost ~]# /opt/mysql/init.d/mysql restart
Shutting down MySQL. [肯定]
Starting MySQL.. [肯定]
[root@localhost ~]# /opt/mysql/bin/mysql -uroot -p'123456'
mysql> change master to master_host="192.168.1.140", master_user="rep1", master_password="123456", master_log_file="mysql-bin.000003", master_log_pos=699;
mysql> start slave;
Query OK, 0 rows affected (0.01 sec) ####注意防火牆和selinux都關掉或者開放相應的端口
mysql> show slave status\G
Slave_IO_State: Waiting for master to send event
Master_Host: 192.168.1.140
Master_User: rep1
Master_Port: 3306
Connect_Retry: 60
Master_Log_File: mysql-bin.000005
Read_Master_Log_Pos: 107
Relay_Log_File: localhost-relay-bin.000005
Relay_Log_Pos: 253
Relay_Master_Log_File: mysql-bin.000005
Slave_IO_Running: Yes
Slave_SQL_Running: Yes
Replicate_Do_DB:
Replicate_Ignore_DB:
Replicate_Do_Table:
Replicate_Ignore_Table:
Replicate_Wild_Do_Table:
Replicate_Wild_Ignore_Table:
Last_Errno: 0
Last_Error:
Skip_Counter: 0
Exec_Master_Log_Pos: 107
Relay_Log_Space: 413
Until_Condition: None
Until_Log_File:
Until_Log_Pos: 0
Master_SSL_Allowed: No
Master_SSL_CA_File:
Master_SSL_CA_Path:
Master_SSL_Cert:
Master_SSL_Cipher:
Master_SSL_Key:
Seconds_Behind_Master: 0
Master_SSL_Verify_Server_Cert: No
Last_IO_Errno: 0
Last_IO_Error:
Last_SQL_Errno: 0
Last_SQL_Error:
Replicate_Ignore_Server_Ids:
Master_Server_Id: 1
1 row in set (0.00 sec)
3、驗證:在主192.168.1.140裏
[root@localhost log]# /opt/mysql/bin/mysql -uroot -p'123456'
mysql> create database mydatabases;
Query OK, 1 row affected (0.00 sec)
mysql> show databases;
+--------------------+
| Database |
+--------------------+
| information_schema |
| first_db |
| mydatabases |
| mysql |
| performance_schema |
| test |
+--------------------+
6 rows in set (0.00 sec)
在從數據庫裏192.168.1.141裏查看
[root@localhost log]# /opt/mysql/bin/mysql -uroot -p'123456'
mysql> show databases;
+--------------------+
| Database |
+--------------------+
| information_schema |
| first_db |
| mydatabases |
| mysql |
| performance_schema |
| test |
+--------------------+
6 rows in set (0.00 sec)
OK,至此主從同步完成。
4、如下是常見的主從錯誤
1.網絡不通
2.密碼不對
3.pos不正確
4.ID問題
5.防火牆開端口和selinux關掉
補充:
雙方的機器防火牆是否策略有限制。
ID的問題,在安裝完mysql數據庫的時候默認他們的server-id=1 可是在作主從同步的時候須要將ID 號碼設置不同才行。
2、安裝Atlas
1、安裝atlas
yum install -y pkg-config libevent* glib lua
wget ftp://ftp.gnu.org/gnu/autoconf/autoconf-2.69.tar.gz && tar zxvf autoconf-2.69.tar.gz && cd autoconf-2.69 && ./configure && make && make install
wget http://ftp.gnu.org/gnu/automake/automake-1.13.2.tar.gz && tar zxvf automake-1.13.2.tar.gz && cd automake-1.13.2 && ./configure && make && make install
yum install -y libffi-devel
wget http://ftp.gnome.org/pub/gnome/sources/glib/2.36/glib-2.36.3.tar.xz && tar xvf glib-2.36.3.tar.xz && cd glib-2.36.3 && ./configure
export PKG_CONFIG_PATH=/usr/bin/pkg-config
make && make install
將Atlas包下載到本地,再從本地上傳到服務器裏
wget --no-check-certificate https://github.com/Qihoo360/Atlas/releases/download/2.2.1/Atlas-2.2.1.el6.x86_64.rpm
2、修改test.cnf配置文件
vim /usr/local/mysql-proxy/conf/test.cnf
[root@localhost conf]# /usr/local/mysql-proxy/bin/encrypt 123456
/iZxz+0GRoA= #生成密鑰
[root@localhost conf]# cat test.cnf
[mysql-proxy]
#帶#號的爲非必需的配置項目
#管理接口的用戶名
admin-username = user
#管理接口的密碼
admin-password = pwd
#實現管理接口的Lua腳本所在路徑
admin-lua-script = /usr/local/mysql-proxy/lib/mysql-proxy/lua/admin.lua
#Atlas後端鏈接的MySQL主庫的IP和端口,可設置多項,用逗號分隔
proxy-backend-addresses = 192.168.1.140:3306
#Atlas後端鏈接的MySQL從庫的IP和端口,@後面的數字表明權重,用來做負載均衡,若省略則默認爲1,可設置多項,用逗號分隔
proxy-read-only-backend-addresses = 192.168.1.141:3306@1,192.168.1.145:3306@1
#用戶名與其對應的加密過的MySQL密碼,密碼使用PREFIX/bin目錄下的加密程序encrypt加密
pwds = root:/iZxz+0GRoA= ##和上面的密鑰對應
#設置Atlas的運行方式,設爲true時爲守護進程方式,設爲false時爲前臺方式,通常開發調試時設爲false,線上運行時設爲true
daemon = true
#設置Atlas的運行方式,設爲true時Atlas會啓動兩個進程,一個爲monitor,一個爲worker,monitor在worker意外退出後會自動將其重啓,設爲false時只有worker,沒有monitor,通常開發調試時設爲false,線上運行時設爲true
keepalive = true
#工做線程數,推薦設置與系統的CPU核數相等
event-threads = 4
#日誌級別,分爲message、warning、critical、error、debug五個級別
log-level = message
#日誌存放的路徑
log-path = /usr/local/mysql-proxy/log #日誌查找的地方
#SQL日誌的開關,可設置爲OFF、ON、REALTIME,OFF表明不記錄SQL日誌,ON表明記錄SQL日誌,REALTIME表明記錄SQL日誌且實時寫入磁盤,默認爲OFF
#sql-log = OFF
#實例名稱,用於同一臺機器上多個Atlas實例間的區分
instance = test
#Atlas監聽的工做接口IP和端口
proxy-address = 0.0.0.0:1234
#Atlas監聽的管理接口IP和端口
admin-address = 0.0.0.0:2345
#分表設置,此例中person爲庫名,mt爲表名,id爲分表字段,3爲子表數量,可設置多項,以逗號分隔,若不分表則不須要設置該項
#tables = person.mt.id.3
#默認字符集,若不設置該項,則默認字符集爲latin1
charset = utf8
#容許鏈接Atlas的客戶端的IP,能夠是精確IP,也能夠是IP段,以逗號分隔,若不設置該項則容許全部IP鏈接,不然只容許列表中的IP鏈接
#client-ips = 127.0.0.1, 192.168.1
#Atlas前面掛接的LVS的物理網卡的IP(注意不是虛IP),如有LVS且設置了client-ips則此項必須設置,不然能夠不設置
#lvs-ips = 192.168.1.1
3、啓動Atlas
/usr/local/mysql-proxy/bin/mysql-proxyd test start
4、查看Atlas進程
ps aux | grep mysql-proxy | grep -v grep
5、在mysqlA、B、C主和從數據庫裏查看Atlas
/opt/mysql/bin/mysql -uuser -ppwd -h192.168.1.144 -P2345 #查看Atlas2
mysql> select * from backends;
+-------------+--------------------+-------+------+
| backend_ndx | address | state | type |
+-------------+--------------------+-------+------+
| 1 | 192.168.1.140:3306 | up | rw |
| 2 | 192.168.1.141:3306 | up | ro |
| 3 | 192.168.1.145:3306 | up | ro |
+-------------+--------------------+-------+------+
3 rows in set (0.00 sec)
/opt/mysql/bin/mysql -uuser -ppwd -h192.168.1.146 -P2345 #查看Atlas1
mysql> select * from backends;
+-------------+--------------------+-------+------+
| backend_ndx | address | state | type |
+-------------+--------------------+-------+------+
| 1 | 192.168.1.140:3306 | up | rw |
| 2 | 192.168.1.141:3306 | up | ro |
| 3 | 192.168.1.145:3306 | up | ro |
+-------------+--------------------+-------+------+
3 rows in set (0.00 sec)
mysql> remove backend 3; ###下線數據庫
Empty set (0.00 sec)
mysql> select * from backends;
+-------------+--------------------+-------+------+
| backend_ndx | address | state | type |
+-------------+--------------------+-------+------+
| 1 | 192.168.1.140:3306 | up | rw |
| 2 | 192.168.1.141:3306 | up | ro |
+-------------+--------------------+-------+------+
2 rows in set (0.00 sec)
mysql> add slave 192.168.1.145:3306 ###上線數據庫
-> ;
Empty set (0.00 sec)
mysql> select * from backends;
+-------------+--------------------+-------+------+
| backend_ndx | address | state | type |
+-------------+--------------------+-------+------+
| 1 | 192.168.1.140:3306 | up | rw |
| 2 | 192.168.1.141:3306 | up | ro |
| 3 | 192.168.1.145:3306 | up | ro |
+-------------+--------------------+-------+------+
3 rows in set (0.00 sec)
mysql> set online 3;
+-------------+--------------------+---------+------+
| backend_ndx | address | state | type |
+-------------+--------------------+---------+------+
| 3 | 192.168.1.145:3306 | unknown | ro |
+-------------+--------------------+---------+------+
1 row in set (0.00 sec)
mysql> select * from backends;
+-------------+--------------------+-------+------+
| backend_ndx | address | state | type |
+-------------+--------------------+-------+------+
| 1 | 192.168.1.140:3306 | up | rw |
| 2 | 192.168.1.141:3306 | up | ro |
| 3 | 192.168.1.145:3306 | up | ro |
+-------------+--------------------+-------+------+
3 rows in set (0.00 sec)
6、在192.168.1.145從mysqlc機器上
#模擬故障
[root@localhost log]# netstat -utnalp | grep mysql
tcp 0 0 0.0.0.0:3306 0.0.0.0:* LISTEN 4284/mysqld
tcp 0 0 192.168.1.145:54292 192.168.1.140:3306 ESTABLISHED 4284/mysqld
[root@localhost log]# kill 4284
[root@localhost log]# netstat -utnalp | grep mysql
[root@localhost log]# ps aux | grep mysql
root 4691 0.0 0.1 103256 856 pts/0 S+ 15:50 0:00 grep mysql
[root@localhost log]# /opt/mysql/bin/mysql -uuser -ppwd -h192.168.1.144 -P2345
mysql> select * from backends;
+-------------+--------------------+-------+------+
| backend_ndx | address | state | type |
+-------------+--------------------+-------+------+
| 1 | 192.168.1.140:3306 | up | rw |
| 2 | 192.168.1.141:3306 | up | ro |
| 3 | 192.168.1.145:3306 | down | ro |
###能夠看到192.168.1.145已經down了,這個要把Atlas的配置文件
vim /usr/local/mysql-proxy/conf/test.cnf裏面的從數據庫的優先級調成同樣時看的更快。
#proxy-read-only-backend-addresses = 192.168.1.141:3306@1,192.168.1.145:3306@1
其中@1爲優先級
將192.168.1.145數據庫啓動
[root@localhost log]# /opt/mysql/init.d/mysql restart ###恢復故障
Shutting down MySQL. [肯定]
Starting MySQL.. [肯定]
[root@localhost log]# /opt/mysql/bin/mysql -uuser -ppwd -h192.168.1.144 -P2345
mysql> select * from backends;
+-------------+--------------------+-------+------+
| backend_ndx | address | state | type |
+-------------+--------------------+-------+------+
| 1 | 192.168.1.140:3306 | up | rw |
| 2 | 192.168.1.141:3306 | up | ro |
| 3 | 192.168.1.145:3306 | up | ro |
+-------------+--------------------+-------+------+ ####能夠看到已經OK.
3 rows in set (0.00 sec)
7、鏈接本身主從數據庫的命令
[root@localhost bin]# /opt/mysql/bin/mysql -uroot -p123456
[root@localhost bin]# /opt/mysql/init.d/mysql restart/stop/start
3、如下是架構的搭建
Mysql主從:一主三從
Atlas:2個節點作HA
Keepalived:提供VIP防止Atlas單點故障
1、安裝keepalived
wget http://www.keepalived.org/software/keepalived-1.2.13.tar.gz
tar zxvf keepalived-1.2.13.tar.gz -C /opt/
cd /opt/keepalived-1.2.13
./configure --prefix=/usr/local/keeplived
make && make install
2、新建一個配置文件,默認狀況下keepalived啓動時會去/etc/keepalived
[root@localhost ~]# mkdir -p /etc/keepalived
[root@localhost ~]# cp -r /usr/local/keeplived/etc/keepalived/keepalived.conf /etc/keepalived/
3、編輯配置文件
#Atlas1(1.146)上
[root@localhost ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
notification_email {
837337164@qq.com
}
notification_email_from 837337164@qq.com
smtp_server 127.0.0.1
smtp_connect_timeout 30
router_id Atlas_ha
}
vrrp_instance Atlas_ha {
state MASTER
interface eth0
virtual_router_id 51
priority 100
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
192.168.1.254
}
}
virtual_server 192.168.1.254 80 {
delay_loop 6
lb_algo rr
lb_kind DR
nat_mask 255.255.255.0
persistence_timeout 50
protocol TCP
real_server 192.168.1.137 80 {
weight 1
notify_down /usr/local/mysql-proxy/bin/Atlas.sh
TCP_CHECK {
connect_timeout 10
nb_get_retry 3
delay_before_retry 3
connect_port 80
}
}
real_server 192.168.1.138 80 {
weight 1
notify_down /usr/local/mysql-proxy/bin/Atlas.sh
TCP_CHECK {
connect_timeout 10
nb_get_retry 3
delay_before_retry 3
connect_port 80
}
}
}
#Atlas2(1.144)與atlas1幾乎一致,以下:
[root@localhost ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
notification_email {
837337164@qq.com
}
notification_email_from 837337164@qq.com
smtp_server 127.0.0.1
smtp_connect_timeout 30
router_id Atlas_ha
}
vrrp_instance Atlas_ha {
state BACKUP
interface eth0
virtual_router_id 51
priority 50
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
192.168.1.254
}
}
virtual_server 192.168.1.254 80 {
delay_loop 6
lb_algo rr
lb_kind DR
nat_mask 255.255.255.0
persistence_timeout 50
protocol TCP
real_server 192.168.1.137 80 {
weight 1
notify_down /usr/local/mysql-proxy/bin/Atlas.sh
TCP_CHECK {
connect_timeout 10
nb_get_retry 3
delay_before_retry 3
connect_port 80
}
}
real_server 192.168.1.138 80 {
weight 1
notify_down /usr/local/mysql-proxy/bin/Atlas.sh
TCP_CHECK {
connect_timeout 10
nb_get_retry 3
delay_before_retry 3
connect_port 80
}
}
}
4、建立notify_down腳本
[root@localhost bin]# cat /usr/local/mysql-proxy/bin/Atlas.sh
#!/bin/sh
# Keepalived notify_down hook: when a real-server health check fails,
# kill keepalived on this node so the BACKUP node takes over the VIP.
pkill keepalived
[root@localhost bin]# chmod +x /usr/local/mysql-proxy/bin/Atlas.sh
啓動keepalived
[root@localhost sbin]# /usr/local/keeplived/sbin/keepalived -D
[root@localhost sbin]# ps aux | grep keepalived | grep -v grep
root 5151 0.0 0.1 39980 776 ? Ss 17:48 0:00 /usr/local/keeplived/sbin/keepalived -D
root 5152 0.1 0.4 42084 2068 ? S 17:48 0:00 /usr/local/keeplived/sbin/keepalived -D
root 5153 0.2 0.2 42084 1296 ? S 17:48 0:00 /usr/local/keeplived/sbin/keepalived -D
中止keepalived
[root@localhost sbin]# kill 5151
4、查看keepalived日誌,解決問題思路
tail -f /var/log/messages
4、在192.168.1.146(Atlas1)和192.168.1.144(Atlas2)上分別安裝LVS
[root@localhost ~]# yum install -y ipvsadm
[root@localhost ~]# ipvsadm -L -n
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 192.168.1.254:80 rr persistent 50
-> 192.168.1.137:80 Route 1 0 0
-> 192.168.1.138:80 Route 1 0 0
[root@localhost ~]# ip addr list #用這個指令能夠看到vip的192.168.1.254在那臺Atlas上
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
3: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 00:0c:29:ec:ff:f2 brd ff:ff:ff:ff:ff:ff
inet 192.168.1.146/24 brd 192.168.1.255 scope global eth0
inet 192.168.1.254/32 scope global eth0 ###虛擬ip
inet6 fe80::20c:29ff:feec:fff2/64 scope link
valid_lft forever preferred_lft forever
5、在192.168.1.137(web1)和192.168.1.138(web2)服務器裏安裝虛擬vip的ip腳本
1、安裝腳本
[root@localhost ~]# cat lvs-client.sh
. /etc/rc.d/init.d/functions

# VIP list served by the LVS director; each entry gets its own lo:N alias.
VIP=(
192.168.1.254
)

# start: bind every VIP to a loopback alias with a /32 mask and suppress
# ARP for it, so only the director answers ARP for the VIP (LVS-DR mode).
function start(){
  for ((i=0;i<${#VIP[*]};i++))
  do
    echo ${i} ${VIP[$i]}
    ifconfig lo:${i} ${VIP[$i]} netmask 255.255.255.255 up
    route add -host ${VIP[$i]} dev lo
  done
  # Real-server ARP suppression: arp_ignore=1 / arp_announce=2 must be
  # set on BOTH "lo" and "all".
  echo "1">/proc/sys/net/ipv4/conf/lo/arp_ignore
  echo "2">/proc/sys/net/ipv4/conf/lo/arp_announce
  # BUGFIX: original wrote "1" into all/arp_announce; the intended
  # sysctl here is all/arp_ignore.
  echo "1">/proc/sys/net/ipv4/conf/all/arp_ignore
  echo "2">/proc/sys/net/ipv4/conf/all/arp_announce
}

# stop: remove the loopback VIP aliases and their host routes.
function stop(){
  for ((i=0;i<${#VIP[*]};i++))
  do
    echo ${i} ${VIP[$i]}
    # BUGFIX: original used "up" here, which re-added the alias instead
    # of removing it; stop must bring the interface alias down.
    ifconfig lo:${i} ${VIP[$i]} netmask 255.255.255.255 down
    route del -host ${VIP[$i]} dev lo:${i}
  done
}

case "$1" in
  start)
    start
    exit
    ;;
  stop)
    stop
    exit
    ;;
  *)
    echo "You must use $0:stop|start"
    ;;
esac
chmod +x lvs-client.sh
sh lvs-client.sh start
[root@localhost ~]# ifconfig
eth0 Link encap:Ethernet HWaddr 00:0C:29:20:F6:A4
inet addr:192.168.1.137 Bcast:192.168.1.255 Mask:255.255.255.0
inet6 addr: fe80::20c:29ff:fe20:f6a4/64 Scope:Link
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
RX packets:115144 errors:0 dropped:0 overruns:0 frame:0
TX packets:1912 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:10232034 (9.7 MiB) TX bytes:131917 (128.8 KiB)
lo Link encap:Local Loopback
inet addr:127.0.0.1 Mask:255.0.0.0
inet6 addr: ::1/128 Scope:Host
UP LOOPBACK RUNNING MTU:16436 Metric:1
RX packets:16 errors:0 dropped:0 overruns:0 frame:0
TX packets:16 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:0
RX bytes:960 (960.0 b) TX bytes:960 (960.0 b)
lo:0 Link encap:Local Loopback
inet addr:192.168.1.254 Mask:255.255.255.255 ####能夠看到虛擬網卡已經有了
UP LOOPBACK RUNNING MTU:16436 Metric:1
[root@localhost ~]# route -n
Kernel IP routing table
Destination Gateway Genmask Flags Metric Ref Use Iface
192.168.1.254 0.0.0.0 255.255.255.255 UH 0 0 0 lo
192.168.1.0 0.0.0.0 255.255.255.0 U 0 0 0 eth0
169.254.0.0 0.0.0.0 255.255.0.0 U 1002 0 0 eth0
0.0.0.0 192.168.1.1 0.0.0.0 UG 0 0 0 eth0
2、編寫測試文件
[root@localhost ~]# cat /var/www/html/index.html
<h1>RS1</h1>
[root@localhost ~]#firefox &
[root@localhost ~]# scp -r lvs-client.sh root@192.168.1.138:/root
在web2,192.168.1.138上
[root@localhost ~]# sh lvs-client.sh start
[root@localhost ~]#ifconfig
[root@localhost ~]#route -n
[root@localhost ~]# cat /var/www/html/index.html
<h1>RS2</h1>
[root@localhost ~]#firefox &
6、模擬故障
1、中止192.168.1.138上面的http
service httpd stop
2、在192.168.1.146Atlas1上面查看
[root@localhost ~]# ipvsadm -L -n
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 192.168.1.254:80 rr persistent 50
-> 192.168.1.137:80 Route 1 0 1
3、測試,如今192.168.1.144Atlas2
[root@localhost ~]# curl http://192.168.1.254
<h1>RS1</h1>
4、重啓192.168.1.138的http
service httpd start
在看192.168.1.146Atlas1
[root@localhost ~]# ipvsadm -L -n
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 192.168.1.254:80 rr persistent 50
-> 192.168.1.137:80 Route 1 0 0
-> 192.168.1.138:80 Route 1 0 0
5、關閉192.168.1.146Atlas1上的keepalived
[root@localhost ~]# ps aux | grep keepalived | grep -v grep
root 8173 0.0 0.1 39980 772 ? Ss 14:14 0:00 /usr/local/keeplived/sbin/keepalived -D
root 8174 0.0 0.4 42208 2080 ? S 14:14 0:00 /usr/local/keeplived/sbin/keepalived -D
root 8175 0.0 0.2 42084 1296 ? S 14:14 0:00 /usr/local/keeplived/sbin/keepalived -D
[root@localhost ~]# kill 8173
在192.168.1.144Atlas2上面查看
[root@localhost ~]# /usr/local/keeplived/sbin/keepalived -D
[root@localhost ~]# ipvsadm -L -n
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 192.168.1.254:80 rr persistent 50
-> 192.168.1.137:80 Route 1 0 0
-> 192.168.1.138:80 Route 1 0 0
[root@localhost ~]# ip addr list
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 00:0c:29:bc:de:b4 brd ff:ff:ff:ff:ff:ff
inet 192.168.1.144/24 brd 192.168.1.255 scope global eth0
inet 192.168.1.254/32 scope global eth0 ###此時192.168.1.254VIP已經被Atlas2拿到了。
inet6 fe80::20c:29ff:febc:deb4/64 scope link
valid_lft forever preferred_lft forever
在192.168.1.146Atlas1上
[root@localhost ~]# curl http://192.168.1.254
<h1>RS2</h1> ###能夠訪問