Deploying a MySQL high-availability, read/write-splitting cluster




Architecture diagram:


[architecture diagram image]





    Deploying the cluster:

    Note:

##Atlas requires MySQL 5.1 or later; installing MySQL 5.6 is recommended here
##MySQL 5.6 download: http://pan.baidu.com/s/1bnrzpZh


    Install DRBD on the primary and standby masters:

        http://732233048.blog.51cto.com/9323668/1665979


    Install heartbeat and MySQL on the primary and standby masters:

        http://732233048.blog.51cto.com/9323668/1670068


    Install MySQL on slave1 and slave2:

##MySQL 5.6 is built with cmake
yum -y install make gcc gcc-c++ cmake bison-devel  ncurses-devel kernel-devel readline-devel openssl-devel openssl zlib zlib-devel pcre-devel perl perl-devel   #install dependency packages
cd /usr/local/src/
tar -zxf mysql-5.6.22.tar.gz
cd mysql-5.6.22
mkdir -p /data/mysql/data        #data directory
cmake  -DCMAKE_INSTALL_PREFIX=/usr/local/mysql -DMYSQL_DATADIR=/data/mysql/data -DSYSCONFDIR=/usr/local/mysql -DWITH_MYISAM_STORAGE_ENGINE=1 -DWITH_INNOBASE_STORAGE_ENGINE=1 -DWITH_MEMORY_STORAGE_ENGINE=1 -DWITH_PARTITION_STORAGE_ENGINE=1  -DMYSQL_UNIX_ADDR=/var/lib/mysql/mysql.sock  -DDEFAULT_CHARSET=utf8 -DDEFAULT_COLLATION=utf8_general_ci  -DEXTRA_CHARSETS:STRING=utf8,gbk  -DWITH_DEBUG=0
make              ##this will take quite a while
make install
groupadd mysql
useradd -s /sbin/nologin -g mysql mysql
/usr/local/mysql/scripts/mysql_install_db --basedir=/usr/local/mysql --datadir=/data/mysql/data --defaults-file=/usr/local/mysql/my.cnf --user=mysql
chown -R mysql.mysql /data/mysql
##Edit the configuration file:
mv /usr/local/mysql/my.cnf /usr/local/mysql/my.cnf.old
vi /usr/local/mysql/my.cnf
[mysqld]
basedir = /usr/local/mysql
datadir = /data/mysql/data
port = 3306
socket = /var/lib/mysql/mysql.sock
pid-file = /var/lib/mysql/mysql.pid
default_storage_engine = InnoDB
expire_logs_days = 14
max_binlog_size = 1G
binlog_cache_size = 10M
max_binlog_cache_size = 20M
slow_query_log
long_query_time = 2
slow_query_log_file = /data/mysql/logs/slowquery.log
open_files_limit = 65535
innodb = FORCE
innodb_buffer_pool_size = 4G
innodb_log_file_size = 1G
query_cache_size = 0
thread_cache_size = 64
table_definition_cache = 512
table_open_cache = 512
max_connections = 1000
sort_buffer_size = 10M
max_allowed_packet = 6M
sql_mode=NO_ENGINE_SUBSTITUTION,STRICT_TRANS_TABLES

[mysqld_safe]
log-error = /data/mysql/logs/error.log
  
[client]
socket = /var/lib/mysql/mysql.sock
port = 3306
 
##innodb_buffer_pool_size:
    Its main purpose is to cache InnoDB table indexes and data, and to buffer writes on insert;
    default value: 128M;
    on a dedicated MySQL server, 70%-80% of system memory is the sweet spot.
    If your system does not have much memory, check this parameter and set it lower (if the value is too large, MySQL will fail to start)
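##A rough sizing helper (a sketch, assuming a dedicated MySQL server; value in MB, adjust to taste):
total_mb=$(awk '/MemTotal/ {printf "%d", $2/1024}' /proc/meminfo)
echo "suggested innodb_buffer_pool_size: $((total_mb * 70 / 100))M"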
##Start MySQL:
cp -a /usr/local/mysql/support-files/mysql.server /etc/init.d/mysqld
chkconfig --add mysqld
chkconfig mysqld on
/etc/init.d/mysqld start
netstat -tlnp | grep mysql     ##check that it started
 
vi /etc/profile         ##update the PATH
##append at the end: export PATH=$PATH:/usr/local/mysql/bin
source /etc/profile
##Set the MySQL root password:
mysqladmin -u root password "123456"


    Set up master-slave replication for slave1 and slave2:

##First confirm which master's DRBD is in the primary state##
##Primary master:
[root@dbm138 ~]# cat /proc/drbd | grep ro
version: 8.3.16 (api:88/proto:86-97)
 0: cs:Connected ro:Primary/Secondary ds:UpToDate/UpToDate C r-----

##138 is primary
##Standby master:
[root@dbm139 ~]# cat /proc/drbd | grep ro
version: 8.3.16 (api:88/proto:86-97)
 0: cs:Connected ro:Secondary/Primary ds:UpToDate/UpToDate C r-----

##139 is secondary
##Confirmed: the primary master 138's DRBD is currently primary##
##Primary master:
##Enable binary logging:
vi /usr/local/mysql/my.cnf
Add under [mysqld]:
log-bin = /data/mysql/binlog/mysql-binlog      ##best to keep the binlogs in a directory of their own
 
mkdir /data/mysql/binlog/                   ##create the binlog directory
chown -R mysql.mysql /data/mysql
/etc/init.d/mysqld  restart        ##a reload does not seem to be enough
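##Optional check that binary logging is now active (a quick sketch, assuming the root password set earlier):
mysql -uroot -p123456 -e "show variables like 'log_bin'; show master status;"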
##slave1:
vi /usr/local/mysql/my.cnf
Add under [mysqld]:
server-id = 2
 
/etc/init.d/mysqld  restart        ##must be a restart
##slave2:
vi /usr/local/mysql/my.cnf
Add under [mysqld]:
server-id = 3
 
/etc/init.d/mysqld  restart        ##must be a restart
##Primary master:
##Create (grant) a replication account for each slave
mysql -uroot -p123456
grant replication slave on *.* to 'slave'@'192.168.247.140' identified by '123456';
grant replication slave on *.* to 'slave'@'192.168.247.141' identified by '123456';
flush privileges;
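##Optional: confirm the accounts from the shell (a quick check, using the root credentials above):
mysql -uroot -p123456 -e "show grants for 'slave'@'192.168.247.140'; show grants for 'slave'@'192.168.247.141';"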
##Primary master:
##Back up the data:
mysql -uroot -p123456
flush tables with read lock;         #lock all tables, read-only
show master status;                  #note the current binlog file and position; write these down
+---------------------+----------+--------------+------------------+-------------------+
| File                | Position | Binlog_Do_DB | Binlog_Ignore_DB | Executed_Gtid_Set |
+---------------------+----------+--------------+------------------+-------------------+
| mysql-binlog.000010 |      625 |              |                  |                   |
+---------------------+----------+--------------+------------------+-------------------+
1 row in set (0.03 sec)

##In another terminal: mysqldump -u root -p123456 --all-databases > /tmp/mysqldump.sql
##Back in the previous terminal: unlock tables;         #release the lock
##Primary master:
##Copy the dump to all slaves:
scp /tmp/mysqldump.sql 192.168.247.140:/tmp/
scp /tmp/mysqldump.sql 192.168.247.141:/tmp/
##slave1 and slave2:
##Import the data:
mysql -uroot -p123456 < /tmp/mysqldump.sql
 
##Start replication:
mysql -uroot -p123456
 
##Note: the IP specified here is the master-side VIP, 201##
change master to master_host='192.168.247.201',master_user='slave',master_password='123456',master_log_file='mysql-binlog.000010',master_log_pos=625,master_port=3306;

start slave;            ##start the slave threads
show slave status\G            ##check the status; both threads should show Yes
*************************** 1. row ***************************
               Slave_IO_State: Waiting for master to send event
                  Master_Host: 192.168.247.201
                  Master_User: slave
                  Master_Port: 3306
                Connect_Retry: 60
              Master_Log_File: mysql-binlog.000010
          Read_Master_Log_Pos: 625
               Relay_Log_File: mysql-relay-bin.000002
                Relay_Log_Pos: 286
        Relay_Master_Log_File: mysql-binlog.000010
             Slave_IO_Running: Yes
            Slave_SQL_Running: Yes
              Replicate_Do_DB: 
          Replicate_Ignore_DB: 
           Replicate_Do_Table: 
       Replicate_Ignore_Table: 
      Replicate_Wild_Do_Table: 
  Replicate_Wild_Ignore_Table: 
                   Last_Errno: 0
                   Last_Error: 
                 Skip_Counter: 0
          Exec_Master_Log_Pos: 625
              Relay_Log_Space: 459
              Until_Condition: None
               Until_Log_File: 
                Until_Log_Pos: 0
           Master_SSL_Allowed: No
           Master_SSL_CA_File: 
           Master_SSL_CA_Path: 
              Master_SSL_Cert: 
            Master_SSL_Cipher: 
               Master_SSL_Key: 
        Seconds_Behind_Master: 0
Master_SSL_Verify_Server_Cert: No
                Last_IO_Errno: 0
                Last_IO_Error: 
               Last_SQL_Errno: 0
               Last_SQL_Error: 
  Replicate_Ignore_Server_Ids: 
             Master_Server_Id: 1
                  Master_UUID: 95e83a45-668a-11e5-aa2d-000c299d90cb
             Master_Info_File: /data/mysql/data/master.info
                    SQL_Delay: 0
          SQL_Remaining_Delay: NULL
      Slave_SQL_Running_State: Slave has read all relay log; waiting for the slave I/O thread to update it
           Master_Retry_Count: 86400
                  Master_Bind: 
      Last_IO_Error_Timestamp: 
     Last_SQL_Error_Timestamp: 
               Master_SSL_Crl: 
           Master_SSL_Crlpath: 
           Retrieved_Gtid_Set: 
            Executed_Gtid_Set: 
                Auto_Position: 0
1 row in set (0.00 sec)

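##For routine monitoring, a one-liner like this pulls out just the key fields (a convenience sketch, same credentials as above):
mysql -uroot -p123456 -e 'show slave status\G' | egrep 'Slave_IO_Running|Slave_SQL_Running|Seconds_Behind_Master|Last_IO_Error|Last_SQL_Error'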




    Replication tests:

        Test 1: fail over DRBD on the master side (primary/standby switch) and check whether the slaves can still connect

##At this point the primary master is primary
##Primary master:
[root@dbm138 ~]# /etc/init.d/heartbeat restart   ##triggers a primary/standby failover
##slave1:
[root@localhost ~]# mysql -h 192.168.247.201 -u slave -p123456
Warning: Using a password on the command line interface can be insecure.
Welcome to the MySQL monitor.  Commands end with ; or \g.
Your MySQL connection id is 4
Server version: 5.6.22-log Source distribution

Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.

Oracle is a registered trademark of Oracle Corporation and/or its
affiliates. Other names may be trademarks of their respective
owners.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

mysql>           ##still accessible

        Test 2: perform a write on the master side and check whether it replicates to the slaves

##At this point the standby master is primary
##Standby master:
[root@dbm139 ~]# mysql -p123456
Warning: Using a password on the command line interface can be insecure.
Welcome to the MySQL monitor.  Commands end with ; or \g.
Your MySQL connection id is 5
Server version: 5.6.22-log Source distribution

Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.

Oracle is a registered trademark of Oracle Corporation and/or its
affiliates. Other names may be trademarks of their respective
owners.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

mysql> create database ku1;             ##create ku1
Query OK, 1 row affected (0.05 sec)

mysql> show databases;
+--------------------+
| Database           |
+--------------------+
| information_schema |
| ku1                |
| mysql              |
| performance_schema |
| test               |
+--------------------+
5 rows in set (0.07 sec)
##slave1:
mysql -p123456
mysql> show databases;
+--------------------+
| Database           |
+--------------------+
| information_schema |
| ku1                |
| mysql              |
| performance_schema |
| test               |
+--------------------+
5 rows in set (0.06 sec)
##slave2:
mysql -p123456
mysql> show databases;
+--------------------+
| Database           |
+--------------------+
| information_schema |
| ku1                |
| mysql              |
| performance_schema |
| test               |
+--------------------+
5 rows in set (0.06 sec)
##working as expected##

        Test 3: fail over the masters again, then write

##At this point the standby master is primary
##Standby master:
/etc/init.d/heartbeat  restart                ##primary/standby failover
##Now the primary master is primary again
##Primary master:
[root@dbm138 ~]# mysql -p123456
Warning: Using a password on the command line interface can be insecure.
Welcome to the MySQL monitor.  Commands end with ; or \g.
Your MySQL connection id is 3
Server version: 5.6.22-log Source distribution

Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.

Oracle is a registered trademark of Oracle Corporation and/or its
affiliates. Other names may be trademarks of their respective
owners.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

mysql> show databases;        ##check whether ku1 is there
+--------------------+
| Database           |
+--------------------+
| information_schema |
| ku1                |
| mysql              |
| performance_schema |
| test               |
+--------------------+
5 rows in set (0.09 sec)

mysql> create database ku2;              ##create ku2
Query OK, 1 row affected (0.06 sec)

mysql> show databases;
+--------------------+
| Database           |
+--------------------+
| information_schema |
| ku1                |
| ku2                |
| mysql              |
| performance_schema |
| test               |
+--------------------+
6 rows in set (0.00 sec)
##slave1:
mysql> show databases;
+--------------------+
| Database           |
+--------------------+
| information_schema |
| ku1                |
| ku2                |
| mysql              |
| performance_schema |
| test               |
+--------------------+
6 rows in set (0.00 sec)
##slave2:
mysql> show databases;
+--------------------+
| Database           |
+--------------------+
| information_schema |
| ku1                |
| ku2                |
| mysql              |
| performance_schema |
| test               |
+--------------------+
6 rows in set (0.00 sec)
##still accessible after the failover, and the data replicated as well





    Install Atlas:

        Reference: https://github.com/Qihoo360/Atlas/blob/master/README_ZH.md

Atlas is a MySQL-protocol-based data middleware project developed and maintained by the infrastructure team of Qihoo 360's Web Platform Department.
    It is built on the official MySQL-Proxy 0.8.2 release, with a large number of bugs fixed and many features added.
    The project is widely used inside 360; many MySQL workloads already run through the Atlas platform, which handles several billion read/write requests per day.
    Main features:
        a. read/write splitting
        b. slave load balancing
        c. IP filtering
        d. SQL statement black/white lists
        e. automatic table sharding
        f. automatic removal of failed DBs

        First: on every database server, create (grant) a database account for the Atlas hosts

            Atlas uses this account and password to connect to all of the database servers

##At this point the primary master is primary
##Primary master:
mysql> GRANT ALL PRIVILEGES ON *.* TO 'Atlas'@'192.168.247.132' identified by 'mysql';
Query OK, 0 rows affected (0.00 sec)

mysql> GRANT ALL PRIVILEGES ON *.* TO 'Atlas'@'192.168.247.133' identified by 'mysql';
Query OK, 0 rows affected (0.00 sec)

mysql> GRANT ALL PRIVILEGES ON *.* TO 'Atlas'@'192.168.247.200' identified by 'mysql';
Query OK, 0 rows affected (0.00 sec)

##Normally granting only to the VIP 247.200 would be enough, but to guard against possible issues the Atlas hosts' real IPs are granted as well
##slave1:
mysql> GRANT ALL PRIVILEGES ON *.* TO 'Atlas'@'192.168.247.132' identified by 'mysql';
Query OK, 0 rows affected (0.00 sec)

mysql> GRANT ALL PRIVILEGES ON *.* TO 'Atlas'@'192.168.247.133' identified by 'mysql';
Query OK, 0 rows affected (0.00 sec)

mysql> GRANT ALL PRIVILEGES ON *.* TO 'Atlas'@'192.168.247.200' identified by 'mysql';
Query OK, 0 rows affected (0.00 sec)
##slave2:
mysql> GRANT ALL PRIVILEGES ON *.* TO 'Atlas'@'192.168.247.132' identified by 'mysql';
Query OK, 0 rows affected (0.00 sec)

mysql> GRANT ALL PRIVILEGES ON *.* TO 'Atlas'@'192.168.247.133' identified by 'mysql';
Query OK, 0 rows affected (0.00 sec)

mysql> GRANT ALL PRIVILEGES ON *.* TO 'Atlas'@'192.168.247.200' identified by 'mysql';
Query OK, 0 rows affected (0.00 sec)
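##Before moving on, it may be worth checking from each Atlas host that the new account can reach every backend (a quick sketch; assumes a MySQL client is installed on the Atlas hosts):
for h in 192.168.247.201 192.168.247.140 192.168.247.141; do
    mysql -h$h -P3306 -uAtlas -pmysql -e 'select @@hostname, @@server_id;' && echo "$h OK"
done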


        Install Atlas: (Atlas primary, Atlas standby)

            Download: https://github.com/Qihoo360/Atlas/releases

cd /usr/local/src/
rpm -ivh Atlas-2.2.1.el6.x86_64.rpm
[root@localhost src]# rpm -ql Atlas | grep conf
/usr/local/mysql-proxy/conf/test.cnf
/usr/local/mysql-proxy/lib/mysql-proxy/lua/proxy/auto-config.lua

##Atlas configuration file: /usr/local/mysql-proxy/conf/test.cnf
##Encrypt the password 'mysql' of the Atlas account (the account granted on all databases above)
/usr/local/mysql-proxy/bin/encrypt mysql
TWbz0dlu35U=            ##the encrypted password

##Edit the configuration file:
mv /usr/local/mysql-proxy/conf/test.cnf /usr/local/mysql-proxy/conf/test.cnf.old
vi /usr/local/mysql-proxy/conf/test.cnf
[mysql-proxy]
plugins = admin,proxy    #default plugins, no need to change
admin-username=admin     #Atlas admin username
admin-password=admin     #Atlas admin password
admin-lua-script = /usr/local/mysql-proxy/lib/mysql-proxy/lua/admin.lua
proxy-backend-addresses = 192.168.247.201:3306    #master IP and port, here the VIP 201
proxy-read-only-backend-addresses = 192.168.247.140:3306,192.168.247.141:3306   #slave IPs and ports
#proxy-read-only-backend-addresses = 192.168.247.140:3306@1,192.168.247.141:3306@2   ##with weights of 1 and 2
pwds = Atlas:TWbz0dlu35U=, root:08xYdWX+7MBR/g==    #Atlas users and their encrypted passwords, comma-separated
daemon = true     #whether to run Atlas as a daemon; set to true in production
keepalive = true
#Atlas run mode: when true, Atlas starts two processes, a monitor and a worker; the monitor restarts the worker if it exits unexpectedly; when false there is only the worker; use false for development and debugging, true in production
event-threads = 4   #number of worker threads, has a big impact on Atlas performance; use 2 to 4 times the number of CPU cores
log-level = message   #log level: message, warning, critical, error, or debug
log-path = /usr/local/mysql-proxy/log   #log path
instance = test   #instance name
proxy-address = 0.0.0.0:3306   #IP and port of the Atlas proxy interface; clients connect here
admin-address = 0.0.0.0:2345   #IP and port of the Atlas admin interface
charset = utf8   #default character set

##Note: the Atlas configuration file must not contain Chinese-character comments, or startup will fail; use the comment-free version below##
[mysql-proxy]
plugins = admin,proxy
admin-username=admin
admin-password=admin
admin-lua-script = /usr/local/mysql-proxy/lib/mysql-proxy/lua/admin.lua
proxy-backend-addresses = 192.168.247.201:3306
proxy-read-only-backend-addresses = 192.168.247.140:3306,192.168.247.141:3306
pwds = Atlas:TWbz0dlu35U=
daemon = true
keepalive = true
event-threads = 4
log-level = message
log-path = /usr/local/mysql-proxy/log
instance = test
proxy-address = 0.0.0.0:3306
admin-address = 0.0.0.0:2345
charset = utf8

        Start Atlas: (Atlas primary, Atlas standby)

[root@localhost src]# /usr/local/mysql-proxy/bin/mysql-proxyd test start
OK: MySQL-Proxy of test is started
[root@localhost src]# netstat -tlnp | grep 3306
tcp        0      0 0.0.0.0:3306                0.0.0.0:*                   LISTEN      1594/mysql-proxy    
[root@localhost src]# netstat -tlnp | grep 2345
tcp        0      0 0.0.0.0:2345                0.0.0.0:*                   LISTEN      1594/mysql-proxy 
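##With keepalive = true, Atlas runs a monitor process plus a worker; a quick way to confirm both are up (a simple check, not an official Atlas command):
ps -ef | grep mysql-proxy | grep -v grep        ##expect two mysql-proxy processes: monitor and worker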

##Stop Atlas: /usr/local/mysql-proxy/bin/mysql-proxyd test stop
##Restart: /usr/local/mysql-proxy/bin/mysql-proxyd test restart
##Create an init script:
vi /etc/init.d/atlas
#!/bin/sh
#
#atlas:    Atlas Daemon
#
# chkconfig:    - 90 25
# description:  Atlas Daemon
#
# Source function library.
start()
{
        echo -n $"Starting atlas: "
        /usr/local/mysql-proxy/bin/mysql-proxyd test start
        echo
}
stop()
{
        echo -n $"Shutting down atlas: "
        /usr/local/mysql-proxy/bin/mysql-proxyd test stop
        echo
}
ATLAS="/usr/local/mysql-proxy/bin/mysql-proxyd"
[ -f $ATLAS ] || exit 1
# See how we were called.
case "$1" in
        start)
                start
                ;;
        stop)
                stop
                ;;
        restart)
                stop
                sleep 3
                start
                ;;
        *)
                echo $"Usage: $0 {start|stop|restart}"
                exit 1
esac
exit 0

chmod +x /etc/init.d/atlas
chkconfig atlas on

        Manage Atlas: (Atlas primary)

##Pick either Atlas host; here the Atlas primary is used##
##Pick any database server as a client, e.g. slave1
##slave1:
mysql -h192.168.247.132 -P2345 -uadmin -padmin           ##log in to the Atlas primary's admin interface##Atlas primary IP 132, admin port 2345, account admin
mysql> select * from help;         ##show the help
+----------------------------+---------------------------------------------------------+
| command                    | description                                             |
+----------------------------+---------------------------------------------------------+
| SELECT * FROM help         | shows this help                                         |
| SELECT * FROM backends     | lists the backends and their state                      |
| SET OFFLINE $backend_id    | offline backend server, $backend_id is backend_ndx's id |
| SET ONLINE $backend_id     | online backend server, ...                              |
| ADD MASTER $backend        | example: "add master 127.0.0.1:3306", ...               |
| ADD SLAVE $backend         | example: "add slave 127.0.0.1:3306", ...                |
| REMOVE BACKEND $backend_id | example: "remove backend 1", ...                        |
| SELECT * FROM clients      | lists the clients                                       |
| ADD CLIENT $client         | example: "add client 192.168.1.2", ...                  |
| REMOVE CLIENT $client      | example: "remove client 192.168.1.2", ...               |
| SELECT * FROM pwds         | lists the pwds                                          |
| ADD PWD $pwd               | example: "add pwd user:raw_password", ...               |
| ADD ENPWD $pwd             | example: "add enpwd user:encrypted_password", ...       |
| REMOVE PWD $pwd            | example: "remove pwd user", ...                         |
| SAVE CONFIG                | save the backends to config file                        |
| SELECT VERSION             | display the version of Atlas                            |
+----------------------------+---------------------------------------------------------+
16 rows in set (0.00 sec)

mysql> SELECT * FROM backends;       ##list the read/write backends (IP, port, state)
+-------------+----------------------+-------+------+
| backend_ndx | address              | state | type |
+-------------+----------------------+-------+------+
|           1 | 192.168.247.201:3306 | up    | rw   |
|           2 | 192.168.247.140:3306 | up    | ro   |
|           3 | 192.168.247.141:3306 | up    | ro   |
+-------------+----------------------+-------+------+
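##The admin commands listed in the help table can take a backend out of rotation and bring it back, for example (a sketch based on the help output above; backend_ndx 2 is slave1 here):
mysql> SET OFFLINE 2;            ##take slave1 out of rotation
mysql> SET ONLINE 2;             ##bring it back
mysql> SAVE CONFIG;              ##persist backend changes to the config file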


        Use Atlas as the proxy to actually access all of the databases:

##Still using the Atlas primary and slave1 for this test##
##slave1:
mysql -h192.168.247.132 -P3306 -uAtlas -pmysql         ##Atlas primary IP 132, proxy port 3306, account Atlas
mysql> SHOW VARIABLES LIKE 'server_id';
+---------------+-------+
| Variable_name | Value |
+---------------+-------+
| server_id     | 3     |
+---------------+-------+
1 row in set (0.01 sec)

mysql> SHOW VARIABLES LIKE 'server_id';
+---------------+-------+
| Variable_name | Value |
+---------------+-------+
| server_id     | 2     |
+---------------+-------+
1 row in set (0.01 sec)

mysql> SHOW VARIABLES LIKE 'server_id';
+---------------+-------+
| Variable_name | Value |
+---------------+-------+
| server_id     | 3     |
+---------------+-------+
1 row in set (0.00 sec)

mysql> SHOW VARIABLES LIKE 'server_id';
+---------------+-------+
| Variable_name | Value |
+---------------+-------+
| server_id     | 2     |
+---------------+-------+
1 row in set (0.00 sec)

##SHOW VARIABLES LIKE 'server_id'; is load-balanced across the read backends, so it returns each one's server-id in turn
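##The same rotation can also be observed from the shell over several fresh connections (a small loop for illustration; the exact order may vary):
for i in 1 2 3 4; do
    mysql -h192.168.247.132 -P3306 -uAtlas -pmysql -N -e "SHOW VARIABLES LIKE 'server_id';"
done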

mysql> create database ku3;
Query OK, 1 row affected (0.01 sec)

mysql> show databases;
+--------------------+
| Database           |
+--------------------+
| information_schema |
| ku1                |
| ku2                |
| ku3                |
| mysql              |
| performance_schema |
| test               |
+--------------------+
7 rows in set (0.00 sec)

##writes work as well





    Install keepalived for Atlas high availability:

        References: http://blog.csdn.net/jibcy/article/details/7826158

              http://bbs.nanjimao.com/thread-855-1-1.ht

        (Atlas primary, Atlas standby)

##Install dependencies:
yum -y install make gcc gcc-c++ bison-devel  ncurses-devel kernel-devel readline-devel pcre-devel openssl-devel openssl zlib zlib-devel pcre-devel perl perl-devel
##Install keepalived:
cd /usr/local/src/
wget http://www.keepalived.org/software/keepalived-1.2.15.tar.gz
tar -zxf keepalived-1.2.15.tar.gz
cd keepalived-1.2.15
./configure --prefix=/usr/local/keepalived
make
make install
 
##Copy the files into place:
cp -a /usr/local/keepalived/etc/rc.d/init.d/keepalived  /etc/init.d/
cp -a /usr/local/keepalived/etc/sysconfig/keepalived /etc/sysconfig/
mkdir /etc/keepalived/
cp -a /usr/local/keepalived/etc/keepalived/keepalived.conf /etc/keepalived/
cp -a /usr/local/keepalived/sbin/keepalived /usr/sbin/
 
##Note: the paths /etc/sysconfig/keepalived and /etc/keepalived/keepalived.conf must be correct,
##because the /etc/init.d/keepalived init script reads both of those files when it runs

        (Atlas primary)

##Edit the configuration:
mv /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.old
vi /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    notification_email {
      732233048@qq.com
    }
    notification_email_from root@localhost
    smtp_server 127.0.0.1
    smtp_connect_timeout 30
    router_id Atlas_ha
}
 
vrrp_instance VI_1 {
    state master
    interface eth0
    virtual_router_id 51
    priority 150
    advert_int 1
    nopreempt
 
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.247.200
    }
}
 
virtual_server 192.168.247.200 3306 {
    delay_loop 6
    #lb_algo wrr
    #lb_kind DR
    #persistence_timeout 50
    protocol TCP
    real_server 192.168.247.132 3306 {
        #weight 3
        notify_down /etc/keepalived/keepalived_monitor.sh
        TCP_CHECK {
            connect_timeout 10
            nb_get_retry 3
            delay_before_retry 3
            connect_port 3306
        }
    }
}

        (Atlas standby)

##Edit the configuration:
mv /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.old
vi /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    notification_email {
      732233048@qq.com
    }
    notification_email_from root@localhost
    smtp_server 127.0.0.1
    smtp_connect_timeout 30
    router_id Atlas_ha
}
 
vrrp_instance VI_1 {
    state backup
    interface eth0
    virtual_router_id 51
    priority 100
    advert_int 1
    #nopreempt
 
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.247.200
    }
}
 
virtual_server 192.168.247.200 3306 {
    delay_loop 6
    #lb_algo wrr
    #lb_kind DR
    #persistence_timeout 50
    protocol TCP
    real_server 192.168.247.133 3306 {
        #weight 3
        notify_down /etc/keepalived/keepalived_monitor.sh
        TCP_CHECK {
            connect_timeout 10
            nb_get_retry 3
            delay_before_retry 3
            connect_port 3306
        }
    }
}

        (Atlas primary, Atlas standby)

##Create the /etc/keepalived/keepalived_monitor.sh script:
vi /etc/keepalived/keepalived_monitor.sh
#!/bin/bash
# check Atlas server status

Atlas_status=`netstat -tlnp  | grep 0.0.0.0:3306 | grep LISTEN | wc -l`

if [ $Atlas_status -eq 0 ];then
  /etc/init.d/atlas start
  sleep 1
  Atlas_status=`netstat -tlnp  | grep 0.0.0.0:3306 | grep LISTEN | wc -l`
  if [ $Atlas_status -eq 0 ];then
    /etc/init.d/keepalived stop
  fi
fi
chmod 755 /etc/keepalived/keepalived_monitor.sh
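##The script can be exercised by hand before keepalived ever calls it (a manual dry run; stop Atlas first to see the restart path):
/etc/init.d/atlas stop
bash -x /etc/keepalived/keepalived_monitor.sh      ##should start Atlas again
netstat -tlnp | grep 0.0.0.0:3306                  ##port 3306 should be listening once more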

        (Atlas primary, Atlas standby)

##Send keepalived's log to its own file##
       Reference: http://chenwenming.blog.51cto.com/327092/745316
       Note:
       From CentOS 6.3 onward, syslog has been renamed rsyslog; its default config is /etc/rsyslog.conf
##Edit /etc/sysconfig/keepalived:
vi /etc/sysconfig/keepalived
# Options for keepalived. See `keepalived --help' output and keepalived(8) and
# keepalived.conf(5) man pages for a list of all options. Here are the most
# common ones :
#
# --vrrp               -P    Only run with VRRP subsystem.
# --check              -C    Only run with Health-checker subsystem.
# --dont-release-vrrp  -V    Dont remove VRRP VIPs & VROUTEs on daemon stop.
# --dont-release-ipvs  -I    Dont remove IPVS topology on daemon stop.
# --dump-conf          -d    Dump the configuration data.
# --log-detail         -D    Detailed log messages.
# --log-facility       -S    0-7 Set local syslog facility (default=LOG_DAEMON)
#
 
#KEEPALIVED_OPTIONS="-D"
KEEPALIVED_OPTIONS="-D -d -S 0"               ##add this line at the end
 
##Edit /etc/rsyslog.conf:
vi /etc/rsyslog.conf
##Append this line at the end:
local0.*                                                /var/log/keepalived.log
 
## /etc/init.d/rsyslog restart
Shutting down system logger:                               [  OK  ]
Starting system logger:                                    [  OK  ]
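##To confirm the local0 facility really lands in the new file, send a test message through syslog (a quick check using the standard logger utility):
logger -p local0.info "keepalived log routing test"
tail -n 1 /var/log/keepalived.log                  ##the test message should show up here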

        (Atlas primary)

##Start keepalived:
/etc/init.d/keepalived start
Sep 29 10:58:12 localhost Keepalived[3544]: Starting Keepalived v1.2.15 (09/29,2015)
Sep 29 10:58:12 localhost Keepalived[3545]: Starting Healthcheck child process, pid=3547
Sep 29 10:58:12 localhost Keepalived[3545]: Starting VRRP child process, pid=3548
Sep 29 10:58:12 localhost Keepalived_vrrp[3548]: Netlink reflector reports IP 192.168.247.132 added
Sep 29 10:58:12 localhost Keepalived_healthcheckers[3547]: Initializing ipvs 2.6
Sep 29 10:58:12 localhost Keepalived_vrrp[3548]: Netlink reflector reports IP fe80::20c:29ff:fe5c:722c added
Sep 29 10:58:12 localhost Keepalived_vrrp[3548]: Registering Kernel netlink reflector
Sep 29 10:58:12 localhost Keepalived_vrrp[3548]: Registering Kernel netlink command channel
Sep 29 10:58:12 localhost Keepalived_vrrp[3548]: Registering gratuitous ARP shared channel
Sep 29 10:58:12 localhost Keepalived_vrrp[3548]: Opening file '/etc/keepalived/keepalived.conf'.
Sep 29 10:58:12 localhost Keepalived_vrrp[3548]: Configuration is using : 63286 Bytes
Sep 29 10:58:12 localhost Keepalived_vrrp[3548]: ------< Global definitions >------
Sep 29 10:58:12 localhost Keepalived_vrrp[3548]:  Router ID = Atlas_ha
Sep 29 10:58:12 localhost Keepalived_vrrp[3548]:  Smtp server = 127.0.0.1
Sep 29 10:58:12 localhost Keepalived_vrrp[3548]:  Smtp server connection timeout = 30
Sep 29 10:58:12 localhost Keepalived_vrrp[3548]:  Email notification from = root@localhost
Sep 29 10:58:12 localhost Keepalived_vrrp[3548]:  Email notification = 732233048@qq.com
Sep 29 10:58:12 localhost Keepalived_vrrp[3548]:  VRRP IPv4 mcast group = 224.0.0.18
Sep 29 10:58:12 localhost Keepalived_vrrp[3548]:  VRRP IPv6 mcast group = 224.0.0.18
Sep 29 10:58:12 localhost Keepalived_vrrp[3548]: ------< VRRP Topology >------
Sep 29 10:58:12 localhost Keepalived_vrrp[3548]:  VRRP Instance = VI_1
Sep 29 10:58:12 localhost Keepalived_vrrp[3548]:    Want State = BACKUP
Sep 29 10:58:12 localhost Keepalived_vrrp[3548]:    Runing on device = eth0
Sep 29 10:58:12 localhost Keepalived_vrrp[3548]:    Gratuitous ARP repeat = 5
Sep 29 10:58:12 localhost Keepalived_vrrp[3548]:    Gratuitous ARP refresh repeat = 1
Sep 29 10:58:12 localhost Keepalived_vrrp[3548]:    Virtual Router ID = 51
Sep 29 10:58:12 localhost Keepalived_vrrp[3548]:    Priority = 150
Sep 29 10:58:12 localhost Keepalived_vrrp[3548]:    Advert interval = 1sec
Sep 29 10:58:12 localhost Keepalived_vrrp[3548]:    Preempt disabled
Sep 29 10:58:12 localhost Keepalived_vrrp[3548]:    Authentication type = SIMPLE_PASSWORD
Sep 29 10:58:12 localhost Keepalived_vrrp[3548]:    Password = 1111
Sep 29 10:58:12 localhost Keepalived_vrrp[3548]:    Virtual IP = 1
Sep 29 10:58:12 localhost Keepalived_vrrp[3548]:      192.168.247.200/32 dev eth0 scope global
Sep 29 10:58:12 localhost Keepalived_vrrp[3548]: Using LinkWatch kernel netlink reflector...
Sep 29 10:58:12 localhost Keepalived_vrrp[3548]: VRRP_Instance(VI_1) Entering BACKUP STATE
Sep 29 10:58:12 localhost Keepalived_vrrp[3548]: VRRP sockpool: [ifindex(2), proto(112), unicast(0), fd(10,11)]
Sep 29 10:58:12 localhost Keepalived_healthcheckers[3547]: Netlink reflector reports IP 192.168.247.132 added
Sep 29 10:58:12 localhost Keepalived_healthcheckers[3547]: Netlink reflector reports IP fe80::20c:29ff:fe5c:722c added
Sep 29 10:58:12 localhost Keepalived_healthcheckers[3547]: Registering Kernel netlink reflector
Sep 29 10:58:12 localhost Keepalived_healthcheckers[3547]: Registering Kernel netlink command channel
Sep 29 10:58:12 localhost Keepalived_healthcheckers[3547]: Opening file '/etc/keepalived/keepalived.conf'.
Sep 29 10:58:12 localhost Keepalived_healthcheckers[3547]: Configuration is using : 11723 Bytes
Sep 29 10:58:12 localhost Keepalived_healthcheckers[3547]: IPVS: Scheduler or persistence engine not found
Sep 29 10:58:12 localhost Keepalived_healthcheckers[3547]: IPVS: No such process
Sep 29 10:58:12 localhost Keepalived_healthcheckers[3547]: ------< Global definitions >------
Sep 29 10:58:12 localhost Keepalived_healthcheckers[3547]:  Router ID = Atlas_ha
Sep 29 10:58:12 localhost Keepalived_healthcheckers[3547]:  Smtp server = 127.0.0.1
Sep 29 10:58:12 localhost Keepalived_healthcheckers[3547]:  Smtp server connection timeout = 30
Sep 29 10:58:12 localhost Keepalived_healthcheckers[3547]:  Email notification from = root@localhost
Sep 29 10:58:12 localhost Keepalived_healthcheckers[3547]:  Email notification = 732233048@qq.com
Sep 29 10:58:12 localhost Keepalived_healthcheckers[3547]:  VRRP IPv4 mcast group = 224.0.0.18
Sep 29 10:58:12 localhost Keepalived_healthcheckers[3547]:  VRRP IPv6 mcast group = 224.0.0.18
Sep 29 10:58:12 localhost Keepalived_healthcheckers[3547]: ------< SSL definitions >------
Sep 29 10:58:12 localhost Keepalived_healthcheckers[3547]:  Using autogen SSL context
Sep 29 10:58:12 localhost Keepalived_healthcheckers[3547]: ------< LVS Topology >------
Sep 29 10:58:12 localhost Keepalived_healthcheckers[3547]:  System is compiled with LVS v1.2.1
Sep 29 10:58:12 localhost Keepalived_healthcheckers[3547]:  VIP = 192.168.247.200, VPORT = 3306
Sep 29 10:58:12 localhost Keepalived_healthcheckers[3547]:    delay_loop = 6, lb_algo = 
Sep 29 10:58:12 localhost Keepalived_healthcheckers[3547]:    protocol = TCP
Sep 29 10:58:12 localhost Keepalived_healthcheckers[3547]:    alpha is OFF, omega is OFF
Sep 29 10:58:12 localhost Keepalived_healthcheckers[3547]:    quorum = 1, hysteresis = 0
Sep 29 10:58:12 localhost Keepalived_healthcheckers[3547]:    lb_kind = NAT
Sep 29 10:58:12 localhost Keepalived_healthcheckers[3547]:    RIP = 192.168.247.132, RPORT = 3306, WEIGHT = 1
Sep 29 10:58:12 localhost Keepalived_healthcheckers[3547]:      -> Notify script DOWN = /etc/keepalived/keepalived_monitor.sh
Sep 29 10:58:12 localhost Keepalived_healthcheckers[3547]: ------< Health checkers >------
Sep 29 10:58:12 localhost Keepalived_healthcheckers[3547]:  [192.168.247.132]:3306
Sep 29 10:58:12 localhost Keepalived_healthcheckers[3547]:    Keepalive method = TCP_CHECK
Sep 29 10:58:12 localhost Keepalived_healthcheckers[3547]:    Connection dest = [192.168.247.132]:3306
Sep 29 10:58:12 localhost Keepalived_healthcheckers[3547]:    Connection timeout = 10
Sep 29 10:58:12 localhost Keepalived_healthcheckers[3547]: Using LinkWatch kernel netlink reflector...
Sep 29 10:58:12 localhost Keepalived_healthcheckers[3547]: Activating healthchecker for service [192.168.247.132]:3306
Sep 29 10:58:15 localhost Keepalived_vrrp[3548]: VRRP_Instance(VI_1) Transition to MASTER STATE
Sep 29 10:58:16 localhost Keepalived_vrrp[3548]: VRRP_Instance(VI_1) Entering MASTER STATE
Sep 29 10:58:16 localhost Keepalived_vrrp[3548]: VRRP_Instance(VI_1) setting protocol VIPs.
Sep 29 10:58:16 localhost Keepalived_vrrp[3548]: VRRP_Instance(VI_1) Sending gratuitous ARPs on eth0 for 192.168.247.200
Sep 29 10:58:16 localhost Keepalived_healthcheckers[3547]: Netlink reflector reports IP 192.168.247.200 added
Sep 29 10:58:21 localhost Keepalived_vrrp[3548]: VRRP_Instance(VI_1) Sending gratuitous ARPs on eth0 for 192.168.247.200

##entered the MASTER state
##bound VIP 200
##health-checking IP 132

##Enable start on boot
chkconfig keepalived on

        (Atlas standby)

##Start:
/etc/init.d/keepalived start
Sep 29 11:01:55 localhost Keepalived[3274]: Starting Keepalived v1.2.15 (09/29,2015)
Sep 29 11:01:55 localhost Keepalived[3275]: Starting Healthcheck child process, pid=3277
Sep 29 11:01:55 localhost Keepalived[3275]: Starting VRRP child process, pid=3278
Sep 29 11:01:55 localhost Keepalived_healthcheckers[3277]: Initializing ipvs 2.6
Sep 29 11:01:55 localhost Keepalived_vrrp[3278]: Netlink reflector reports IP 192.168.247.133 added
Sep 29 11:01:55 localhost Keepalived_vrrp[3278]: Netlink reflector reports IP fe80::20c:29ff:fe01:3824 added
Sep 29 11:01:55 localhost Keepalived_healthcheckers[3277]: Netlink reflector reports IP 192.168.247.133 added
Sep 29 11:01:55 localhost Keepalived_vrrp[3278]: Registering Kernel netlink reflector
Sep 29 11:01:55 localhost Keepalived_vrrp[3278]: Registering Kernel netlink command channel
Sep 29 11:01:55 localhost Keepalived_vrrp[3278]: Registering gratuitous ARP shared channel
Sep 29 11:01:55 localhost Keepalived_vrrp[3278]: Opening file '/etc/keepalived/keepalived.conf'.
Sep 29 11:01:55 localhost Keepalived_healthcheckers[3277]: Netlink reflector reports IP fe80::20c:29ff:fe01:3824 added
Sep 29 11:01:55 localhost Keepalived_healthcheckers[3277]: Registering Kernel netlink reflector
Sep 29 11:01:55 localhost Keepalived_healthcheckers[3277]: Registering Kernel netlink command channel
Sep 29 11:01:55 localhost Keepalived_healthcheckers[3277]: Opening file '/etc/keepalived/keepalived.conf'.
Sep 29 11:01:55 localhost Keepalived_vrrp[3278]: Configuration is using : 63266 Bytes
Sep 29 11:01:55 localhost Keepalived_vrrp[3278]: ------< Global definitions >------
Sep 29 11:01:55 localhost Keepalived_vrrp[3278]:  Router ID = Atlas_ha
Sep 29 11:01:55 localhost Keepalived_vrrp[3278]:  Smtp server = 127.0.0.1
Sep 29 11:01:55 localhost Keepalived_vrrp[3278]:  Smtp server connection timeout = 30
Sep 29 11:01:55 localhost Keepalived_vrrp[3278]:  Email notification from = root@localhost
Sep 29 11:01:55 localhost Keepalived_vrrp[3278]:  Email notification = 732233048@qq.com
Sep 29 11:01:55 localhost Keepalived_vrrp[3278]:  VRRP IPv4 mcast group = 224.0.0.18
Sep 29 11:01:55 localhost Keepalived_vrrp[3278]:  VRRP IPv6 mcast group = 224.0.0.18
Sep 29 11:01:55 localhost Keepalived_vrrp[3278]: ------< VRRP Topology >------
Sep 29 11:01:55 localhost Keepalived_vrrp[3278]:  VRRP Instance = VI_1
Sep 29 11:01:55 localhost Keepalived_vrrp[3278]:    Want State = BACKUP
Sep 29 11:01:55 localhost Keepalived_vrrp[3278]:    Runing on device = eth0
Sep 29 11:01:55 localhost Keepalived_vrrp[3278]:    Gratuitous ARP repeat = 5
Sep 29 11:01:55 localhost Keepalived_vrrp[3278]:    Gratuitous ARP refresh repeat = 1
Sep 29 11:01:55 localhost Keepalived_vrrp[3278]:    Virtual Router ID = 51
Sep 29 11:01:55 localhost Keepalived_vrrp[3278]:    Priority = 100
Sep 29 11:01:55 localhost Keepalived_vrrp[3278]:    Advert interval = 1sec
Sep 29 11:01:55 localhost Keepalived_vrrp[3278]:    Authentication type = SIMPLE_PASSWORD
Sep 29 11:01:55 localhost Keepalived_vrrp[3278]:    Password = 1111
Sep 29 11:01:55 localhost Keepalived_vrrp[3278]:    Virtual IP = 1
Sep 29 11:01:55 localhost Keepalived_vrrp[3278]:      192.168.247.200/32 dev eth0 scope global
Sep 29 11:01:55 localhost Keepalived_vrrp[3278]: Using LinkWatch kernel netlink reflector...
Sep 29 11:01:55 localhost Keepalived_healthcheckers[3277]: Configuration is using : 11703 Bytes
Sep 29 11:01:55 localhost Keepalived_vrrp[3278]: VRRP_Instance(VI_1) Entering BACKUP STATE
Sep 29 11:01:55 localhost Keepalived_vrrp[3278]: VRRP sockpool: [ifindex(2), proto(112), unicast(0), fd(10,11)]
Sep 29 11:01:55 localhost Keepalived_healthcheckers[3277]: IPVS: Scheduler or persistence engine not found
Sep 29 11:01:55 localhost Keepalived_healthcheckers[3277]: IPVS: No such process
Sep 29 11:01:55 localhost Keepalived_healthcheckers[3277]: ------< Global definitions >------
Sep 29 11:01:55 localhost Keepalived_healthcheckers[3277]:  Router ID = Atlas_ha
Sep 29 11:01:55 localhost Keepalived_healthcheckers[3277]:  Smtp server = 127.0.0.1
Sep 29 11:01:55 localhost Keepalived_healthcheckers[3277]:  Smtp server connection timeout = 30
Sep 29 11:01:55 localhost Keepalived_healthcheckers[3277]:  Email notification from = root@localhost
Sep 29 11:01:55 localhost Keepalived_healthcheckers[3277]:  Email notification = 732233048@qq.com
Sep 29 11:01:55 localhost Keepalived_healthcheckers[3277]:  VRRP IPv4 mcast group = 224.0.0.18
Sep 29 11:01:55 localhost Keepalived_healthcheckers[3277]:  VRRP IPv6 mcast group = 224.0.0.18
Sep 29 11:01:55 localhost Keepalived_healthcheckers[3277]: ------< SSL definitions >------
Sep 29 11:01:55 localhost Keepalived_healthcheckers[3277]:  Using autogen SSL context
Sep 29 11:01:55 localhost Keepalived_healthcheckers[3277]: ------< LVS Topology >------
Sep 29 11:01:55 localhost Keepalived_healthcheckers[3277]:  System is compiled with LVS v1.2.1
Sep 29 11:01:55 localhost Keepalived_healthcheckers[3277]:  VIP = 192.168.247.200, VPORT = 3306
Sep 29 11:01:55 localhost Keepalived_healthcheckers[3277]:    delay_loop = 6, lb_algo = 
Sep 29 11:01:55 localhost Keepalived_healthcheckers[3277]:    protocol = TCP
Sep 29 11:01:55 localhost Keepalived_healthcheckers[3277]:    alpha is OFF, omega is OFF
Sep 29 11:01:55 localhost Keepalived_healthcheckers[3277]:    quorum = 1, hysteresis = 0
Sep 29 11:01:55 localhost Keepalived_healthcheckers[3277]:    lb_kind = NAT
Sep 29 11:01:55 localhost Keepalived_healthcheckers[3277]:    RIP = 192.168.247.133, RPORT = 3306, WEIGHT = 1
Sep 29 11:01:55 localhost Keepalived_healthcheckers[3277]:      -> Notify script DOWN = /etc/keepalived/keepalived_monitor.sh
Sep 29 11:01:55 localhost Keepalived_healthcheckers[3277]: ------< Health checkers >------
Sep 29 11:01:55 localhost Keepalived_healthcheckers[3277]:  [192.168.247.133]:3306
Sep 29 11:01:55 localhost Keepalived_healthcheckers[3277]:    Keepalive method = TCP_CHECK
Sep 29 11:01:55 localhost Keepalived_healthcheckers[3277]:    Connection dest = [192.168.247.133]:3306
Sep 29 11:01:55 localhost Keepalived_healthcheckers[3277]:    Connection timeout = 10
Sep 29 11:01:55 localhost Keepalived_healthcheckers[3277]: Using LinkWatch kernel netlink reflector...
Sep 29 11:01:55 localhost Keepalived_healthcheckers[3277]: Activating healthchecker for service [192.168.247.133]:3306

##entered the BACKUP state
##health-checking IP 133

        Check which machine the VIP is bound to:

            (Atlas primary)

# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN 
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 00:0c:29:5c:72:2c brd ff:ff:ff:ff:ff:ff
    inet 192.168.247.132/24 brd 192.168.247.255 scope global eth0
    inet 192.168.247.200/32 scope global eth0
    inet6 fe80::20c:29ff:fe5c:722c/64 scope link 
       valid_lft forever preferred_lft forever
       
##VIP 200 is bound to the Atlas primary

##Enable start on boot
chkconfig keepalived on

            (Atlas standby)

# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN 
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 00:0c:29:01:38:24 brd ff:ff:ff:ff:ff:ff
    inet 192.168.247.133/24 brd 192.168.247.255 scope global eth0
    inet6 fe80::20c:29ff:fe01:3824/64 scope link 
       valid_lft forever preferred_lft forever


        Failover tests between the Atlas primary and standby:

            Test 1: stop the Atlas service on the Atlas primary

##Atlas primary:
/etc/init.d/atlas stop

##Check the log:
Sep 29 11:06:42 localhost Keepalived_healthcheckers[3547]: TCP connection to [192.168.247.132]:3306 failed !!!
Sep 29 11:06:42 localhost Keepalived_healthcheckers[3547]: Removing service [192.168.247.132]:3306 from VS [192.168.247.200]:3306
Sep 29 11:06:42 localhost Keepalived_healthcheckers[3547]: IPVS: Service not defined
Sep 29 11:06:42 localhost Keepalived_healthcheckers[3547]: Executing [/etc/keepalived/keepalived_monitor.sh] for service [192.168.247.132]:3306 in VS [192.168.247.200]:3306
Sep 29 11:06:42 localhost Keepalived_healthcheckers[3547]: Lost quorum 1-0=1 > 0 for VS [192.168.247.200]:3306
Sep 29 11:06:42 localhost Keepalived_healthcheckers[3547]: Remote SMTP server [127.0.0.1]:25 connected.
Sep 29 11:06:43 localhost Keepalived_healthcheckers[3547]: SMTP alert successfully sent.
Sep 29 11:06:48 localhost Keepalived_healthcheckers[3547]: TCP connection to [192.168.247.132]:3306 success.
Sep 29 11:06:48 localhost Keepalived_healthcheckers[3547]: Adding service [192.168.247.132]:3306 to VS [192.168.247.200]:3306
Sep 29 11:06:48 localhost Keepalived_healthcheckers[3547]: IPVS: Service not defined
Sep 29 11:06:48 localhost Keepalived_healthcheckers[3547]: Gained quorum 1+0=1 <= 1 for VS [192.168.247.200]:3306
Sep 29 11:06:48 localhost Keepalived_healthcheckers[3547]: Remote SMTP server [127.0.0.1]:25 connected.
Sep 29 11:06:48 localhost Keepalived_healthcheckers[3547]: SMTP alert successfully sent.

##After the Atlas service is stopped, keepalived removes it from the cluster, then starts it again and adds it back

            Test 2: stop the Atlas service on the Atlas standby

##After the Atlas service is stopped, keepalived removes it from the cluster, then starts it again and adds it back

            Test 3: stop the keepalived service on the Atlas primary

##Atlas primary:
/etc/init.d/keepalived stop

##Check the log:
Sep 29 11:10:34 localhost Keepalived[3545]: Stopping Keepalived v1.2.15 (09/29,2015)
Sep 29 11:10:34 localhost Keepalived_vrrp[3548]: VRRP_Instance(VI_1) sending 0 priority
Sep 29 11:10:34 localhost Keepalived_vrrp[3548]: VRRP_Instance(VI_1) removing protocol VIPs.
Sep 29 11:10:34 localhost Keepalived_healthcheckers[3547]: Netlink reflector reports IP 192.168.247.200 removed
Sep 29 11:10:34 localhost Keepalived_healthcheckers[3547]: Removing service [192.168.247.132]:3306 from VS [192.168.247.200]:3306
Sep 29 11:10:34 localhost Keepalived_healthcheckers[3547]: IPVS: Service not defined
Sep 29 11:10:34 localhost Keepalived_healthcheckers[3547]: IPVS: No such service

##the virtual IP is removed
##Check the log on the Atlas standby:
Sep 29 11:10:35 localhost Keepalived_vrrp[3278]: VRRP_Instance(VI_1) Transition to MASTER STATE
Sep 29 11:10:36 localhost Keepalived_vrrp[3278]: VRRP_Instance(VI_1) Entering MASTER STATE
Sep 29 11:10:36 localhost Keepalived_vrrp[3278]: VRRP_Instance(VI_1) setting protocol VIPs.
Sep 29 11:10:36 localhost Keepalived_healthcheckers[3277]: Netlink reflector reports IP 192.168.247.200 added
Sep 29 11:10:36 localhost Keepalived_vrrp[3278]: VRRP_Instance(VI_1) Sending gratuitous ARPs on eth0 for 192.168.247.200
Sep 29 11:10:41 localhost Keepalived_vrrp[3278]: VRRP_Instance(VI_1) Sending gratuitous ARPs on eth0 for 192.168.247.200

##transitions to the MASTER state
##binds VIP 200

            Test 4: start keepalived on the Atlas primary again

/etc/init.d/keepalived start

##Result: the Atlas primary does not become master again but stays as backup, because the configuration uses the nopreempt parameter, i.e. no preemption (note: this parameter can only be set on the higher-priority node)

            Test 5: stop the keepalived service on the Atlas standby

##this assumes the Atlas standby is currently serving traffic
##the failover back works normally

            Test 6: stop the Atlas service on the Atlas primary and prevent it from starting again

##keepalived on the Atlas primary is stopped (by the monitor script), and a normal primary/standby failover follows







    Test accessing the databases through VIP 200

##Of the two Atlas hosts, one is now serving traffic and the other is idle##
##Pick any database server, e.g. slave2: it is only used as a client here, any of them will do
##You can also log in from Windows with Navicat: IP 200, port 3306, account Atlas

##slave2:
mysql -h192.168.247.200 -P3306 -uAtlas -pmysql          ##must be the VIP 200 here, port 3306, account Atlas
mysql> show databases;         ##list the databases
+--------------------+
| Database           |
+--------------------+
| information_schema |
| ku1                |
| ku2                |
| ku3                |
| ku4                |
| mysql              |
| performance_schema |
| test               |
+--------------------+
8 rows in set (0.01 sec)


mysql> SHOW VARIABLES LIKE 'server_id';   ##rotates through the read backends' server-ids; read load balancing works
+---------------+-------+
| Variable_name | Value |
+---------------+-------+
| server_id     | 2     |
+---------------+-------+
1 row in set (0.01 sec)

mysql> SHOW VARIABLES LIKE 'server_id';
+---------------+-------+
| Variable_name | Value |
+---------------+-------+
| server_id     | 3     |
+---------------+-------+
1 row in set (0.01 sec)

mysql> SHOW VARIABLES LIKE 'server_id';
+---------------+-------+
| Variable_name | Value |
+---------------+-------+
| server_id     | 2     |
+---------------+-------+
1 row in set (0.01 sec)

mysql> SHOW VARIABLES LIKE 'server_id';
+---------------+-------+
| Variable_name | Value |
+---------------+-------+
| server_id     | 3     |
+---------------+-------+
1 row in set (0.00 sec)


mysql> create database ku5;        ##create ku5
Query OK, 1 row affected (0.01 sec)

mysql> show databases;
+--------------------+
| Database           |
+--------------------+
| information_schema |
| ku1                |
| ku2                |
| ku3                |
| ku4                |
| ku5                |
| mysql              |
| performance_schema |
| test               |
+--------------------+
9 rows in set (0.00 sec)
##Check on any other database server whether the data has replicated
##e.g. slave1:
mysql -p123456
mysql> show databases;       ##the data replicated correctly
+--------------------+
| Database           |
+--------------------+
| information_schema |
| ku1                |
| ku2                |
| ku3                |
| ku4                |
| ku5                |
| mysql              |
| performance_schema |
| test               |
+--------------------+
9 rows in set (0.01 sec)
##Test: fail over between the Atlas primary and standby and check whether access via 200 still works##
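##One way to run that last check (a sketch, assuming the Atlas primary currently holds the VIP):
##On the Atlas node that currently holds VIP 200:
/etc/init.d/keepalived stop
##From slave2 (or any client), the VIP should still answer once the standby has taken over:
mysql -h192.168.247.200 -P3306 -uAtlas -pmysql -e 'select 1; show databases;'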