- 第一步mariadb主從配置
- 第二步ProxySql讀寫分離
- 第三步keepalived高可用proxysql
- 第四步測試mysql讀寫分離高可用是否成功
第一步mariadb主從配置:
首先配置好mariadb主從同步,這裏採用的一主兩從,node1和node2爲半同步複製,node1和node3爲異步複製,也能夠所有采用異步複製,根據業務需求配置便可
- 保證各服務器節點時間同步,可參考 時間同步設置方案 http://www.longma.tk/p=629
- 初始化環境 node1和 node2 ,爲保證不受其它實驗干擾,建議恢復至初始狀態,新裝的mariadb便可,不要有其它實驗項目干擾
主服務器node1配置:
1
2
3
4
5
6
7
8
9
10
11
|
配置文件
vim /etc/my.cnf.d/server.cnf
[mysqld]
skip_name_resolve=ON
innodb_file_per_table=ON
server_id=1
log_bin=mysql-bin
systemctl start mariadb.service
mysql> GRANT REPLICATION SLAVE,REPLICATION CLIENT ON *.* TO 'repluser'@'172.18.43.%' IDENTIFIED BY 'replpass';
mysql> FLUSH PRIVILEGES;
|
從服務器node2配置:
1
2
3
4
5
6
7
8
9
10
11
12
13
|
配置文件
vim /etc/my.cnf.d/server.cnf
[mysqld]
skip_name_resolve=ON
innodb_file_per_table=ON
server_id=2
relay_log=relay-log
systemctl start mariadb.service
mysql> CHANGE MASTER TO MASTER_HOST='172.18.43.8',MASTER_USER='repluser',MASTER_PASSWORD='replpass'
,MASTER_LOG_FILE='mysql-bin.000003',MASTER_LOG_POS=#;
#MASTER_LOG_POS=#; #去主節點show master status查看
mysql> START SLAVE; #啓動IO和SQL兩個線程
|
主服務器從服務器配置完成之後驗證一下主從複製:
1
2
3
|
Node1: CREATE DATABASE mydb;
Node2: SHOW DATABASES; #肯定數據庫是否能夠複製,此時主從複製完成
|
半同步配置:
主節點Node1配置
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
|
mysql> INSTALL PLUGIN rpl_semi_sync_master SONAME 'semisync_master.so';
mysql> SHOW PLUGINS; #肯定安裝完成
mysql> SHOW GLOBAL VARIABLES LIKE 'rpl_semi%';
+------------------------------------+-------+
| Variable_name | Value |
+------------------------------------+-------+
| rpl_semi_sync_master_enabled | OFF |
| rpl_semi_sync_master_timeout | 10000 | #單位毫秒,等待從節點超時時長
| rpl_semi_sync_master_trace_level | 32 | #定義日誌級別
| rpl_semi_sync_master_wait_no_slave | ON | # 沒有從節點要不要等待
+------------------------------------+-------+
mysql> SET GLOBAL rpl_semi_sync_master_enabled=ON;
mysql> SHOW GLOBAL STATUS LIKE '%semi%'; #發現同步客戶端仍是0,須要重啓從節點的IO_THREAD,默認是異步
+--------------------------------------------+-------+
| Variable_name | Value |
+--------------------------------------------+-------+
| Rpl_semi_sync_master_clients | 1 |
| Rpl_semi_sync_master_net_avg_wait_time | 987 |
| Rpl_semi_sync_master_net_wait_time | 987 |
| Rpl_semi_sync_master_net_waits | 1 |
|
從節點Node2配置:
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
|
mysql> INSTALL PLUGIN rpl_semi_sync_slave SONAME 'semisync_slave.so';
mysql> SHOW PLUGINS; #肯定安裝完成
mysql> SHOW GLOBAL VARIABLES LIKE 'rpl_semi%';
+---------------------------------+-------+
| Variable_name | Value |
+---------------------------------+-------+
| rpl_semi_sync_slave_enabled | OFF |
| rpl_semi_sync_slave_trace_level | 32 |
+---------------------------------+-------+
mysql> SET GLOBAL rpl_semi_sync_slave_enabled=ON;
mysql> STOP SLAVE IO_THREAD;
mysql> START SLAVE IO_THREAD;
|
測試半同步複製是否成功
1
2
3
4
5
|
mysql> use mydb;
mysql> CREATE TABLE tbl1 (id INT,name VARCHAR(100)); #主節點建立表
mysql> SHOW GLOBAL STATUS LIKE '%semi%';
mysql> SHOW TABLES; #而後去從節點查看 表是否生成
|
添加第二臺從服務器
1
2
3
4
5
6
7
8
|
Node3:
vim /etc/my.cnf.d/server.cnf
[mysqld]
skip_name_resolve=ON
innodb_file_per_table=ON
server_id=3
relay_log=relay-log
|
備份主節點數據並恢復到node3
1
2
3
4
|
root@node1# mysqldump --all-databases --master-data=2 --routines --triggers --events --lock-all-tables >/root/all.sql
less /root/all.sql #記錄了二進制日誌該在哪裏開始複製
mysql <all.sql #在node3導入all.sql數據庫
|
在node3從服務器執行在哪一個postion開始複製
1
2
3
4
5
6
7
8
|
mysql> CHANGE MASTER TO MASTER_HOST='172.18.43.8',MASTER_USER='repluser',MASTER_PASSWORD='replpass',MASTER_LOG_FILE='mysql-bin.000003',MASTER_LOG_POS=1069;
#注意默認從服務器是異步的,若是向作成半同步的,能夠安裝上面一個插件,具體操做能夠看半自動複製,這裏咱們採用異步
mysql> START SLAVE;
mysql> SHOW SLAVE STATUS\G #查看IO及SQL線程 是否啓動了
測試:
node1: CREATE DATABASE hidb;
查看其它節點是否都同步,若是都同步了下面咱們作
MYSQL讀寫分離
|
第二步ProxySql讀寫分離
1.下載及安裝proxysql,直接去 Proxysql官網 下載便可
2. yum install ./proxysql-1.3.6-1-centos7.x86_64.rpm 安裝咱們下載的proxysql數據庫
proxysql.cnf 配置
vim /etc/proxysql.cnf #將原先配置文件重命名,直接新建此文件添加如下配置
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
|
datadir="/var/lib/proxysql"
admin_variables=
{
admin_credentials="admin:admin"
mysql_ifaces="127.0.0.1:6032;/tmp/proxysql_admin.sock"
}
mysql_variables=
{
threads=4
max_connections=2048
default_query_delay=0
default_query_timeout=36000000
have_compress=true
poll_timeout=2000
interfaces="0.0.0.0:3306;/tmp/mysql.sock"
default_schema="information_schema"
stacksize=1048576
server_version="5.5.30"
connect_timeout_server=3000
monitor_history=600000
monitor_connect_interval=60000
monitor_ping_interval=10000
monitor_read_only_interval=1500
monitor_read_only_timeout=500
ping_interval_server=120000
ping_timeout_server=500
commands_stats=true
sessions_sort=true
connect_retries_on_failure=10
}
mysql_servers =
(
{
address = "172.18.43.8" # no default, required . If port is 0 , address is interpred as a Unix Socket Domain
port = 3306 # no default, required . If port is 0 , address is interpred as a Unix Socket Domain
hostgroup = 0 # no default, required
status = "ONLINE" # default: ONLINE
weight = 1 # default: 1
compression = 0 # default: 0
},
{
address = "172.18.43.88"
port = 3306
hostgroup = 1
status = "ONLINE" # default: ONLINE
weight = 1 # default: 1
compression = 0 # default: 0
},
{
address = "172.18.43.89"
port = 3306
hostgroup = 1
status = "ONLINE" # default: ONLINE
weight = 1 # default: 1
compression = 0 # default: 0
}
)
mysql_users:
(
{
username = "root"
password = "mageedu"
default_hostgroup = 0
max_connections=1000
default_schema="mydb"
active = 1
}
)
mysql_query_rules:
(
{
rule_id=1
active=1
match_pattern="^SELECT .* FOR UPDATE$"
destination_hostgroup=0
apply=1
}
)
scheduler=
(
)
mysql_replication_hostgroups=
(
{
writer_hostgroup=0
reader_hostgroup=1
}
)
|
主節點node1受權複製帳號
1
2
3
4
5
6
7
8
|
mysql> GRANT ALL ON *.* TO 'root'@'172.18.43.%' IDENTIFIED BY 'mageedu'; #proxysql須要受權一個帳號鏈接後端的mysql,由於咱們在節點受權,從節點帳號也就本身能給同步過去了
root@node1# service proxysql start #啓動proxysql
ss -tnl #默認是4線程的
mysql -uadmin -h127.0.0.1 -padmin -P 6032 #登陸proxysql本地管理接口
mysql> use main;
mysql> SELECT * FROM mysql_servers; #會看到咱們的mysql複製集羣
mysql> select * from mysql_users;
|
測試:
1
2
3
4
5
6
7
8
9
10
11
12
|
node1:
mysql -uroot -h172.18.43.200 -pmageedu
mysql> use mydb;
mysql> SHOW TABLES;
mysql> CREATE TABLE tbl3(name CHAR(20));
mysql> SHOW TABLES; #而後咱們去後端三個mysql去查看一下,有則讀寫分離成功
root@n1#
mysql -uroot -h172.18.43.200 -pmageedu -e "INSERT INTO mydb.tbl3 VALUES ('tom'),('jerry')";
mysql> SELECT * FROM mydb.tbl3; #去三個節點看一下
|
配置第二臺Proxysql
1
2
3
|
yum install ./proxysql-1.3.6-1-centos7.x86_64.rpm 安裝咱們下載的proxysql
scp /etc/proxysql.cnf root@n2:/etc/ #直接將第一臺proxysql配置文件傳給第二臺 proxysql
|
測試第二臺proxysql n2
1
2
3
4
5
6
|
mysql -uroot -h172.18.43.100 -pmageedu
mysql> use mydb;
mysql> SHOW TABLES;
mysql> CREATE TABLE tbl4(name CHAR(20));
mysql> SHOW TABLES; #而後咱們去後端三個mysql去查看一下,有則讀寫分離成功
|
第三步keepalived高可用proxysql
n1和n2兩個節點安裝keepalived
1
2
|
yum install keepalived -y
|
keepalived.conf
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
|
! Configuration File for keepalived
global_defs {
notification_email {
root@localhost
}
notification_email_from keepalive@localhost
smtp_server 127.0.0.1
smtp_connect_timeout 30
router_id node1
vrrp_mcast_group4 224.0.101.66
}
vrrp_instance myr {
state MASTER
interface eno16777736
virtual_router_id 67
priority 100
advert_int 1
authentication {
auth_type PASS
auth_pass 250250
}
virtual_ipaddress {
172.18.43.43/16 dev eno16777736
}
}
|
1
2
3
4
5
6
|
node2
scp node1:/etc/keepalived/keepalived.conf root@node2:/etc/keepalived
state BACKUP #修改狀態爲備用
priority 98 #修改優先級小於master
|
測試:
- 先啓動備用服務器systemctl start keepalived.service
1
2
3
4
5
6
7
8
9
10
|
Note: 此時備用服務器沒有發現主節點,自動轉爲主節點,並配置vip
systemctl status keepalived.service 查看日誌
ip addr list 查看ip是否添加vip
May 13 17:21:33 localhost.localdomain Keepalived_healthcheckers[1490]: Using LinkWatch kernel netlink reflec.....
May 13 17:21:36 localhost.localdomain Keepalived_vrrp[1491]: VRRP_Instance(myr) Transition to MASTER STATE
May 13 17:21:37 localhost.localdomain Keepalived_vrrp[1491]: VRRP_Instance(myr) Entering MASTER STATE
May 13 17:21:37 localhost.localdomain Keepalived_vrrp[1491]: VRRP_Instance(myr) setting protocol VIPs.
May 13 17:21:37 localhost.localdomain Keepalived_healthcheckers[1490]: Netlink reflector reports IP 172.16.0...e
|
- 啓用主節點服務器:
1
2
3
4
5
6
7
|
systemctl start keepalived.service
Note:去備用節點查看狀態systemctl status keepalived.service 發現狀態已經被優先級高的搶爲master
此時本機變爲backup,並且從服務器已經刪除vip
查看主節點狀態信息systemctl status keepalived.service
ip addr list #查看ip是否漂移成功
|
第四步測試mysql讀寫分離高可用是否成功
1
2
3
4
5
6
7
|
mysql -uroot -h172.18.43.43 -pmageedu #訪問VIP地址,肯定能夠正常鏈接
CREATE DATABASE hellodb; #去三個mariadb節點手動查看hellodb數據庫是否建立成功
systemctl stop keepalived #中止n1的keepalived,測試VIP漂移到n2後是否還能夠正常訪問
mysql -uroot -h172.18.43.43 -pmageedu
use hellodb;
CREATE TABLE tbl6 (name VARCHAR(20)); #去咱們的三個mariadb查看錶是否建立成功
|