PXC 5.7 (Percona XtraDB Cluster) + HAProxy + Keepalived Cluster Deployment

Building a cluster environment with Percona XtraDB Cluster + HAProxy

Environment preparation and server information:

Configure the firewall

firewall-cmd --add-port=3306/tcp --permanent
firewall-cmd --add-port=4567/tcp --permanent
firewall-cmd --add-port=4568/tcp --permanent
firewall-cmd --add-port=4444/tcp --permanent
firewall-cmd --reload
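
To confirm that the ports are actually open after the reload, the active port list can be checked on each node (a verification step, not part of the original procedure):

firewall-cmd --list-ports
# expected to include: 3306/tcp 4567/tcp 4568/tcp 4444/tcp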


Install the official Percona yum repository package

yum install http://www.percona.com/downloads/percona-release/redhat/0.1-4/percona-release-0.1-4.noarch.rpm

  

Install PXC

yum -y install Percona-XtraDB-Cluster-57
yum install Percona-Server-client-57

  

Create the mysql user and group

groupadd mysql
useradd -g mysql -s /sbin/nologin mysql

  

Create directories and set ownership

mkdir /data/mysql/{data,binlog,slow,logs} -p
touch /data/mysql/logs/mysqld.log
chown -R mysql:mysql /data/mysql


Modify the /etc/my.cnf configuration file

vi /etc/my.cnf
Add a [mysqld] section header   ## workaround for a known bug
vi /etc/percona-xtradb-cluster.conf.d/mysqld.cnf
Replace the contents with:

[client]
port = 3306
socket = /data/mysql/data/mysql.sock
default-character-set = utf8mb4

[mysqld]
# basic settings #
user = mysql
port = 3306
sql_mode = "STRICT_TRANS_TABLES,NO_ENGINE_SUBSTITUTION,NO_ZERO_DATE,NO_ZERO_IN_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER"
autocommit = 1
server-id = 163
character_set_server = utf8mb4
init_connect = 'SET NAMES utf8'
transaction_isolation = READ-COMMITTED
lower_case_table_names = 1
explicit_defaults_for_timestamp = 1
max_allowed_packet = 16777216
event_scheduler = 1
datadir = /data/mysql/data
basedir = /var/lib/mysql
pid-file = /data/mysql/data/mysqld.pid
socket = /data/mysql/data/mysql.sock
default-time_zone = '+8:00'

# connection #
interactive_timeout = 1800
wait_timeout = 1800
lock_wait_timeout = 1800
skip_name_resolve = 1
max_connections = 5000
max_connect_errors = 1000000

# table cache performance settings
table_open_cache = 4096
table_definition_cache = 4096
table_open_cache_instances = 128

# session memory settings #
read_buffer_size = 5M
read_rnd_buffer_size = 10M
sort_buffer_size = 10M
tmp_table_size = 25M
join_buffer_size = 40M
thread_cache_size = 20M

# log settings #
log_error = /data/mysql/logs/mysqld.log
slow_query_log_file = /data/mysql/slow/slow.log
log-bin = /data/mysql/binlog/mysql-bin
relay_log = mysql-relay-bin
general_log_file = general.log

slow_query_log = 1
log_queries_not_using_indexes = 1
log_slow_admin_statements = 1
log_slow_slave_statements = 1
log_throttle_queries_not_using_indexes = 10
long_query_time = 1
min_examined_row_limit = 100
binlog-rows-query-log-events = 1
log-bin-trust-function-creators = 1
expire-logs-days = 7
log-slave-updates = 1

# innodb settings #
innodb_page_size = 16384
innodb_buffer_pool_size = 256M
innodb_buffer_pool_instances = 8
innodb_buffer_pool_load_at_startup = 1
innodb_buffer_pool_dump_at_shutdown = 1
innodb_lru_scan_depth = 4096
innodb_lock_wait_timeout = 5
innodb_io_capacity = 10000
innodb_io_capacity_max = 20000
innodb_flush_method = O_DIRECT
innodb_file_format = Barracuda
innodb_file_format_max = Barracuda

#undo
innodb_undo_directory = /data/mysql/data
innodb_undo_logs = 128
innodb_undo_tablespaces = 3

#redo
innodb_log_group_home_dir = /data/mysql/data
innodb_log_file_size = 10M
innodb_log_files_in_group = 2

innodb_flush_neighbors = 0
innodb_log_buffer_size = 16384
innodb_purge_threads = 4
innodb_large_prefix = 1
innodb_thread_concurrency = 64
innodb_print_all_deadlocks = 1
innodb_strict_mode = 1
innodb_sort_buffer_size = 16384
innodb_write_io_threads = 16
innodb_read_io_threads = 16
innodb_file_per_table = 1
innodb_stats_persistent_sample_pages = 64
innodb_autoinc_lock_mode = 2
innodb_online_alter_log_max_size = 100M
innodb_open_files = 4096

# replication settings #
master_info_repository = TABLE
relay_log_info_repository = TABLE
sync_binlog = 1
gtid_mode = on
enforce_gtid_consistency = 1
log_slave_updates
binlog_format = ROW
binlog_rows_query_log_events = 1
relay_log = relay.log
relay_log_recovery = 1
slave_skip_errors = ddl_exist_errors
slave-rows-search-algorithms = 'INDEX_SCAN,HASH_SCAN'

[mysqld-5.6]
# metalock performance settings
metadata_locks_hash_instances = 64

[mysqld-5.7]
# new innodb settings #
loose_innodb_numa_interleave = 1
innodb_buffer_pool_dump_pct = 40
innodb_page_cleaners = 16
innodb_undo_log_truncate = 1
innodb_max_undo_log_size = 100M #2G
innodb_purge_rseg_truncate_frequency = 128
# new replication settings #
slave-parallel-type = LOGICAL_CLOCK
slave-parallel-workers = 16
slave_preserve_commit_order = 1
slave_transaction_retries = 128
# other change settings #
binlog_gtid_simple_recovery = 1
log_timestamps = system
show_compatibility_56 = on

# Disabling symbolic-links is recommended to prevent assorted security risks
symbolic-links = 0
vi /etc/percona-xtradb-cluster.conf.d/mysqld_safe.cnf
Replace pid-file and socket with:
pid-file = /data/mysql/data/mysqld.pid
socket = /data/mysql/data/mysql.sock

vi /etc/percona-xtradb-cluster.conf.d/wsrep.cnf
Modify:
wsrep_cluster_address=gcomm://192.168.253.28,192.168.253.29,192.168.253.30   ## adjust to your environment
wsrep_retry_autocommit=1
wsrep_auto_increment_control=1
wsrep_node_name=pxc-linux-29          ## adjust to your environment
wsrep_node_address=192.168.253.29     ## adjust to your environment
wsrep_sst_method=xtrabackup-v2
wsrep_cluster_name=test-pxc
wsrep_sst_auth="sstuser:s3cretPass"   ## adjust to your environment

 

Start the first node

systemctl start mysql@bootstrap

Note: whenever you start the first node of the cluster (initial cluster build, or after the whole cluster has been shut down), always use this command.

Change the root password

In MySQL 5.7 the temporary root password is written to the error log:
grep "temporary password" /data/mysql/logs/mysqld.log

Log in to MySQL with that temporary password and change it to one of your own:
mysql> alter user 'root'@'localhost' identified by 'abc123';

Create the SST user:
mysql> GRANT PROCESS,RELOAD,LOCK TABLES,REPLICATION CLIENT ON *.* TO 'sstuser'@'192.168.%.%' IDENTIFIED BY 's3cretPass';
mysql> flush privileges;

 

Start the other nodes

systemctl start mysql

Note: once one node of the cluster is already running, all remaining nodes are started with this command.
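
Once the remaining nodes have joined, it is worth confirming the cluster state from any node. A minimal check, assuming the root password set earlier:

mysql -uroot -p -e "show global status like 'wsrep_cluster_size'; show global status like 'wsrep_local_state_comment';"
# wsrep_cluster_size should equal the number of nodes (3 in this setup)
# wsrep_local_state_comment should be Synced on every node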


HAProxy load balancing

HAProxy is a reverse-proxy load-balancing solution that supports both layer 4 and layer 7 modes, provides backend health checks, and is very stable. In its early days Taobao also used HAProxy as the load balancer for its CDN system.

Install HAProxy

yum -y install haproxy
Create the users on the cluster's MySQL (create them on one node and they will be replicated to the other nodes).

Monitoring account:
grant usage on *.* to 'pxc-monitor'@'%' identified by 'testpxc';


Service test account:
grant all privileges on *.* to 'zxw'@'%' identified by 'xxwzopop';

Configure haproxy.cfg

vi /etc/haproxy/haproxy.cfg
#---------------------------------------------------------------------
# Global settings
#---------------------------------------------------------------------
global
        log 127.0.0.1 local2
        chroot /var/lib/haproxy
        pidfile /var/run/haproxy.pid
        maxconn 4000
        user haproxy
        group haproxy
        daemon

defaults
        mode tcp
        log global
        option tcplog
        option dontlognull
        retries 3
        timeout http-request 10s
        timeout queue 1m
        timeout connect 10s
        timeout client 1m
        timeout server 1m
        timeout http-keep-alive 10s
        timeout check 10s
        maxconn 3000

frontend mysql
        bind *:3307
        mode tcp
        #log global
        option tcplog
        default_backend mysqlservers

backend mysqlservers
        balance leastconn
        server dbsrv1 10.10.48.62:3306 check port 9200 rise 1 fall 2 maxconn 300
        server dbsrv2 10.10.48.64:3306 check port 9200 rise 1 fall 2 maxconn 300
        server dbsrv3 10.10.48.66:3306 check port 9200 rise 1 fall 2 maxconn 300

## Define a stats page, listening on port 8888, with authentication enabled
listen stats
        mode http
        bind 0.0.0.0:8888
        stats enable
        stats hide-version
        stats uri /haproxyadmin?stats
        stats realm Haproxy\ Statistics
        stats auth admin:admin
        stats admin if TRUE

Configure HAProxy logging:

By default, HAProxy produces no log output after installation (to avoid the I/O overhead of writing logs). The steps below enable logging.

yum -y install rsyslog

# vim /etc/rsyslog.conf
...........
$ModLoad imudp
$UDPServerRun 514        # rsyslog must listen for UDP on port 514, so uncomment these two lines
.........
local2.*    /var/log/haproxy.log     # the facility must match the one defined in haproxy.cfg (local2)


systemctl start rsyslog
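
After rsyslog has been restarted and HAProxy (started later in this guide) has handled at least one connection, logging can be verified with the path configured above:

tail -n 20 /var/log/haproxy.log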

 

Install the MySQL health-check script on every PXC node (this must be done on each node of the cluster)

Copy the scripts

# cp /usr/local/mysql/bin/clustercheck /usr/bin/
# cp /usr/local/mysql/xinetd.d/mysqlchk /etc/xinetd.d/

Note: both clustercheck and the mysqlchk xinetd script are left at their default values here.

Create the MySQL user used by the health check (running the grant on any one node is enough):

grant process on *.* to 'clustercheckuser'@'localhost' identified by 'clustercheckpassword!';
flush privileges;

Note: if you do not use the default username and password expected by clustercheck, you need to edit the clustercheck script and change the MYSQL_USERNAME and MYSQL_PASSWORD values.

Change the username and password (this must be done on all three nodes)

#vim /usr/bin/clustercheck
MYSQL_USERNAME="pxc-monitor"
MYSQL_PASSWORD="testpxc"

Add the mysqlchk service port to /etc/services:

echo 'mysqlchk 9200/tcp # mysqlchk' >> /etc/services

Install xinetd, which manages the MySQL health-check script as a network service

yum -y install xinetd    ### very important
systemctl enable xinetd
systemctl start xinetd
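
For reference, the /etc/xinetd.d/mysqlchk file shipped with PXC normally looks roughly like the following (exact values may differ between package versions); the important point is that xinetd answers on port 9200 by running clustercheck:

# default: on
# description: mysqlchk
service mysqlchk
{
        disable         = no
        flags           = REUSE
        socket_type     = stream
        port            = 9200
        wait            = no
        user            = nobody
        server          = /usr/bin/clustercheck
        log_on_failure  += USERID
        only_from       = 0.0.0.0/0
        per_source      = UNLIMITED
}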

Test the clustercheck script

/usr/bin/clustercheck

HTTP/1.1 200 OK
Content-Type: text/plain
Connection: close
Content-Length: 40

Percona XtraDB Cluster Node is synced.

Note: the status must be 200; otherwise the check fails (usually because the mysql service is not healthy or the environment is wrong) and HAProxy will not send traffic to that MySQL node.
HAProxy determines whether a MySQL server is alive through port 9200: this HTTP check is how HAProxy learns the state of each PXC node.
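
With xinetd listening, the same check can be exercised over the network, for example from the HAProxy host; this is exactly what the "check port 9200" options in haproxy.cfg rely on (the IP below is one of the example backend nodes):

curl -i http://10.10.48.62:9200
# expected: HTTP/1.1 200 OK ... Percona XtraDB Cluster Node is synced.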

 

Start HAProxy

# start command
haproxy -f /etc/haproxy/haproxy.cfg

# check the background process
ps -ef | grep haproxy
haproxy   9754     0  0 11:29 ?        00:00:00 haproxy -f /etc/haproxy/haproxy.cfg
root      9823    74  0 11:30 ?        00:00:00 grep --color=auto haproxy

# check the listening ports
netstat -nlap | grep haproxy
tcp        0      0 0.0.0.0:3307      0.0.0.0:*    LISTEN    9754/haproxy
tcp        0      0 0.0.0.0:8088      0.0.0.0:*    LISTEN    9754/haproxy
udp        0      0 0.0.0.0:59349     0.0.0.0:*              9754/haproxy
unix  2      [ ACC ]     STREAM     LISTENING     30637572  9754/haproxy  /var/lib/haproxy/stats.9753.tmp

# configure start on boot (for a source-compiled HAProxy; adjust paths to your installation)
cp /usr/local/sbin/haproxy /usr/sbin/haproxy
cd /opt/soft/haproxy-1.5.3/examples
cp haproxy.init /etc/init.d/haproxy
chmod +x /etc/init.d/haproxy

Test HAProxy

Create a test account on the PXC cluster:

grant all privileges on *.* to 'zxw'@'%' identified by 'xxwzopop';
#for i in `seq 1 10`;do mysql -h 192.168.1.163 -P3307 -uzxw -pxxwzopop -e "select @@hostname;";done

Note: in practice it is enough to allow access only from the HAProxy hosts' IPs, since clients reach the MySQL cluster through the VIP and HAProxy opens connections to the backend MySQL servers from its own IP according to its balancing policy.

 

View the HAProxy status page:

http://192.168.1.163:8888/haproxyadmin?stats
Enter the username and password defined by the stats auth line in haproxy.cfg (admin:admin in the configuration above).


Using Keepalived for HAProxy high availability

 

Installation

yum install -y gcc openssl-devel popt-devel ipvsadm
yum -y install kernel kernel-devel* popt popt-devel libssl-dev libnl libnl-devel openssl openssl-* ipvsadm libnfnetlink-devel
yum install keepalived -y
yum install MySQL-python -y
mv /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.bak

Allow VRRP through the firewall

# allow the VRRP protocol; otherwise both nodes will claim the VIP at the same time

 firewall-cmd --direct --permanent --add-rule ipv4 filter INPUT 0  --protocol vrrp -j ACCEPT
 firewall-cmd --reload
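
The direct rule can be confirmed after the reload (verification only):

firewall-cmd --direct --get-all-rules
# expected output: ipv4 filter INPUT 0 --protocol vrrp -j ACCEPT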

Configuration

vi /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    router_id haproxy_pxc           # name of this keepalived group
}
vrrp_script chk_haproxy {
    script "/etc/keepalived/check_haproxy.sh"
    interval 2
    weight 2
}
vrrp_instance VI_HAPROXY {
    state MASTER                    # set to BACKUP on the standby machine
    #nopreempt                      # non-preemptive mode
    interface eth0
    virtual_router_id 51            # must be identical within one cluster, valid range 1-255
    priority 100                    # e.g. 90 on the standby
    advert_int 1
    authentication {
        auth_type PASS              # password auth; the password must not exceed 8 characters
        auth_pass 1111
    }
    track_script {
        chk_haproxy
    }
    virtual_ipaddress {
        192.168.1.188/24
    }
}

 

vi /etc/keepalived/check_haproxy.sh
#!/bin/bash
# If haproxy is not running, try to restart it once; if it is still not
# running after 3 seconds, stop keepalived so the VIP fails over to the peer.
A=`ps -C haproxy --no-header | wc -l`
if [ $A -eq 0 ]; then
    /usr/sbin/haproxy -f /etc/haproxy/haproxy.cfg
    sleep 3
    if [ `ps -C haproxy --no-header | wc -l` -eq 0 ]; then
        systemctl stop keepalived
    fi
fi

chmod 755 /etc/keepalived/check_haproxy.sh
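
With keepalived started on both machines, the VIP can be checked and a failover rehearsed. The interface name and addresses below come from the configuration above, and the test login reuses the zxw account created for the HAProxy test:

systemctl enable keepalived
systemctl start keepalived
ip addr show eth0 | grep 192.168.1.188    # the VIP should appear on the MASTER
systemctl stop keepalived                 # simulate a failure on the MASTER
# re-run "ip addr show eth0" on the BACKUP: the VIP should have moved there
mysql -h 192.168.1.188 -P3307 -uzxw -pxxwzopop -e "select @@hostname;"   # clients still reach MySQL through the VIP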

 

# Migrate data

vi /etc/percona-xtradb-cluster.conf.d/mysqld.cnf
replicate-do-db=dbtest

Import yesterday's full backup, then configure replication from the source instance:

change master to master_host='10.200.22.33',master_port=3306,master_user='repl',master_password='mysql',master_log_file='master-bin.000009',master_log_pos=674;

start slave;
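
Replication can then be verified on the node where CHANGE MASTER was executed:

mysql> show slave status\G

Slave_IO_Running and Slave_SQL_Running should both be Yes, and Seconds_Behind_Master should trend towards 0.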

 

PMM monitoring:

pmm server:

docker pull percona/pmm-server:latest
mkdir -p /data/pmm_data/opt/prometheus/data
mkdir -p /data/pmm_data/opt/consul-data
mkdir -p /data/pmm_data/var/lib/mysql
mkdir -p /data/pmm_data/var/lib/grafana
docker create -v /data/pmm_data/opt/prometheus/data -v /data/pmm_data/opt/consul-data -v /data/pmm_data/var/lib/mysql -v /data/pmm_data/var/lib/grafana --name pmm-data percona/pmm-server:latest /bin/true

docker run -d \
-e ORCHESTRATOR_ENABLED=true \
-e METRICS_RETENTION=720h \
-e SERVER_USER=admin \
-e SERVER_PASSWORD=abcd.1234 \
-p 8080:80 \
--net staticnet \
--ip 192.168.0.11 \
--volumes-from pmm-data \
--name pmm-server \
--restart always percona/pmm-server:latest
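
The run command above attaches the container to a user-defined network named staticnet with a fixed IP. If that network does not already exist it has to be created first, roughly like this (the subnet is an assumption chosen to contain 192.168.0.11):

docker network create --subnet=192.168.0.0/24 staticnet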

 

pmm client:

yum -y install http://www.percona.com/downloads/percona-release/redhat/0.1-4/percona-release-0.1-4.noarch.rpm
yum install pmm-client -y

 

Add the client configuration

pmm-admin config --server 10.1.12.114:8080 --server-user admin --server-password abcd.1234
# add Linux monitoring
pmm-admin add linux:metrics
# create the MySQL monitoring account and enable innodb_monitor_enable
GRANT SELECT, PROCESS, SHOW DATABASES, REPLICATION CLIENT ON *.* TO 'pmm'@'10.1.%' IDENTIFIED BY 'L2iLf#eqISQ613u^';
set global innodb_monitor_enable=all;
# add MySQL monitoring
pmm-admin add mysql --user pmm --password 'L2iLf#eqISQ613u^' --host 10.1.21.33 --query-source perfschema

# list the configured monitoring services
pmm-admin list
# check network connectivity
pmm-admin check-network

[linux:metrics] OK, already monitoring this system.
[mysql:metrics] OK, now monitoring MySQL metrics using DSN root:***@unix(/data/mysql/data/mysql.sock)
[mysql:queries] OK, now monitoring MySQL queries from slowlog using DSN root:***@unix(/data/mysql/data/mysql.sock)


Web UI access:
Open the server address in a browser: http://10.200.22.33:8881
Enter the username and password: admin (as configured when starting pmm-server above)

 

 

PMM client installation (RPM package):

1. Download the RPM package

wget https://www.percona.com/downloads/pmm/1.17.1/binary/redhat/7/x86_64/pmm-client-1.17.1-1.el7.x86_64.rpm

2. Install it

rpm -ivh pmm-client-1.17.1-1.el7.x86_64.rpm

3. Check the PMM version

pmm-admin --version

4. Open the firewall ports and verify the network environment

firewall-cmd --zone=public --add-port=42000/tcp --permanent
firewall-cmd --zone=public --add-port=42002/tcp --permanent
firewall-cmd --reload

Alternatively, if the host uses iptables instead of firewalld:

1. Back up iptables first

cp /etc/sysconfig/iptables /var/tmp

2. Edit the /etc/sysconfig/iptables file
vi /etc/sysconfig/iptables
-A INPUT -p tcp -m state --state NEW -m tcp --dport 42000 -j ACCEPT
-A INPUT -p udp -m state --state NEW -m udp --dport 42000 -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport 42002 -j ACCEPT
-A INPUT -p udp -m state --state NEW -m udp --dport 42002 -j ACCEPT

3. Restart the firewall

service iptables restart

 

5. Configuration

a. Configure the server connection

pmm-admin config --server 10.1.12.114:8080 --server-user admin --server-password abcd.1234 --client-name=bxjc-m-48-12

b. Add Linux monitoring

pmm-admin add linux:metrics

c. Add MySQL monitoring

# create the MySQL monitoring account and enable innodb_monitor_enable
GRANT SELECT, PROCESS, SHOW DATABASES, REPLICATION CLIENT ON *.* TO 'pmm'@'10.1.%' IDENTIFIED BY 'L2iLf#eqISQ613u^';
set global innodb_monitor_enable=all;

pmm-admin add mysql --user pmm --password 'L2iLf#eqISQ613u^' --host 10.10.48.12 --port=3306 --query-source perfschema

d. Check the configuration

pmm-admin list
pmm-admin check-network
