# System: CentOS 7.2; all packages are installed from the Yum repos
node1: 192.168.8.111
node2: 192.168.8.112
vip  : 192.168.8.200
nfs  : 192.168.8.113
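# Both nodes must resolve each other's hostname to the real IP, and the 127.0.0.1 line must keep only the defaults (see the UNCLEAN error at the end); a sketch of /etc/hosts on both nodes:
~]# vim /etc/hosts
127.0.0.1       localhost localhost.localdomain localhost4 localhost4.localdomain4
192.168.8.111   node1
192.168.8.112   node2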
# SSH mutual trust between the nodes
~]# ssh-keygen
~]# cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
~]# chmod go= .ssh/authorized_keys
~]# scp -p .ssh/id_rsa .ssh/authorized_keys node1:/root/.ssh/
# Install pcs
~]# ansible ha -m yum -a "name=pcs state=installed"
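# The ansible commands here assume an inventory group named ha containing both nodes; a minimal sketch of /etc/ansible/hosts:
~]# vim /etc/ansible/hosts
[ha]
node1
node2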
# Sync time on both nodes
~]# ntpdate 192.168.9.19;ssh node2 ntpdate 192.168.9.19
# Start and enable the pcsd service
~]# ansible ha -m service -a "name=pcsd state=started enabled=yes"
# Set the hacluster user's password
~]# ansible ha -m shell -a 'echo "xiong" | passwd --stdin hacluster'
# Authenticate the cluster nodes
~]# pcs cluster auth node1 node2
Username: hacluster
Password:
node1: Authorized
node2: Authorized
# Add both nodes to the cluster; --name sets the cluster name, followed by the member nodes
~]# pcs cluster setup --name myha node1 node2
Shutting down pacemaker/corosync services...
Redirecting to /bin/systemctl stop pacemaker.service
Redirecting to /bin/systemctl stop corosync.service
Killing any remaining services...
Removing all cluster configuration files...
node1: Succeeded
node2: Succeeded
Synchronizing pcsd certificates on nodes node1, node2...
node1: Success
node2: Success
Restarting pcsd on the nodes in order to reload the certificates...
node1: Success
node2: Success
# Joining the cluster generates the corosync configuration file
~]# ls /etc/corosync/
corosync.conf corosync.conf.example corosync.conf.example.udpu corosync.xml.example uidgid.d
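# A trimmed example of the generated /etc/corosync/corosync.conf (exact contents depend on the pcs version); the cluster name and nodes come from the setup command above:
totem {
    version: 2
    secauth: off
    cluster_name: myha
    transport: udpu
}
nodelist {
    node {
        ring0_addr: node1
        nodeid: 1
    }
    node {
        ring0_addr: node2
        nodeid: 2
    }
}
quorum {
    provider: corosync_votequorum
    two_node: 1
}
logging {
    to_logfile: yes
    logfile: /var/log/cluster/corosync.log
    to_syslog: yes
}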
# Start the cluster on all nodes
~]# pcs cluster start --all
node2: Starting Cluster...
node1: Starting Cluster...
# Follow the cluster log
~]# tail -f /var/log/cluster/corosync.log
# Check that cluster communication is healthy
~]# corosync-cfgtool -s
Printing ring status.
Local node ID 1
RING ID 0
id= 127.0.0.1 # if this shows 127.0.0.1 the cluster is broken: edit /etc/hosts, remove the hostname from the 127.0.0.1 line and keep only the defaults
status= ring 0 active with no faults
# Current DC: the Designated Coordinator, i.e. the node elected to coordinate cluster decisions
# Yum repo for crmsh
[network_ha-clustering_Stable]
name=Stable High Availability/Clustering packages (CentOS_CentOS-7)
type=rpm-md
baseurl=http://download.opensuse.org/repositories/network:/ha-clustering:/Stable/CentOS_CentOS-7/
gpgcheck=1
gpgkey=http://download.opensuse.org/repositories/network:/ha-clustering:/Stable/CentOS_CentOS-7//repodata/repomd.xml.key
enabled=1
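# Save the repo block above under /etc/yum.repos.d/ (the file name is arbitrary; crmsh.repo is used here as an example):
~]# vim /etc/yum.repos.d/crmsh.repo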
# Install crmsh and pssh; they are only needed on the node you run crm from
~]# yum -y install crmsh pssh
# NFS configuration: nfs-utils on the nodes, and the export defined on the NFS server (192.168.8.113)
~]# ansible ha -m yum -a "name=nfs-utils state=installed"
~]# vim /etc/exports
/opt/pages 192.168.8.0/24(rw)
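# On the NFS host (192.168.8.113) the export directory must exist and the NFS service must be running before the nodes can mount it; a sketch:
~]# mkdir -p /opt/pages
~]# systemctl start nfs-server && systemctl enable nfs-server
~]# exportfs -arv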
# Install nginx and verify that the NFS mount works
~]# ansible ha -m yum -a "name=nginx state=installed"
~]# ansible ha -m shell -a "mount -t nfs 192.168.8.113:/opt/pages /usr/share/nginx/html/"
~]# ansible ha -m shell -a "systemctl start nginx"
~]# ansible ha -m shell -a "umount /usr/share/nginx/html"
~]# ansible ha -m shell -a "df -Th"
# After each batch of changes run verify first, and commit only once everything is defined (see the commit step after the constraints below)
# Define the virtual IP
crm(live)configure# primitive vip ocf:heartbeat:IPaddr2 params ip="192.168.8.200"
# Define the nginx service
crm(live)configure# primitive vipservice systemd:nginx op monitor interval=30s timeout=20s
# Define the NFS filesystem resource
crm(live)configure# primitive vipnfs ocf:heartbeat:Filesystem params device="192.168.8.113:/opt/pages" directory="/usr/share/nginx/html/" fstype="nfs" op start timeout=60s op stop timeout=60s op monitor timeout=40 interval=20
# Define a colocation constraint; A ( B C ) expands to A-with-B and A-with-C, so vipservice must run together with both vip and vipnfs
crm(live)configure# colocation vipservice_with_vip_and_vipnfs inf: vipservice ( vip vipnfs )
# Define an ordering constraint: vip must start before vipnfs
crm(live)configure# order vipnfs_after_vip Mandatory: vip vipnfs
# And vipnfs must start before vipservice
crm(live)configure# order vipservice_after_vipnfs Mandatory: vipnfs vipservice
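# As noted above, verify the pending changes and then commit them:
crm(live)configure# verify
crm(live)configure# commit
crm(live)configure# up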
# Check that the resources are running
crm(live)# status
Last updated: Thu May 18 16:00:41 2017          Last change: Thu May 18 16:00:36 2017 by root via cibadmin on node1
Stack: corosync
Current DC: node2 (version 1.1.13-10.el7-44eb2dd) - partition with quorum
2 nodes and 3 resources configured
Online: [ node1 node2 ]
Full list of resources:
 vip        (ocf::heartbeat:IPaddr2):       Started node1
 vipservice (systemd:nginx):                Started node1
 vipnfs     (ocf::heartbeat:Filesystem):    Started node1
# Exit crmsh
# On each node run ss -tnl | grep 80, df -Th and ip addr show to see where the resources live; finally, force node1 into standby and check that everything moves to node2
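# A sketch of that failover test from the shell:
~]# crm node standby node1
~]# crm status        # vip, vipnfs and vipservice should now be Started on node2
~]# crm node online node1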
# Give the vip resource a location preference of 100 for node1
crm(live)configure# location node1_vip vip 100: node1
# Common errors
# Node node1: UNCLEAN (offline): run corosync-cfgtool -s and check whether the ring IP is 127.0.0.1; if it is, remove the hostname entries from the 127.0.0.1 line in /etc/hosts
[root@node2 ~]# pcs status
Cluster name: myha
WARNING: no stonith devices and stonith-enabled is not false
Last updated: Wed May 17 15:34:53 2017          Last change: Wed May 17 15:31:50 2017 by hacluster via crmd on node2
Stack: corosync
Current DC: node2 (version 1.1.13-10.el7-44eb2dd) - partition WITHOUT quorum
2 nodes and 0 resources configured
Node node1: UNCLEAN (offline)
Online: [ node2 ]
# WARNING: no stonith devices and stonith-enabled is not false
Fix: pcs property set stonith-enabled=false
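# The crmsh equivalent of the property above:
~]# crm configure property stonith-enabled=false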