Building an HTTP Cluster with Pacemaker

# Set the hostname (node01 shown here) and add both nodes to /etc/hosts

# node02 needs the same configuration

[root@node01 ~]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
10.10.10.5 node01
10.10.10.6 node02
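
# Optional sanity check: each node should now resolve the other by name
[root@node01 ~]# ping -c 1 node02
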
# Disable the firewall and SELinux

# node02 needs the same configuration

[root@node01 ~]# systemctl stop firewalld
[root@node01 ~]# systemctl disable firewalld
Removed symlink /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.
Removed symlink /etc/systemd/system/basic.target.wants/firewalld.service.
[root@node01 ~]# setenforce 0
[root@node01 ~]# sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
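
# Optional: confirm SELinux is no longer enforcing (prints Permissive now, Disabled after a reboot)
[root@node01 ~]# getenforce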

# Configure a yum repository that provides the pacemaker packages (the CentOS base mirror is used here)

# node02 needs the same configuration

[root@node01 ~]# cat /etc/yum.repos.d/pacemaker.repo
[pacemaker]
name=pacemaker
baseurl=http://mirror.centos.org/centos/$releasever/os/$basearch/
enabled=1
gpgcheck=0
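
# Optional: confirm yum can see the new repository
[root@node01 ~]# yum repolist | grep pacemaker
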
# Install the pacemaker packages

# node02 needs the same configuration

[root@node01 ~]#  yum install pacemaker pcs resource-agents -y
# Set up SSH key authentication between the nodes

# node02 needs the same configuration

[root@node01 ~]# ssh-keygen -t rsa -P ''

[root@node01 ~]# ssh-copy-id node02
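
# Optional: confirm passwordless SSH works before continuing
[root@node01 ~]# ssh node02 hostname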

# Set the password for the cluster user (pacemaker uses the hacluster user, which is created automatically when the packages are installed)

# node02 needs the same configuration

[root@node01 ~]# passwd hacluster
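
# passwd prompts interactively; a non-interactive alternative on CentOS (YourPassword is a placeholder, substitute your own):
[root@node01 ~]# echo 'YourPassword' | passwd --stdin hacluster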

# Start the pcsd service

# node02 needs the same configuration

[root@node01 ~]# systemctl restart pcsd
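
# Note: pcsd is started but not enabled at boot (pcs status later shows pcsd: active/disabled); enabling it is optional
[root@node01 ~]# systemctl enable pcsd
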
# Authenticate the cluster nodes

# Run on node01 only; pcs synchronizes the operation to node02 automatically

[root@node01 ~]# pcs cluster auth node01 node02
Username: hacluster   
Password:
node02: Authorized
node01: Authorized
# Create a cluster named mycluster and add node01 and node02 as its members

# Run on node01 only; pcs synchronizes the operation to node02 automatically

[root@node01 ~]# pcs cluster setup --force --name mycluster node01 node02
Destroying cluster on nodes: node01, node02...
node01: Stopping Cluster (pacemaker)...
node02: Stopping Cluster (pacemaker)...
node01: Successfully destroyed cluster
node02: Successfully destroyed cluster

Sending cluster config files to the nodes...
node01: Succeeded
node02: Succeeded

Synchronizing pcsd certificates on nodes node01, node02...
node02: Success
node01: Success

Restarting pcsd on the nodes in order to reload the certificates...
node02: Success
node01: Success


[root@node01 ~]# pcs cluster start --all
node01: Starting Cluster...
node02: Starting Cluster...
# Check the cluster status

[root@node01 ~]# pcs status
Cluster name: mycluster
WARNING: no stonith devices and stonith-enabled is not false
Stack: corosync
Current DC: node02 (version 1.1.15-11.el7_3.5-e174ec8) - partition with quorum
Last updated: Mon Sep 11 22:54:14 2017        Last change: Mon Sep 11 22:53:39 2017 by hacluster via crmd on node02

2 nodes and 0 resources configured

Online: [ node01 node02 ]

No resources


Daemon Status:
  corosync: active/disabled
  pacemaker: active/disabled
  pcsd: active/disabled


# Check corosync membership

[root@node01 ~]# pcs status corosync

Membership information
----------------------
    Nodeid      Votes Name
         1          1 node01 (local)
         2          1 node02

# Verify the cluster configuration

[root@node01 ~]#  crm_verify -L -V
   error: unpack_resources:    Resource start-up disabled since no STONITH resources have been defined
   error: unpack_resources:    Either configure some or disable STONITH with the stonith-enabled option
   error: unpack_resources:    NOTE: Clusters with shared data need STONITH to ensure data integrity
Errors found during check: config not valid
Note the errors above: no STONITH devices have been configured yet.


# Disable STONITH to clear the errors (acceptable for a test cluster with no shared storage or fence devices)

[root@node01 ~]#  pcs property set stonith-enabled=false
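
# Re-run the check; it should now return without errors
[root@node01 ~]#  crm_verify -L -V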

# Configure the VIP (virtual IP)

# Run on node01 only; pcs synchronizes the operation to node02 automatically

[root@node01 ~]# pcs  resource create ClusterIP ocf:heartbeat:IPaddr2 nic=ens34 ip=10.10.10.8 cidr_netmask=32  op monitor interval=30s
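
# Optional: confirm the VIP is bound on the node running ClusterIP (ens34 matches the nic= parameter above)
[root@node01 ~]# ip addr show ens34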

# Install the httpd service

# node02 needs the same configuration

[root@node01 ~]#  yum  -y   install  httpd


# Create the Apache home page

# node02 needs the same configuration (on node02, change node01 to node02 in the page body)

[root@node01 ~]# vi /var/www/html/index.html
 <html>
 <body>welcome to node01</body>
 </html>

# Configure the Apache server-status URL

# To monitor the health of the Apache instance and recover it when it fails, the resource agent used by pacemaker assumes the server-status URL is available

# node02 needs the same configuration

[root@node01 ~]# vi /etc/httpd/conf/httpd.conf

<Location /server-status>
   SetHandler server-status
   Order deny,allow
   Deny from all
   Allow from 127.0.0.1
</Location>

# Add Apache to the cluster as a resource

# Run on node01 only; pcs synchronizes the operation to node02 automatically

[root@node01 ~]# pcs resource create Web ocf:heartbeat:apache  configfile=/etc/httpd/conf/httpd.conf       statusurl="http://localhost/server-status" op monitor interval=1min
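
# Optional: once Web is started on a node, the status URL polled by the resource agent can be checked by hand on that node (access is limited to 127.0.0.1 by the Location block above)
[root@node01 ~]# curl http://localhost/server-status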


# Set the default operation timeout for resources

# Run on node01 only; pcs synchronizes the operation to node02 automatically

[root@node01 ~]#  pcs resource op defaults timeout=240s


# Colocate the VIP and Apache so they always run on the same node

# Run on node01 only; pcs synchronizes the operation to node02 automatically

[root@node01 ~]# pcs constraint colocation add Web ClusterIP INFINITY

# Set the start order: ClusterIP first, then Web

# Run on node01 only; pcs synchronizes the operation to node02 automatically

[root@node01 ~]#  pcs constraint order ClusterIP then Web
Adding ClusterIP Web (kind: Mandatory) (Options: first-action=start then-action=start)
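
# Optional: review the constraints just created
[root@node01 ~]# pcs constraint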

# Check the cluster status

[root@node01 ~]# pcs status
Cluster name: mycluster
Stack: corosync
Current DC: node01 (version 1.1.15-11.el7_3.5-e174ec8) - partition with quorum
Last updated: Tue Sep 12 16:06:59 2017        Last change: Tue Sep 12 16:06:49 2017 by root via cibadmin on node01

2 nodes and 2 resources configured

Online: [ node01 node02 ]

Full list of resources:

 ClusterIP    (ocf::heartbeat:IPaddr2):    Started node01
 Web    (ocf::heartbeat:apache):    Started node01

Daemon Status:
  corosync: active/disabled
  pacemaker: active/disabled
  pcsd: active/disabled

# Both resources are now running on node01.
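
# Optional: from any host that can reach the VIP, the page served by node01 should come back
[root@node01 ~]# curl http://10.10.10.8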


# Take node01 down, then check the cluster status on node02
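
# One way to take node01 down is a clean cluster stop, run on node01:
[root@node01 ~]# pcs cluster stop node01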

[root@node02 ~]# pcs status
Cluster name: mycluster
Stack: corosync
Current DC: node02 (version 1.1.15-11.el7_3.5-e174ec8) - partition with quorum
Last updated: Tue Sep 12 17:02:24 2017        Last change: Tue Sep 12 17:01:57 2017 by root via cibadmin on node01

2 nodes and 2 resources configured

Online: [ node02 ]
OFFLINE: [ node01 ]

Full list of resources:

 ClusterIP    (ocf::heartbeat:IPaddr2):    Started node02
 Web    (ocf::heartbeat:apache):    Started node02

Daemon Status:
  corosync: active/disabled
  pacemaker: active/disabled
  pcsd: active/disabled
