Preface:
My company makes heavy use of corosync + pacemaker. I previously knew it only at a surface level and never compared it in depth with Keepalived or Heartbeat. This post does not go into the underlying theory; for that, consult the official documentation or other material online.
Here I am simply sharing a small experiment of my own as a basic introduction. Things are busy at the new company, so blog updates will be slow for a while, but what I share going forward will mostly be things rarely found online, combined with my own hands-on experience. I love open source and I love sharing.
Objective:
Use corosync + pacemaker to provide highly available HTTP access. It can of course be combined with DRBD, NFS, MySQL, and other services for more elaborate setups.
Topology:
node1: httpd
192.168.179.129  service address
192.168.107.128  heartbeat address
192.168.8.111    access address
node2: httpd
192.168.179.130  service address
192.168.107.129  heartbeat address
192.168.8.112    access address
node3: NFS server
192.168.179.131  service address
192.168.107.130  heartbeat address
fip (VIP): 192.168.8.140
Preparation:
node1 node2 node3:
yum install lrzsz wget -y
yum install ntpdate -y
[root@node1 ~]# crontab -l
*/1 * * * * /usr/sbin/ntpdate time.nist.gov > /dev/null 2>&1
[root@node2 ~]# crontab -l
*/1 * * * * /usr/sbin/ntpdate time.nist.gov > /dev/null 2>&1
[root@node3 ~]# crontab -l
*/1 * * * * /usr/sbin/ntpdate time.nist.gov > /dev/null 2>&1
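If you want to confirm the nodes can actually reach the time server before relying on the cron job, a quick manual query such as the following can help (my own check, assuming ntpdate is installed as above):
[root@node1 ~]# /usr/sbin/ntpdate -q time.nist.gov   # query only, does not set the clock
[root@node1 ~]# date                                 # compare the local clock across all three nodes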
hosts file:
node1 node2 node3:
192.168.179.129 node1
192.168.179.130 node2
192.168.179.131 node3
SSH mutual trust:
node1 node2 node3:
[root@node1 ~]# ssh-keygen -t rsa -f ~/.ssh/id_rsa -P ''
[root@node1 ~]# ssh-copy-id -i .ssh/id_rsa.pub root@node2
[root@node1 ~]# ssh-copy-id -i .ssh/id_rsa.pub root@node3
[root@node2 ~]# ssh-keygen -t rsa -f ~/.ssh/id_rsa -P ''
[root@node2 ~]# ssh-copy-id -i .ssh/id_rsa.pub root@node1
[root@node2 ~]# ssh-copy-id -i .ssh/id_rsa.pub root@node3
[root@node3 ~]# ssh-keygen -t rsa -f ~/.ssh/id_rsa -P ''
[root@node3 ~]# ssh-copy-id -i .ssh/id_rsa.pub root@node1
[root@node3 ~]# ssh-copy-id -i .ssh/id_rsa.pub root@node2
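A quick way to confirm the trust relationship actually works (my own check, not part of the original write-up) is to run a remote command and verify no password prompt appears:
[root@node1 ~]# ssh node2 'hostname'   # should print node2 without asking for a password
[root@node1 ~]# ssh node3 'hostname'   # should print node3 without asking for a password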
Deployment:
node1 node2:
yum install corosync* -y
yum install pacemaker* -y
yum install cluster-glue* -y
Configuration:
node1 node2:
[root@node1 ~]# cd /etc/corosync/
[root@node1 corosync]# cp corosync.conf.example corosync.conf
[root@node1 corosync]# vim corosync.conf
[root@node1 ~]# cat /etc/corosync/corosync.conf
totem {
    version: 2
    crypto_cipher: none
    crypto_hash: none
    interface {
        member {
            memberaddr: 192.168.107.128
        }
        member {
            memberaddr: 192.168.107.129
        }
        ringnumber: 0
        bindnetaddr: 192.168.107.0
        mcastport: 5405
    }
    transport: udpu
}
service {
    name: pacemaker
}
logging {
    fileline: off
    to_stderr: no
    to_logfile: yes
    logfile: /var/log/cluster/corosync.log
    to_syslog: yes
    debug: off
    timestamp: on
    logger_subsys {
        subsys: QUORUM
        debug: off
    }
}
amf {
    mode: disabled
}
aisexec {
    user: root
    group: root
}
quorum {
    provider: corosync_votequorum
    expected_votes: 2
    two_node: 1
}
[root@node1 ~]#
[root@node1 ~]# scp /etc/corosync/corosync.conf root@node2:/etc/corosync/
Generate the corosync authkey for the cluster and copy it to the other node:
node1:
[root@node1 ~]# mv /dev/{random,random.bak}
[root@node1 ~]# ln -s /dev/urandom /dev/random
[root@node1 ~]# corosync-keygen
[root@node1 ~]# ll /etc/corosync/authkey
[root@node1 ~]# chmod 0400 /etc/corosync/authkey
[root@node1 ~]# scp /etc/corosync/authkey root@node2:/etc/corosync/
Start the service:
node1 node2:
[root@node1 ~]# /etc/init.d/corosync start
Starting Corosync Cluster Engine (corosync): [ OK ]
[root@node1 ~]#
[root@node1 ~]# ssh node2 "/etc/init.d/corosync start"
Starting Corosync Cluster Engine (corosync): [ OK ]
[root@node1 ~]#
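Before moving on, it is worth checking that corosync actually formed its ring and that pacemaker came up. A minimal check using standard corosync/pacemaker tools (my own addition, not from the original transcript):
[root@node1 ~]# corosync-cfgtool -s                              # ring status of this node, should show "no faults"
[root@node1 ~]# grep -i TOTEM /var/log/cluster/corosync.log      # confirm cluster membership was formed
[root@node1 ~]# crm_mon -1                                       # one-shot cluster status; both nodes should be Online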
# Building crmsh from source:
[root@node1 ~]# tar xf crmsh-2.1.2.tar.gz -C /usr/local/
[root@node1 ~]# ln -s /usr/local/crmsh-2.1.2/ /usr/local/crmsh
[root@node1 ~]# cd /usr/local/crmsh
[root@node1 crmsh]# yum install autoconf automake -y
[root@node1 crmsh]# ./autogen.sh
autoconf:autoconf (GNU Autoconf) 2.63
automake:automake (GNU automake) 1.11.1
aclocal
automake --add-missing --include-deps --copy
configure.ac:34: installing `./install-sh'
configure.ac:34: installing `./missing'
doc/Makefile.am:38: `%'-style pattern rules are a GNU make extension
doc/Makefile.am:40: `%'-style pattern rules are a GNU make extension
modules/Makefile.am:22: installing `./py-compile'
Makefile.am: installing `./INSTALL'
autoconf
Now run ./configure
[root@node1 crmsh]#
[root@node1 crmsh]# yum install gcc -y
[root@node1 crmsh]# ./configure
[root@node1 crmsh]# make
[root@node1 crmsh]# make install
# Or install via yum:
[root@node1 ~]# yum install crmsh -y
[root@node1 ~]# yum install pssh -y
[root@node2 ~]# yum install crmsh -y
[root@node2 ~]# yum install pssh -y
# crm in detail:
crm can be used in two ways:
1. Batch mode: run commands directly from the shell command line (see the example after the listing below).
2. Interactive mode: enter the crmsh shell (crm(live)#) and run commands interactively.
[root@node1 ~]# crm
crm(live)# help     # type help to see the many available sub-commands
--help   bye       configure   help      options   resource   up
-h       cd        corosync    history   quit      script
?        cib       end         ls        ra        site
back     cluster   exit        node      report    status
crm(live)#
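As an illustration of batch mode (my own example, not from the original post), the same sub-commands can be run directly from the shell without entering the interactive prompt:
[root@node1 ~]# crm status                  # one-shot cluster status
[root@node1 ~]# crm configure show          # dump the current configuration
[root@node1 ~]# crm ra list ocf heartbeat   # list the ocf:heartbeat resource agents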
First-level sub-commands:
[root@node1 corosync]# crm
crm(live)# help
This is crm shell, a Pacemaker command line interface.
Available commands:
    cib            manage shadow CIBs                   // CIB sandbox
    resource       resources management                 // all resources are defined under this sub-command
    configure      CRM cluster configuration            // edit the cluster configuration
    node           nodes management                     // cluster node management
    options        user preferences                     // user preferences
    history        CRM cluster history                  // command history
    site           Geo-cluster support
    ra             resource agents information center   // resource agent sub-command (everything related to resource agents lives here)
    status         show cluster status                  // show the current cluster status
    help,?         show help (help topics for list of topics)   // list the commands available at the current level
    end,cd,up      go back one level                    // return to the top level crm(live)#
    quit,bye,exit  exit the program                     // leave the crm(live) interactive mode
resource: resource state management sub-commands
crm(live)resource# help
Available commands:
    status       show status of resources             // show resource status
    start        start a resource                     // start a resource
    stop         stop a resource                      // stop a resource
    restart      restart a resource                   // restart a resource
    promote      promote a master-slave resource      // promote a master/slave resource
    demote       demote a master-slave resource       // demote a master/slave resource
    manage       put a resource into managed mode     // put a resource under cluster management
    unmanage     put a resource into unmanaged mode   // take a resource out of cluster management
    migrate      migrate a resource to another node   // move a resource to another node
    unmigrate    unmigrate a resource to another node
    param        manage a parameter of a resource     // manage resource parameters
    secret       manage sensitive parameters          // manage sensitive parameters
    meta         manage a meta attribute              // manage meta attributes
    utilization  manage a utilization attribute
    failcount    manage failcounts                    // manage the failure counter
    cleanup      cleanup resource status              // clean up resource status
    refresh      refresh CIB from the LRM status      // refresh the CIB (cluster information base) from the LRM (local resource manager)
    reprobe      probe for resources not started by the CRM   // probe for resources not started by the CRM
    trace        start RA tracing                     // enable resource agent (RA) tracing
    untrace      stop RA tracing                      // disable resource agent (RA) tracing
    help         show help (help topics for list of topics)   // show help
    end          go back one level                    // go back one level (crm(live)#)
    quit         exit the program                     // exit the interactive shell
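To make the above concrete, here are a few typical resource operations in batch form (my own illustration, using the httpd resource that is defined later in this post):
[root@node1 ~]# crm resource status httpd          # which node is the resource on, and is it started
[root@node1 ~]# crm resource migrate httpd node2   # move it to node2
[root@node1 ~]# crm resource unmigrate httpd       # remove the constraint created by migrate
[root@node1 ~]# crm resource cleanup httpd         # clear failcounts and stale state after a failure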
configure: resource definition sub-commands
crm(live)configure# help    # press Tab twice to list the sub-commands; "help <command>" shows usage and examples for that command
Available commands:
    node              define a cluster node            // define a cluster node
    primitive         define a resource                // define a resource
    monitor           add monitor operation to a primitive   // add a monitor operation to a primitive (e.g. timeout, action on start failure)
    group             define a group                   // define a group (bundle several resources together)
    clone             define a clone                   // define a clone (set the total number of clones and how many may run per node)
    ms                define a master-slave resource   // define a master/slave resource (only one node runs the master instance; the others stand by as slaves)
    rsc_template      define a resource template       // define a resource template
    location          a location preference            // define a location constraint (which node the resource prefers; with equal scores, the node with the higher preference wins)
    colocation        colocate resources               // colocation constraint (how strongly resources must run together)
    order             order resources                  // the order in which resources are started
    rsc_ticket        resources ticket dependency
    property          set a cluster property           // set a cluster property
    rsc_defaults      set resource defaults            // set resource defaults (e.g. stickiness)
    fencing_topology  node fencing order               // node fencing order
    role              define role access rights        // define role access rights
    user              define user access rights        // define user access rights
    op_defaults       set resource operations defaults // set default options for resource operations
    schema            set or display current CIB RNG schema
    show              display CIB objects              // display CIB objects
    edit              edit CIB objects                 // edit CIB objects (vim-style editing)
    filter            filter CIB objects               // filter CIB objects
    delete            delete CIB objects               // delete CIB objects
    default-timeouts  set timeouts for operations to minimums from the meta-data
    rename            rename a CIB object              // rename a CIB object
    modgroup          modify group                     // modify a resource group
    refresh           refresh from CIB                 // re-read the CIB
    erase             erase the CIB                    // wipe the CIB
    ptest             show cluster actions if changes were committed
    rsctest           test resources as currently configured
    cib               CIB shadow management
    cibstatus         CIB status management and editing   // CIB status management and editing
    template          edit and import a configuration from a template   // edit or import a configuration template
    commit            commit the changes to the CIB    // write the pending changes into the CIB
    verify            verify the CIB with crm_verify   // CIB syntax verification
    upgrade           upgrade the CIB to version 1.0   // upgrade the CIB to version 1.0
    save              save the CIB to a file           // export the current CIB to a file (saved in the directory you were in before entering crm)
    load              import the CIB from a file       // load the CIB from a file
    graph             generate a directed graph
    xml               raw xml
    help              show help (help topics for list of topics)   // show help
    end               go back one level                // return to the top level (crm(live)#)
    quit              exit the program                 // leave the crm interactive mode
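The location, colocation, and order commands above are the ones you will reach for most often. A sketch of what they look like (my own example; the constraint names are illustrative, and the vip, httpd, and webservice resources are the ones defined later in this post):
crm(live)configure# location web_pref_node1 webservice 100: node1   # prefer node1 with a score of 100
crm(live)configure# colocation httpd_with_vip inf: httpd vip        # always keep httpd on the same node as vip
crm(live)configure# order vip_before_httpd inf: vip httpd           # start vip first, then httpd
crm(live)configure# verify
crm(live)configure# commit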
Operating on the configuration
verify, show, edit, delete, save, load, and commit all operate on the existing configuration.
(1) verify
Validates the configuration you have just entered. If something is wrong, verify prints useful hints.
(2) show
Syntax:
show [xml] [<id> ...]
show [xml] changed
Prints the current configuration. With no arguments it prints the configuration as command-line-style text; with the xml argument it prints it as XML. If resource or property ids are given, show prints only those parts of the configuration.
In addition, show [xml] changed displays what has changed in the configuration.
(3) edit, delete
Syntax of edit:
edit [xml] [<id> ...]
edit [xml] changed
edit modifies the existing configuration. Without arguments it edits the command-line-style text; with the xml argument it edits the XML form. If resource or property ids are given, only those parts are edited.
edit [xml] changed lets you edit the pending changes.
Syntax of delete:
delete <id> [<id> ...]
delete removes the resources or properties with the given ids; several can be removed at once, with the ids separated by spaces.
(4) save, load
Syntax of save:
save [xml] <file>
save stores the current configuration.
Without the xml argument, save writes the configuration as plain text in the command-line format; with xml, it saves it as an XML file.
Syntax of load:
load [xml] method URL
method :: replace | update
load reads back a previously saved configuration. Two methods are available:
replace clears the existing configuration and uses the loaded file;
update keeps the existing configuration and tries to merge the loaded configuration into it.
Likewise, without the xml argument load assumes the file is in command-line format; with xml it parses the file as XML.
load can also fetch a remote configuration file directly from a URL, e.g. http://storage.big.com/cibs/bigcib.xml.
(5) commit
Changes to the cluster configuration do not take effect immediately; they only take effect after commit. In some cases commit may refuse to apply the changes you just made, which means there is a problem with them. If you are sure your configuration is correct, you can force it with commit force.
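A small end-to-end example of the workflow described above (my own sketch; the file name is arbitrary):
crm(live)configure# save /root/cluster.cli           # back up the current configuration as plain text
crm(live)configure# edit                             # make some changes in the editor
crm(live)configure# verify                           # check them
crm(live)configure# commit                           # apply them to the CIB
crm(live)configure# load replace /root/cluster.cli   # roll everything back to the saved copy if needed
crm(live)configure# commit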
node: node management sub-commands
crm(live)# node
crm(live)node# help
Node management and status commands.
Available commands:
    status       show nodes status as XML            // show node status in XML format
    show         show node                           // show node status in command-line format
    standby      put node into standby               // simulate taking the given node offline (standby is followed by the node name / FQDN)
    online       set node online                     // bring the node back online
    maintenance  put node into maintenance mode      // switch a node into maintenance mode
    ready        put node into ready mode            // switch a node back to ready
    fence        fence node                          // fence a node
    clearstate   Clear node state                    // clear a node's state information
    delete       delete node                         // delete a node
    attribute    manage attributes
    utilization  manage utilization attributes
    status-attr  manage status attributes
    help         show help (help topics for list of topics)
    end          go back one level                   // go back one level
    quit         exit the program                    // exit
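For example, in batch mode (my own illustration; these are the same operations the failover test further below performs interactively):
[root@node1 ~]# crm node standby node1   # push node1 into standby; its resources move away
[root@node1 ~]# crm node online node1    # bring node1 back; with resource-stickiness=100 the resources stay where they are
[root@node1 ~]# crm node show node1      # show node1 and its attributes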
ra: resource agent sub-commands
crm(live)# ra
crm(live)ra# help
Available commands:
    classes      list classes and providers          // list resource agent classes and providers
    list         list RA for a class (and provider)  // list the resource agents in a class
    meta         show meta data for a RA             // show the parameters a resource agent accepts (e.g. meta ocf:heartbeat:IPaddr2)
    providers    show providers for a RA and a class
    help         show help (help topics for list of topics)
    end          go back one level
    quit         exit the program
View the resource agent classes supported by the cluster:
[root@node1 ~]# crm
crm(live)# ra
crm(live)ra# classes
lsb
ocf / .isolation heartbeat pacemaker
service
stonith
crm(live)ra#
List all the resource agents in a given class:
crm(live)ra# list lsb
auditd blk-availability bmc-snmp-proxy cman corosync
corosync-notifyd crond exchange-bmc-os-info haldaemon halt
htcacheclean httpd ip6tables ipmievd iptables
iscsi iscsid killall libvirt-guests lvm2-lvmetad
lvm2-monitor mdmonitor messagebus modclusterd multipathd
netconsole netfs network nfs nfs-rdma
nfslock ntpdate oddjobd pacemaker pacemaker_remote
postfix quota_nld rdisc rdma restorecond
ricci rpcbind rpcgssd rpcidmapd rpcsvcgssd
rsyslog sandbox saslauthd single sshd
udev-post winbind
crm(live)ra#
View how a given resource agent is configured:
crm(live)ra# info ocf:heartbeat:IPaddr
crm(live)ra# list ocf heartbeat
CTDB Delay Dummy Filesystem IPaddr IPaddr2
IPsrcaddr LVM MailTo Route SendArp Squid
VirtualDomain Xinetd apache conntrackd db2 dhcpd
ethmonitor exportfs iSCSILogicalUnit mysql named nfsnotify
nfsserver nginx oracle oralsnr pgsql portblock
postfix rsyncd symlink tomcat
crm(live)ra#
To list all the resource agents in a given class, you can also use commands like the following:
# crm ra list lsb
# crm ra list ocf heartbeat
# crm ra list ocf pacemaker
# crm ra list stonith
Example:
crm(live)configure# show
node node1
node node2
property cib-bootstrap-options: \
    have-watchdog=false \
    dc-version=1.1.15-5.el6-e174ec8 \
    cluster-infrastructure="classic openais (with plugin)" \
    expected-quorum-votes=2
crm(live)configure#
Global settings (disable STONITH since there is no fence device, ignore loss of quorum because this is a two-node cluster, and give resources a default stickiness of 100 so they do not automatically fail back):
crm(live)configure# property stonith-enabled="false"
crm(live)configure# property no-quorum-policy="ignore"
crm(live)configure# rsc_defaults resource-stickiness=100
Configure the fip resource:
crm(live)configure# primitive fip ocf:heartbeat:IPaddr params ip="192.168.179.132" cidr_netmask="255.255.255.0" nic="eth0" op monitor interval="10"
crm(live)configure#
crm(live)configure# show     # check the VIP that was just added
node node1
node node2
primitive fip IPaddr \
    params ip=192.168.179.132 cidr_netmask=255.255.255.0 nic=eth0 \
    op monitor interval=10
property cib-bootstrap-options: \
    have-watchdog=false \
    dc-version=1.1.15-5.el6-e174ec8 \
    cluster-infrastructure="classic openais (with plugin)" \
    expected-quorum-votes=2 \
    stonith-enabled=false \
    no-quorum-policy=ignore
rsc_defaults rsc-options: \
    resource-stickiness=100
crm(live)configure#
crm(live)configure# verify   # check the configuration for errors
crm(live)configure# commit   # commit the resource; configuration entered on the command line does not take effect until it is committed
[root@node1 ~]# ping 192.168.8.140
PING 192.168.8.140 (192.168.8.140) 56(84) bytes of data.
64 bytes from 192.168.8.140: icmp_seq=1 ttl=64 time=0.027 ms
64 bytes from 192.168.8.140: icmp_seq=2 ttl=64 time=0.046 ms
^C
--- 192.168.8.140 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1172ms
rtt min/avg/max/mdev = 0.027/0.036/0.046/0.011 ms
[root@node1 ~]#
Turn this cluster into an active/passive web (httpd) service cluster:
[root@node1 ~]# yum install httpd -y
[root@node1 ~]# echo "node1 by renzhiyuan" >/var/www/html/index.html
[root@node1 ~]# /etc/init.d/httpd start
[root@node2 ~]# yum install httpd -y
[root@node2 ~]# echo "node2 by renzhiyuan" >/var/www/html/index.html
[root@node2 ~]# /etc/init.d/httpd start
Test and make sure both pages are reachable:
http://192.168.8.111/
http://192.168.8.112/
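If you prefer to test from the command line instead of a browser, something like the following works (a trivial check of my own, not in the original post):
[root@node1 ~]# curl http://192.168.8.111/   # should return "node1 by renzhiyuan"
[root@node1 ~]# curl http://192.168.8.112/   # should return "node2 by renzhiyuan"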
Next we add the httpd service as a cluster resource. Two resource agent classes can manage httpd: lsb and ocf:heartbeat. For simplicity we use the lsb class here.
First, inspect the syntax of the lsb httpd resource agent with the following command:
crm(live)ra# info lsb:httpd
start and stop Apache HTTP Server (lsb:httpd)
server implementing the current HTTP standards.
Operations' defaults (advisory minimum):
    start         timeout=15
    stop          timeout=15
    status        timeout=15
    restart       timeout=15
    force-reload  timeout=15
    monitor       timeout=15 interval=15
crm(live)ra#
Next, create the httpd resource:
crm(live)# configure
crm(live)configure# primitive httpd lsb:httpd
crm(live)configure# show
node node1
node node2
primitive httpd lsb:httpd
primitive vip IPaddr \
    params ip=192.168.8.140 nic=eth2 cidr_netmask=24
property cib-bootstrap-options: \
    expected-quorum-votes=2 \
    have-watchdog=false \
    dc-version=1.1.15-5.el6-e174ec8 \
    cluster-infrastructure="classic openais (with plugin)" \
    stonith-enabled=false \
    no-quorum-policy=ignore
rsc_defaults rsc-options: \
    resource-stickiness=100
crm(live)configure# verify
crm(live)configure# commit
crm(live)configure#
Check the resource status:
crm(live)# status
Stack: classic openais (with plugin)
Current DC: node1 (version 1.1.15-5.el6-e174ec8) - partition with quorum
Last updated: Wed Jun 28 12:08:55 2017    Last change: Wed Jun 28 12:08:28 2017 by root via cibadmin on node1, 2 expected votes
2 nodes and 2 resources configured
Online: [ node1 node2 ]
Full list of resources:
 vip    (ocf::heartbeat:IPaddr):    Started node1
 httpd  (lsb:httpd):                Started node2
crm(live)#
Define a group resource (note that vip and httpd landed on different nodes above; grouping them keeps them on the same node):
crm(live)# configure
crm(live)configure# group webservice vip httpd
crm(live)configure# show
node node1
node node2
primitive httpd lsb:httpd
primitive vip IPaddr \
    params ip=192.168.8.140 nic=eth2 cidr_netmask=24
group webservice vip httpd
property cib-bootstrap-options: \
    expected-quorum-votes=2 \
    have-watchdog=false \
    dc-version=1.1.15-5.el6-e174ec8 \
    cluster-infrastructure="classic openais (with plugin)" \
    stonith-enabled=false \
    no-quorum-policy=ignore
rsc_defaults rsc-options: \
    resource-stickiness=100
crm(live)configure# verify
crm(live)configure# commit
crm(live)configure#
Check the resource status again:
crm(live)# status
Stack: classic openais (with plugin)
Current DC: node1 (version 1.1.15-5.el6-e174ec8) - partition with quorum
Last updated: Wed Jun 28 12:13:16 2017    Last change: Wed Jun 28 12:11:45 2017 by root via cibadmin on node1, 2 expected votes
2 nodes and 2 resources configured
Online: [ node1 node2 ]
Full list of resources:
 Resource Group: webservice
     vip    (ocf::heartbeat:IPaddr):    Started node1
     httpd  (lsb:httpd):                Started node1
crm(live)#
Access test:
http://192.168.8.140/    ->  node1 by renzhiyuan
Failure simulation:
[root@node1 ~]# /etc/init.d/corosync stop
[root@node2 ~]# crm
crm(live)# node
crm(live)node# standby
crm(live)node# cd ..
crm(live)# cd node
crm(live)node# standby
crm(live)node# cd ..
crm(live)# status
Stack: classic openais (with plugin)
Current DC: node2 (version 1.1.15-5.el6-e174ec8) - partition with quorum
Last updated: Wed Jun 28 12:55:16 2017    Last change: Wed Jun 28 12:55:12 2017 by root via crm_attribute on node1, 2 expected votes
2 nodes and 2 resources configured
Node node1: standby
Online: [ node2 ]
Full list of resources:
 Resource Group: webservice
     vip    (ocf::heartbeat:IPaddr):    Started node2
     httpd  (lsb:httpd):                Started node2
crm(live)#
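To finish the test, the failed node can be brought back (my own follow-up, not part of the original transcript). Because resource-stickiness is set to 100, the webservice group is expected to stay on node2 rather than fail back automatically:
[root@node1 ~]# /etc/init.d/corosync start   # restart corosync on node1
[root@node2 ~]# crm node online node1        # clear the standby flag
[root@node2 ~]# crm status                   # node1 is Online again; webservice remains on node2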
Access test:
http://192.168.8.140/    ->  node2 by renzhiyuan
This post is from the blog 「永不放棄!任志遠」 (Never give up! Ren Zhiyuan). Please contact the author before reposting.