1. Prepare the base environment
1) Cluster device list
10.240.240.210 client
10.240.240.211 node1 mon1 osd.0
10.240.240.212 node2 mon2 osd.1
10.240.240.213 node3 mon3 osd.2
2) System environment
[root@client ~]# cat /etc/redhat-release
Red Hat Enterprise Linux Server release 6.5 (Santiago)
[root@client ~]# uname -r
2.6.32-431.el6.x86_64
3) Remove Red Hat's bundled yum packages (using the Red Hat yum repos requires a paid subscription, which is slow and costly, so remove the Red Hat yum packages and install the CentOS ones instead)
rpm -qa | grep yum | xargs rpm -e --nodeps
4) Install the CentOS yum packages
The packages can be downloaded from http://mirrors.163.com/centos or from http://pan.baidu.com/s/1qW0MbgC
rpm -ivh python-iniparse-0.3.1-2.1.el6.noarch.rpm
rpm -ivh yum-metadata-parser-1.1.2-16.el6.x86_64.rpm
rpm -ivh yum-3.2.29-40.el6.centos.noarch.rpm yum-plugin-fastestmirror-1.1.30-14.el6.noarch.rpm
Create a yum repo file of your own
[root@node1 ~]# vi /etc/yum.repos.d/my.repo
[base]
name=CentOS-6 - Base - 163.com
baseurl=http://mirrors.163.com/centos/6/os/$basearch/
#mirrorlist=http://mirrorlist.centos.org/?release=6&arch=$basearch&repo=os
gpgcheck=1
gpgkey=http://mirror.centos.org/centos/RPM-GPG-KEY-CentOS-6
#released updates
[updates]
name=CentOS-6 - Updates - 163.com
baseurl=http://mirrors.163.com/centos/6/updates/$basearch/
#mirrorlist=http://mirrorlist.centos.org/?release=6&arch=$basearch&repo=updates
gpgcheck=1
gpgkey=http://mirror.centos.org/centos/RPM-GPG-KEY-CentOS-6
#additional packages that may be useful
[extras]
name=CentOS-6 - Extras - 163.com
baseurl=http://mirrors.163.com/centos/6/extras/$basearch/
#mirrorlist=http://mirrorlist.centos.org/?release=6&arch=$basearch&repo=extras
gpgcheck=1
gpgkey=http://mirror.centos.org/centos/RPM-GPG-KEY-CentOS-6
#additional packages that extend functionality of existing packages
[centosplus]
name=CentOS-6 - Plus - 163.com
baseurl=http://mirrors.163.com/centos/6/centosplus/$basearch/
#mirrorlist=http://mirrorlist.centos.org/?release=6&arch=$basearch&repo=centosplus
gpgcheck=1
enabled=0
gpgkey=http://mirror.centos.org/centos/RPM-GPG-KEY-CentOS-6
#contrib - packages by Centos Users
[contrib]
name=CentOS-6 - Contrib - 163.com
baseurl=http://mirrors.163.com/centos/6/contrib/$basearch/
#mirrorlist=http://mirrorlist.centos.org/?release=6&arch=$basearch&repo=contrib
gpgcheck=1
enabled=0
gpgkey=http://mirror.centos.org/centos/RPM-GPG-KEY-CentOS-6
5) Refresh and update from the new yum repos
yum clean all
yum update -y
2. Install all of the Ceph yum repos on every node
1) Import the package signing keys
(1) The release.asc key
rpm --import 'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc'
(2) The autobuild.asc key
rpm --import 'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/autobuild.asc'
2) Add the Ceph extras repo ceph-extras.repo and set priority=2 so that its newer packages (such as qemu) take precedence over the stock ones. (The priority option only takes effect once yum-plugin-priorities is installed, which happens in section 3.)
vi /etc/yum.repos.d/ceph-extras.repo
[ceph-extras-source]
name=Ceph Extras Sources
baseurl=http://ceph.com/packages/ceph-extras/rpm/rhel6.5/SRPMS
enabled=1
priority=2
gpgcheck=1
type=rpm-md
gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc
3) Add the Ceph repo
vi /etc/yum.repos.d/ceph.repo
[ceph]
name=Ceph packages for $basearch
baseurl=http://ceph.com/rpm/rhel6/$basearch
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc
[ceph-noarch]
name=Ceph noarch packages
baseurl=http://ceph.com/rpm/rhel6/noarch
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc
[ceph-source]
name=Ceph source packages
baseurl=http://ceph.com/rpm/rhel6/SRPMS
enabled=0
gpgcheck=1
type=rpm-md
gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc
4) Add the Ceph Apache yum repo
vi /etc/yum.repos.d/ceph-apache.repo
[apache2-ceph-noarch]
name=Apache noarch packages for Ceph
baseurl=http://gitbuilder.ceph.com/apache2-rpm-rhel6-x86_64-basic/ref/master/
enabled=1
priority=2
gpgcheck=1
type=rpm-md
gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/autobuild.asc
[apache2-ceph-source]
name=Apache source packages for Ceph
baseurl=http://gitbuilder.ceph.com/apache2-rpm-rhel6-x86_64-basic/ref/master/
enabled=0
priority=2
gpgcheck=1
type=rpm-md
gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/autobuild.asc
5) Add the Ceph FastCGI yum repo
vi /etc/yum.repos.d/ceph-fastcgi.repo
[fastcgi-ceph-basearch]
name=FastCGI basearch packages for Ceph
baseurl=http://gitbuilder.ceph.com/mod_fastcgi-rpm-rhel6-x86_64-basic/ref/master/
enabled=1
priority=2
gpgcheck=1
type=rpm-md
gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/autobuild.asc
[fastcgi-ceph-noarch]
name=FastCGI noarch packages for Ceph
baseurl=http://gitbuilder.ceph.com/mod_fastcgi-rpm-rhel6-x86_64-basic/ref/master/
enabled=1
priority=2
gpgcheck=1
type=rpm-md
gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/autobuild.asc
[fastcgi-ceph-source]
name=FastCGI source packages for Ceph
baseurl=http://gitbuilder.ceph.com/mod_fastcgi-rpm-rhel6-x86_64-basic/ref/master/
enabled=0
priority=2
gpgcheck=1
type=rpm-md
gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/autobuild.asc
6) Add the EPEL yum repo
rpm -Uvh http://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm (this mirror produced errors when refreshing the yum metadata)
or
rpm -Uvh http://mirrors.sohu.com/fedora-epel/6/x86_64/epel-release-6-8.noarch.rpm (recommended inside China; the download is much faster)
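All of the repos above have to be present on every node. A small helper loop like the one below can push them out from node1; this is only a convenience sketch and assumes passwordless root SSH to node1, node2 and node3, with the repo files already written locally.
# distribute the repo files and import the Ceph signing keys on each node
for host in node1 node2 node3; do
    scp /etc/yum.repos.d/*.repo ${host}:/etc/yum.repos.d/
    ssh ${host} "rpm --import 'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc'; \
                 rpm --import 'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/autobuild.asc'; \
                 yum clean all"
done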
3. Install the Ceph packages on every node
1) Install ceph-deploy on the deployment machine
yum install -y ceph-deploy
2) Install the Ceph storage cluster packages
(1) Install the Ceph prerequisites
yum install -y snappy leveldb gdisk python-argparse gperftools-libs
(2) Install Ceph itself
yum install -y ceph
3) Install the Ceph object gateway
(1) Apache and FastCGI are installed with yum install httpd mod_fastcgi; run the following first
yum install -y yum-plugin-priorities
yum update
Then install Apache and FastCGI
yum install -y httpd mod_fastcgi
(2) Edit the httpd.conf configuration file
vim /etc/httpd/conf/httpd.conf
ServerName node1
(3) Restart the httpd service
/etc/init.d/httpd restart
(4) Install SSL support (this step reported errors during installation)
yum install -y mod_ssl openssl
openssl x509 -req -days 365 -in ca.csr -signkey ca.key -out ca.crt
cp ca.crt /etc/pki/tls/certs
cp ca.key /etc/pki/tls/private/ca.key
cp ca.csr /etc/pki/tls/private/ca.csr
/etc/init.d/httpd restart
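The ca.key and ca.csr used above are assumed to exist already; if they do not, a key and certificate signing request can be generated first, for example (the file names simply follow the commands above):
openssl genrsa -out ca.key 2048    # generate the private key
openssl req -new -key ca.key -out ca.csr    # generate the CSR; the prompts can be answered with defaults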
(5) Add a wildcard to DNS, e.g. in dnsmasq as shown below. (The DNS name must also be set in the Ceph configuration file with rgw dns name = {hostname}; see the sketch after the dnsmasq line.)
address=/.ceph-node/192.168.0.1
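On the Ceph side the matching setting goes into the gateway section of ceph.conf. A minimal sketch — the section name [client.radosgw.gateway] is an assumption, and the domain mirrors the dnsmasq entry above:
[client.radosgw.gateway]
rgw dns name = ceph-node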
(6) Install the Ceph object gateway
yum install -y ceph-radosgw ceph
(7) Install the Ceph object gateway agent
yum install -y radosgw-agent
4) Install the virtualization software for block storage
(1) If qemu packages were installed previously, remove them first so that the freshly installed versions are complete
yum remove -y qemu-kvm qemu-kvm-tools qemu-img
(2) Reinstall qemu
yum install -y qemu-kvm qemu-kvm-tools qemu-img
(3) Install the qemu guest agent
yum install -y qemu-guest-agent
(4) Install the libvirt packages
yum install -y libvirt
(5) Install the remaining Ceph dependencies and modules on all nodes
yum install *argparse* -y
yum install redhat-lsb -y
yum install xfs* -y
4. Build the Ceph cluster (this whole step can also be done with the quick-setup script in the appendix)
Set up the first mon node
1) Log in to the monitor node node1
ls /etc/ceph #check whether the Ceph configuration directory already contains anything
2) Create the Ceph configuration file and fill it in
touch /etc/ceph/ceph.conf #create an empty Ceph configuration file
[root@client ~]# uuidgen #generates a unique identifier to use as the Ceph cluster ID (fsid)
f11240d4-86b1-49ba-aacc-6d3d37b24cc4
fsid = f11240d4-86b1-49ba-aacc-6d3d37b24cc4 #the ID obtained above; add this line to the Ceph configuration file
mon initial members = node1,node2,node3 #node1, node2 and node3 act as the cluster's monitor nodes; add this line to the configuration file
mon host = 10.240.240.211,10.240.240.212,10.240.240.213 #the monitor addresses; add this line to the Ceph configuration file
Edit the Ceph configuration file so that it reads as follows
vi /etc/ceph/ceph.conf
[global]
fsid = f11240d4-86b1-49ba-aacc-6d3d37b24cc4
mon initial members = node1,node2,node3
mon host = 10.240.240.211,10.240.240.212,10.240.240.213
public network = 10.240.240.0/24
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
osd journal size = 1024
filestore xattr use omap = true
osd pool default size = 3
osd pool default min size = 1
osd crush chooseleaf type = 1
osd_mkfs_type = xfs
max mds = 5
mds max file size = 100000000000000
mds cache size = 1000000
mon osd down out interval = 900 #when an OSD has been down for 900s, mark it out of the cluster and remap its data to other nodes
cluster_network = 10.39.102.0/24
[mon]
mon clock drift allowed = .50 #raise the allowed clock drift to 0.5s (the default is 0.05s); the cluster contains heterogeneous PCs whose clocks always drift by more than 0.05s, so this keeps the monitors from complaining
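Raising mon clock drift allowed only papers over small skew; keeping the nodes time-synced avoids most clock warnings in the first place. A minimal sketch for RHEL/CentOS 6, with pool.ntp.org as a placeholder NTP source:
yum install -y ntp
ntpdate pool.ntp.org    # one-off sync against the placeholder server
service ntpd start
chkconfig ntpd on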
3) Create the keys on node1
ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *' #create the monitor keyring
ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --set-uid=0 --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow' #create the client.admin keyring and grant it access to the cluster
ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring #add the client.admin key to ceph.mon.keyring
4) Create a mon data directory on the node1 monitor node
mkdir -p /var/lib/ceph/mon/ceph-node1
5) Create the bootstrap-osd keyring on node1
mkdir -p /var/lib/ceph/bootstrap-osd/
ceph-authtool -C /var/lib/ceph/bootstrap-osd/ceph.keyring
6) Initialize the mon on node1 with the following command
ceph-mon --mkfs -i node1 --keyring /tmp/ceph.mon.keyring
7) Create an empty done file so that the monitor is not re-initialized later
touch /var/lib/ceph/mon/ceph-node1/done
8) Create an empty sysvinit marker file
touch /var/lib/ceph/mon/ceph-node1/sysvinit
9) Start the Ceph mon daemon
/sbin/service ceph -c /etc/ceph/ceph.conf start mon.node1
10) Check the mon status through the admin socket (asok)
[root@node1 ~]# ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.node1.asok mon_status
Set up the second mon node
1) Copy node1's /etc/ceph directory to node2
scp /etc/ceph/* node2:/etc/ceph/
2) Create a /var/lib/ceph/bootstrap-osd/ directory on node2
mkdir /var/lib/ceph/bootstrap-osd/
3) Copy /var/lib/ceph/bootstrap-osd/ceph.keyring from node1 to node2
scp /var/lib/ceph/bootstrap-osd/ceph.keyring node2:/var/lib/ceph/bootstrap-osd/
4) Copy /tmp/ceph.mon.keyring from node1
scp /tmp/ceph.mon.keyring node2:/tmp/
5) Create the /var/lib/ceph/mon/ceph-node2 directory on node2
mkdir -p /var/lib/ceph/mon/ceph-node2
6) Initialize the mon on node2 with the following command
ceph-mon --mkfs -i node2 --keyring /tmp/ceph.mon.keyring
7) Create an empty done file so that the monitor is not re-initialized later
touch /var/lib/ceph/mon/ceph-node2/done
8) Create an empty sysvinit marker file
touch /var/lib/ceph/mon/ceph-node2/sysvinit
9) Start the Ceph mon daemon
/sbin/service ceph -c /etc/ceph/ceph.conf start mon.node2
Set up the third mon node
1) Copy node1's /etc/ceph directory to node3
scp /etc/ceph/* node3:/etc/ceph/
2) Create a /var/lib/ceph/bootstrap-osd/ directory on node3
mkdir /var/lib/ceph/bootstrap-osd/
3) Copy /var/lib/ceph/bootstrap-osd/ceph.keyring from node1 to node3
scp /var/lib/ceph/bootstrap-osd/ceph.keyring node3:/var/lib/ceph/bootstrap-osd/
4) Copy /tmp/ceph.mon.keyring from node1
scp /tmp/ceph.mon.keyring node3:/tmp/
5) Create the /var/lib/ceph/mon/ceph-node3 directory on node3
mkdir -p /var/lib/ceph/mon/ceph-node3
6) Initialize the mon on node3 with the following command
ceph-mon --mkfs -i node3 --keyring /tmp/ceph.mon.keyring
7) Create an empty done file so that the monitor is not re-initialized later
touch /var/lib/ceph/mon/ceph-node3/done
8) Create an empty sysvinit marker file
touch /var/lib/ceph/mon/ceph-node3/sysvinit
9) Start the Ceph mon daemon
/sbin/service ceph -c /etc/ceph/ceph.conf start mon.node3
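With all three monitors started they should form a quorum. A quick sanity check (not part of the original procedure; output abbreviated):
[root@node1 ~]# ceph mon stat    # expect node1, node2 and node3 with quorum 0,1,2
[root@node1 ~]# ceph quorum_status --format json-pretty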
1) Check the cluster status
[root@node1 ~]# ceph -w
cluster f11240d4-86b1-49ba-aacc-6d3d37b24cc4
health HEALTH_ERR 192 pgs stuck inactive; 192 pgs stuck unclean; no osds
monmap e2: 3 mons at {node1=10.240.240.211:6789/0,node2=10.240.240.212:6789/0,node3=10.240.240.213:6789/0}, election epoch 8, quorum 0,1,2 node1,node2,node3
osdmap e1: 0 osds: 0 up, 0 in
pgmap v2: 192 pgs, 3 pools, 0 bytes data, 0 objects
0 kB used, 0 kB / 0 kB avail
192 creating
2) List the Ceph pools
ceph osd lspools
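On this Ceph release a fresh cluster comes with three default pools, so the output should look roughly like (exact formatting may vary):
0 data,1 metadata,2 rbd,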
Add the OSD nodes
Add the first OSD
1) Create an OSD and obtain its osd number
[root@node1 ~]# ceph osd create
0
2) Create a data directory for the OSD
[root@node1 ~]# mkdir -p /var/lib/ceph/osd/ceph-0
3) Format the prepared OSD disk as XFS
[root@node1 ~]# mkfs.xfs -f /dev/sdb
meta-data=/dev/sdb isize=256 agcount=4, agsize=1310720 blks
= sectsz=512 attr=2, projid32bit=0
data = bsize=4096 blocks=5242880, imaxpct=25
= sunit=0 swidth=0 blks
naming =version 2 bsize=4096 ascii-ci=0
log =internal log bsize=4096 blocks=2560, version=2
= sectsz=512 sunit=0 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
4) Mount the directory
[root@node1 ~]# mount -o user_xattr /dev/sdb /var/lib/ceph/osd/ceph-0
mount: wrong fs type, bad option, bad superblock on /dev/sdb,
missing codepage or helper program, or other error
In some cases useful info is found in syslog - try
dmesg | tail or so
Running the command above fails with the error shown.
The workaround is to use the following two commands instead of the single command above.
[root@node1 ~]# mount /dev/sdb /var/lib/ceph/osd/ceph-0
[root@node1 ~]# mount -o remount,user_xattr /var/lib/ceph/osd/ceph-0
Check the mounts
[root@node1 ~]# mount
/dev/sda2 on / type ext4 (rw)
proc on /proc type proc (rw)
sysfs on /sys type sysfs (rw)
devpts on /dev/pts type devpts (rw,gid=5,mode=620)
tmpfs on /dev/shm type tmpfs (rw)
/dev/sda1 on /boot type ext4 (rw)
none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw)
vmware-vmblock on /var/run/vmblock-fuse type fuse.vmware-vmblock (rw,nosuid,nodev,default_permissions,allow_other)
/dev/sdb on /var/lib/ceph/osd/ceph-0 type xfs (rw,user_xattr)
Add the mount to /etc/fstab
[root@node1 ~]# vi /etc/fstab
/dev/sdb /var/lib/ceph/osd/ceph-0 xfs defaults 0 0
/dev/sdb /var/lib/ceph/osd/ceph-0 xfs remount,user_xattr 0 0
5) Initialize the OSD data directory
[root@node1 ~]# ceph-osd -i 0 --mkfs --mkkey
6) Register the OSD's authentication key
[root@node1 ~]# ceph auth add osd.0 osd 'allow *' mon 'allow profile osd' -i /var/lib/ceph/osd/ceph-0/keyring
7) Add a CRUSH bucket for this host
[root@node1 ~]# ceph osd crush add-bucket node1 host
added bucket node1 type host to crush map
8) Place the host under the root default
[root@node1 ~]# ceph osd crush move node1 root=default
moved item id -2 name 'node1' to location {root=default} in crush map
9) Add osd.0 to the CRUSH map with a weight of 1.0
[root@node1 ~]# ceph osd crush add osd.0 1.0 host=node1
add item id 0 name 'osd.0' weight 1 at location {host=node1} to crush map
10) Create the sysvinit marker file
[root@node1 ~]# touch /var/lib/ceph/osd/ceph-0/sysvinit
11) Start the OSD daemon
/etc/init.d/ceph start osd.0
12) Check the OSD tree
[root@node1 ~]# ceph osd tree
# id weight type name up/down reweight
-1 1 root default
-2 1 host node1
0 1 osd.0 up 1
Add the second OSD
1) Create an OSD and obtain its osd number
[root@node2 ~]# ceph osd create
1
2) Create a data directory for the OSD
[root@node2 ~]# mkdir -p /var/lib/ceph/osd/ceph-1
3) Format the prepared OSD disk as XFS and mount it on the directory created in the previous step
[root@node2 ~]# mkfs.xfs -f /dev/sdb
meta-data=/dev/sdb isize=256 agcount=4, agsize=1310720 blks
= sectsz=512 attr=2, projid32bit=0
data = bsize=4096 blocks=5242880, imaxpct=25
= sunit=0 swidth=0 blks
naming =version 2 bsize=4096 ascii-ci=0
log =internal log bsize=4096 blocks=2560, version=2
= sectsz=512 sunit=0 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
4) Mount the directory
[root@node2 ~]# mount -o user_xattr /dev/sdb /var/lib/ceph/osd/ceph-1
mount: wrong fs type, bad option, bad superblock on /dev/sdb,
missing codepage or helper program, or other error
In some cases useful info is found in syslog - try
dmesg | tail or so
Running the command above fails with the error shown.
The workaround is to use the following two commands instead of the single command above.
[root@node2 ~]# mount /dev/sdb /var/lib/ceph/osd/ceph-1
[root@node2 ~]# mount -o remount,user_xattr /var/lib/ceph/osd/ceph-1
Check the mounts
[root@node2 ~]# mount
/dev/sda2 on / type ext4 (rw)
proc on /proc type proc (rw)
sysfs on /sys type sysfs (rw)
devpts on /dev/pts type devpts (rw,gid=5,mode=620)
tmpfs on /dev/shm type tmpfs (rw)
/dev/sda1 on /boot type ext4 (rw)
none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw)
vmware-vmblock on /var/run/vmblock-fuse type fuse.vmware-vmblock (rw,nosuid,nodev,default_permissions,allow_other)
/dev/sdb on /var/lib/ceph/osd/ceph-1 type xfs (rw,user_xattr)
Add the mount to /etc/fstab
[root@node2 ~]# vi /etc/fstab
/dev/sdb /var/lib/ceph/osd/ceph-1 xfs defaults 0 0
/dev/sdb /var/lib/ceph/osd/ceph-1 xfs remount,user_xattr 0 0
5) Initialize the OSD data directory
[root@node2 ~]# ceph-osd -i 1 --mkfs --mkkey
2014-06-25 23:17:37.633040 7fa8fd06b7a0 -1 journal FileJournal::_open: disabling aio for non-block journal. Use journal_force_aio to force use of aio anyway
2014-06-25 23:17:37.740713 7fa8fd06b7a0 -1 journal FileJournal::_open: disabling aio for non-block journal. Use journal_force_aio to force use of aio anyway
2014-06-25 23:17:37.744937 7fa8fd06b7a0 -1 filestore(/var/lib/ceph/osd/ceph-1) could not find 23c2fcde/osd_superblock/0//-1 in index: (2) No such file or directory
2014-06-25 23:17:37.812999 7fa8fd06b7a0 -1 created object store /var/lib/ceph/osd/ceph-1 journal /var/lib/ceph/osd/ceph-1/journal for osd.1 fsid f11240d4-86b1-49ba-aacc-6d3d37b24cc4
2014-06-25 23:17:37.813192 7fa8fd06b7a0 -1 auth: error reading file: /var/lib/ceph/osd/ceph-1/keyring: can't open /var/lib/ceph/osd/ceph-1/keyring: (2) No such file or directory
2014-06-25 23:17:37.814050 7fa8fd06b7a0 -1 created new key in keyring /var/lib/ceph/osd/ceph-1/keyring
6) Register the OSD's authentication key
[root@node2 ~]# ceph auth add osd.1 osd 'allow *' mon 'allow profile osd' -i /var/lib/ceph/osd/ceph-1/keyring
added key for osd.1
7) Add a CRUSH bucket for this host
[root@node2 ~]# ceph osd crush add-bucket node2 host
added bucket node2 type host to crush map
8) Place the host under the root default
[root@node2 ~]# ceph osd crush move node2 root=default
moved item id -3 name 'node2' to location {root=default} in crush map
9) Add osd.1 to the CRUSH map with a weight of 1.0
[root@node2 ~]# ceph osd crush add osd.1 1.0 host=node2
add item id 1 name 'osd.1' weight 1 at location {host=node2} to crush map
10) Create the sysvinit marker file
[root@node2 ~]# touch /var/lib/ceph/osd/ceph-1/sysvinit
11) Start the OSD daemon
[root@node2 ~]# /etc/init.d/ceph start osd.1
=== osd.1 ===
create-or-move updated item name 'osd.1' weight 0.02 at location {host=node2,root=default} to crush map
Starting Ceph osd.1 on node2...
starting osd.1 at :/0 osd_data /var/lib/ceph/osd/ceph-1 /var/lib/ceph/osd/ceph-1/journal
12) Check the OSD tree
[root@node2 ~]# ceph osd tree
# id weight type name up/down reweight
-1 2 root default
-2 1 host node1
0 1 osd.0 up 1
-3 1 host node2
1 1 osd.1 up 1
Add the third OSD
1) Create an OSD and obtain its osd number
[root@node3 ~]# ceph osd create
2
2) Create a data directory for the OSD
[root@node3 ~]# mkdir -p /var/lib/ceph/osd/ceph-2
3) Format the prepared OSD disk as XFS
[root@node3 ~]# mkfs.xfs -f /dev/sdb
meta-data=/dev/sdb isize=256 agcount=4, agsize=1310720 blks
= sectsz=512 attr=2, projid32bit=0
data = bsize=4096 blocks=5242880, imaxpct=25
= sunit=0 swidth=0 blks
naming =version 2 bsize=4096 ascii-ci=0
log =internal log bsize=4096 blocks=2560, version=2
= sectsz=512 sunit=0 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
4) Mount the directory
[root@node3 ~]# mount -o user_xattr /dev/sdb /var/lib/ceph/osd/ceph-2
mount: wrong fs type, bad option, bad superblock on /dev/sdb,
missing codepage or helper program, or other error
In some cases useful info is found in syslog - try
dmesg | tail or so
Running the command above fails with the error shown.
The workaround is to use the following two commands instead of the single command above.
[root@node3 ~]# mount /dev/sdb /var/lib/ceph/osd/ceph-2
[root@node3 ~]# mount -o remount,user_xattr /var/lib/ceph/osd/ceph-2
Check the mounts
[root@node2 ~]# mount
/dev/sda2 on / type ext4 (rw)
proc on /proc type proc (rw)
sysfs on /sys type sysfs (rw)
devpts on /dev/pts type devpts (rw,gid=5,mode=620)
tmpfs on /dev/shm type tmpfs (rw)
/dev/sda1 on /boot type ext4 (rw)
none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw)
vmware-vmblock on /var/run/vmblock-fuse type fuse.vmware-vmblock (rw,nosuid,nodev,default_permissions,allow_other)
/dev/sdb on /var/lib/ceph/osd/ceph-2 type xfs (rw,user_xattr)
Add the mount to /etc/fstab
[root@node3 ~]# vi /etc/fstab
/dev/sdb /var/lib/ceph/osd/ceph-2 xfs defaults 0 0
/dev/sdb /var/lib/ceph/osd/ceph-2 xfs remount,user_xattr 0 0
5) Initialize the OSD data directory
[root@node3 ~]# ceph-osd -i 2 --mkfs --mkkey
2014-06-25 23:29:01.734251 7f52915927a0 -1 journal FileJournal::_open: disabling aio for non-block journal. Use journal_force_aio to force use of aio anyway
2014-06-25 23:29:01.849158 7f52915927a0 -1 journal FileJournal::_open: disabling aio for non-block journal. Use journal_force_aio to force use of aio anyway
2014-06-25 23:29:01.852189 7f52915927a0 -1 filestore(/var/lib/ceph/osd/ceph-2) could not find 23c2fcde/osd_superblock/0//-1 in index: (2) No such file or directory
2014-06-25 23:29:01.904476 7f52915927a0 -1 created object store /var/lib/ceph/osd/ceph-2 journal /var/lib/ceph/osd/ceph-2/journal for osd.2 fsid f11240d4-86b1-49ba-aacc-6d3d37b24cc4
2014-06-25 23:29:01.904712 7f52915927a0 -1 auth: error reading file: /var/lib/ceph/osd/ceph-2/keyring: can't open /var/lib/ceph/osd/ceph-2/keyring: (2) No such file or directory
2014-06-25 23:29:01.905376 7f52915927a0 -1 created new key in keyring /var/lib/ceph/osd/ceph-2/keyring
[root@node3 ~]#
6) Register the OSD's authentication key
[root@node3 ~]# ceph auth add osd.2 osd 'allow *' mon 'allow profile osd' -i /var/lib/ceph/osd/ceph-2/keyring
added key for osd.2
7) Add a CRUSH bucket for this host
[root@node3 ~]# ceph osd crush add-bucket node3 host
added bucket node3 type host to crush map
8) Place the host under the root default
[root@node3 ~]# ceph osd crush move node3 root=default
moved item id -4 name 'node3' to location {root=default} in crush map
9) Add osd.2 to the CRUSH map with a weight of 1.0
[root@node3 ~]# ceph osd crush add osd.2 1.0 host=node3
add item id 2 name 'osd.2' weight 1 at location {host=node3} to crush map
10) Create the sysvinit marker file
[root@node3 ~]# touch /var/lib/ceph/osd/ceph-2/sysvinit
11) Start the OSD daemon
[root@node3 ~]# /etc/init.d/ceph start osd.2
=== osd.2 ===
create-or-move updated item name 'osd.2' weight 0.02 at location {host=node3,root=default} to crush map
Starting Ceph osd.2 on node3...
starting osd.2 at :/0 osd_data /var/lib/ceph/osd/ceph-2 /var/lib/ceph/osd/ceph-2/journal
12) Check the OSD tree
[root@node3 ~]# ceph osd tree
# id weight type name up/down reweight
-1 3 root default
-2 1 host node1
0 1 osd.0 up 1
-3 1 host node2
1 1 osd.1 up 1
-4 1 host node3
2 1 osd.2 up 1
Add the fourth OSD (on node4)
1) Create an OSD and obtain its osd number
ceph osd create
2) Create a data directory for the OSD
mkdir -p /var/lib/ceph/osd/ceph-3
3) Format the prepared OSD disk as XFS
mkfs.xfs -f /dev/sdb
4) Mount the directory
mount /dev/sdb /var/lib/ceph/osd/ceph-3
mount -o remount,user_xattr /var/lib/ceph/osd/ceph-3
Add the mount to /etc/fstab
vi /etc/fstab
/dev/sdb /var/lib/ceph/osd/ceph-3 xfs defaults 0 0
/dev/sdb /var/lib/ceph/osd/ceph-3 xfs remount,user_xattr 0 0
5) Initialize the OSD data directory
ceph-osd -i 3 --mkfs --mkkey
6) Register the OSD's authentication key
ceph auth add osd.3 osd 'allow *' mon 'allow profile osd' -i /var/lib/ceph/osd/ceph-3/keyring
7) Add a CRUSH bucket for this host
ceph osd crush add-bucket node4 host
8) Place the host under the root default
[root@node4 ~]# ceph osd crush move node4 root=default
9) Add osd.3 to the CRUSH map with a weight of 1.0
[root@node4 ~]# ceph osd crush add osd.3 1.0 host=node4
10) Create the sysvinit marker file
[root@node4 ~]# touch /var/lib/ceph/osd/ceph-3/sysvinit
11) Start the OSD daemon
[root@node4 ~]# /etc/init.d/ceph start osd.3
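Note that node4 does not appear in the device list in section 1, so before running the steps above it also needs the yum repos and Ceph packages from sections 2 and 3, plus a copy of the cluster configuration and keys from node1 (as the appendix does):
[root@node1 ~]# scp /etc/ceph/* node4:/etc/ceph/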
Add the metadata servers
Add the first metadata server
1) Create a directory for the mds metadata server
[root@node1 ~]# mkdir -p /var/lib/ceph/mds/ceph-node1
2) Create a key for the bootstrap-mds client. Note: if the keyring below has already been generated in that directory, skip this step.
[root@node1 ~]# ceph-authtool --create-keyring /var/lib/ceph/bootstrap-mds/ceph.keyring --gen-key -n client.bootstrap-mds
3) Register the bootstrap-mds client in the Ceph auth database, granting it its capabilities and adding the key created above. Note: check ceph auth list first; if a client.bootstrap-mds user already exists, skip this step.
[root@node1 ~]# ceph auth add client.bootstrap-mds mon 'allow profile bootstrap-mds' -i /var/lib/ceph/bootstrap-mds/ceph.keyring
added key for client.bootstrap-mds
4) Create the file ceph.bootstrap-mds.keyring in root's home directory
touch /root/ceph.bootstrap-mds.keyring
5) Import the key from /var/lib/ceph/bootstrap-mds/ceph.keyring into ~/ceph.bootstrap-mds.keyring
ceph-authtool --import-keyring /var/lib/ceph/bootstrap-mds/ceph.keyring ceph.bootstrap-mds.keyring
6) Create the mds.node1 user in the Ceph auth database, grant it its capabilities, and save its key to /var/lib/ceph/mds/ceph-node1/keyring
ceph --cluster ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/ceph.keyring auth get-or-create mds.node1 osd 'allow rwx' mds 'allow' mon 'allow profile mds' -o /var/lib/ceph/mds/ceph-node1/keyring
7) Create an empty sysvinit marker file used when starting the mds
[root@node1 ~]# touch /var/lib/ceph/mds/ceph-node1/sysvinit
8) Create an empty done file so that the mds is not re-initialized later
[root@node1 ~]# touch /var/lib/ceph/mds/ceph-node1/done
9) Start the mds daemon
[root@node1 ~]# service ceph start mds.node1
=== mds.node1 ===
Starting Ceph mds.node1 on node1...
starting mds.node1 at :/0
Add the second metadata server
1) Create a directory for the mds on node2
[root@node2 ~]# mkdir -p /var/lib/ceph/mds/ceph-node2
2) Create a bootstrap-mds directory on node2
[root@node2 ~]# mkdir -p /var/lib/ceph/bootstrap-mds/
3) From node1, copy /var/lib/ceph/bootstrap-mds/ceph.keyring and /root/ceph.bootstrap-mds.keyring to node2
[root@node1 ~]# scp /var/lib/ceph/bootstrap-mds/ceph.keyring node2:/var/lib/ceph/bootstrap-mds/
[root@node1 ~]# scp /root/ceph.bootstrap-mds.keyring node2:/root/
4) From node1, copy the files under /var/lib/ceph/mds/ceph-node1/ to node2
[root@node1 ~]# scp /var/lib/ceph/mds/ceph-node1/sysvinit node2:/var/lib/ceph/mds/ceph-node2/
5) Create the mds.node2 user in the Ceph auth database, grant it its capabilities, and save its key to /var/lib/ceph/mds/ceph-node2/keyring
[root@node2 ~]# ceph --cluster ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/ceph.keyring auth get-or-create mds.node2 osd 'allow rwx' mds 'allow' mon 'allow profile mds' -o /var/lib/ceph/mds/ceph-node2/keyring
6) Create an empty done file so that the mds is not re-initialized later
[root@node2 ~]# touch /var/lib/ceph/mds/ceph-node2/done
7) Start the mds daemon
[root@node2 ~]# service ceph start mds.node2
Add the third metadata server
1) Create a directory for the mds on node3
[root@node3 ~]# mkdir -p /var/lib/ceph/mds/ceph-node3
2) Create a bootstrap-mds directory on node3
[root@node3 ~]# mkdir -p /var/lib/ceph/bootstrap-mds/
3) From node1, copy /var/lib/ceph/bootstrap-mds/ceph.keyring and /root/ceph.bootstrap-mds.keyring to node3
[root@node1 ~]# scp /var/lib/ceph/bootstrap-mds/ceph.keyring node3:/var/lib/ceph/bootstrap-mds/
[root@node1 ~]# scp /root/ceph.bootstrap-mds.keyring node3:/root/
4) From node1, copy the files under /var/lib/ceph/mds/ceph-node1/ to node3
[root@node1 ~]# scp /var/lib/ceph/mds/ceph-node1/sysvinit node3:/var/lib/ceph/mds/ceph-node3/
5) Create the mds.node3 user in the Ceph auth database, grant it its capabilities, and save its key to /var/lib/ceph/mds/ceph-node3/keyring
[root@node3 ~]# ceph --cluster ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/ceph.keyring auth get-or-create mds.node3 osd 'allow rwx' mds 'allow' mon 'allow profile mds' -o /var/lib/ceph/mds/ceph-node3/keyring
6) Create an empty done file so that the mds is not re-initialized later
[root@node3 ~]# touch /var/lib/ceph/mds/ceph-node3/done
7) Start the mds daemon
[root@node3 ~]# service ceph start mds.node3
Finally, check the cluster status
[root@node1 ~]# ceph -w
cluster f11240d4-86b1-49ba-aacc-6d3d37b24cc4
health HEALTH_OK
monmap e2: 3 mons at {node1=10.240.240.211:6789/0,node2=10.240.240.212:6789/0,node3=10.240.240.213:6789/0}, election epoch 8, quorum 0,1,2 node1,node2,node3
osdmap e23: 3 osds: 3 up, 3 in
pgmap v47: 192 pgs, 3 pools, 0 bytes data, 0 objects
3175 MB used, 58234 MB / 61410 MB avail
192 active+clean
2014-06-25 23:32:48.340284 mon.0 [INF] pgmap v47: 192 pgs: 192 active+clean; 0 bytes data, 3175 MB used, 58234 MB / 61410 MB avail
5. Install the client and mount RBD and CephFS
Install the packages
1) Install the packages
[root@ceph-client ceph]#yum install -y ceph
2) Upgrade the kernel
Kernels before 2.6.34 do not ship the rbd module, so upgrade the kernel to the latest version
rpm --import http://elrepo.org/RPM-GPG-KEY-elrepo.org
rpm -Uvh http://elrepo.org/elrepo-release-6-5.el6.elrepo.noarch.rpm
yum --enablerepo=elrepo-kernel install kernel-ml
After installing the kernel, edit /etc/grub.conf so that the machine boots into the new kernel after a reboot
Change default=1 to default=0 in that file
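After rebooting into the new kernel it is worth confirming that the rbd module is really available — a quick check, not part of the original text:
uname -r    # should now report the elrepo kernel-ml version
modprobe rbd && lsmod | grep rbd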
Mounting via RBD
1) Create a new Ceph pool
[root@client ~]# ceph osd pool create jiayuan 256
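The 256 here is the placement-group count for the pool. A common rule of thumb is (number of OSDs x 100) / replica count, rounded up to a power of two; with 3 OSDs and 3 replicas that gives 100, so 128 would already suffice and 256 is simply a generous round-up.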
2) Create an image in the pool
[root@client ~]# rbd create test-1 --size 40960 -p jiayuan
3) Map the image to a block device
[root@client ~]# rbd map test-1 -p jiayuan
4) Show the mapped images
[root@client ~]# rbd showmapped
id pool image snap device
0 jiayuan test-1 - /dev/rbd0
5) Format the mapped block device
[root@client ~]# mkfs.ext4 -m0 /dev/rbd0
6) Mount the newly created filesystem
[root@client ~]# mkdir /mnt/ceph-rbd-test-1
[root@client ~]# mount /dev/rbd0 /mnt/ceph-rbd-test-1/
[root@client ~]# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/sda2 19G 3.0G 15G 17% /
tmpfs 242M 72K 242M 1% /dev/shm
/dev/sda1 283M 76M 188M 29% /boot
/dev/rbd0 40G 48M 40G 1% /mnt/ceph-rbd-test-1
7) Change into the mounted filesystem and run a dd performance test (sketched below)
[root@client ~]# cd /mnt/ceph-rbd-test-1/
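The original stops short of the actual dd command; a typical throughput test inside the mounted directory (sizes purely illustrative) would be:
[root@client ceph-rbd-test-1]# dd if=/dev/zero of=./ddtest.img bs=1M count=1024 oflag=direct    # write test
[root@client ceph-rbd-test-1]# dd if=./ddtest.img of=/dev/null bs=1M iflag=direct    # read test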
Mounting CephFS
1) Create a mount point and mount the Ceph filesystem on it.
[root@ceph-client ~]# mkdir /mnt/mycephfs
[root@ceph-client ~]# mount -t ceph 10.240.240.211:6789:/ /mnt/mycephfs -v -o name=admin,secret=AQDT9pNTSFD6NRAAoZkAgx21uGQ+DM/k0rzxow==
10.240.240.211:6789:/ on /mnt/mycephfs type ceph (rw,name=admin,secret=AQDT9pNTSFD6NRAAoZkAgx21uGQ+DM/k0rzxow==)
Or mount with the following command
[root@ceph-client ~]# mount -t ceph 10.240.240.211:6789:/ /mnt/mycephfs -v -o name=admin,secretfile=/etc/ceph/ceph.client.admin.keyring
#the name and secret values used in the commands above come from the monitor's /etc/ceph keyring file:
[root@node1 ~]# cat /etc/ceph/ceph.client.admin.keyring
[client.admin]
key = AQDT9pNTSFD6NRAAoZkAgx21uGQ+DM/k0rzxow==
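Passing the key on the command line (or storing it in fstab) leaves it readable in shell history and plain files. Note also that on most versions mount.ceph's secretfile= option expects a file containing only the base64 key, not a whole keyring, so extracting the key first is the safer pattern — a sketch, with /etc/ceph/admin.secret as an assumed path:
ceph-authtool -n client.admin -p /etc/ceph/ceph.client.admin.keyring > /etc/ceph/admin.secret
chmod 600 /etc/ceph/admin.secret
mount -t ceph 10.240.240.211:6789:/ /mnt/mycephfs -o name=admin,secretfile=/etc/ceph/admin.secret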
2) If there are several monitor nodes, mount against all of them; that keeps CephFS available, so writes are unaffected when one monitor goes down
[root@client ~]# mount.ceph node1,node2,node3:/ /mnt/mycephfs -v -o name=admin,secret=AQDvxaxTaG4uBRAA9fKTwV8iqPjm/K+B4+qpEw==
parsing options: name=admin,secret=AQDvxaxTaG4uBRAA9fKTwV8iqPjm/K+B4+qpEw==
[root@client ~]#
[root@client ~]#
[root@client ~]# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/sda2 19G 3.0G 15G 17% /
tmpfs 242M 72K 242M 1% /dev/shm
/dev/sda1 283M 76M 188M 29% /boot
10.240.240.211,10.240.240.212,10.240.240.213:/
20G 3.5G 17G 18% /mnt/mycephfs
3) Add the mount to /etc/fstab
[root@client ~]# vi /etc/fstab
10.240.240.211,10.240.240.212,10.240.240.213:/ /mnt/mycephfs ceph name=admin,secret=AQDvxaxTaG4uBRAA9fKTwV8iqPjm/K+B4+qpEw==,noatime 0 2
6. Uninstall the Ceph cluster (running the two commands below removes the Ceph packages and the cluster from every node)
[root@node1 ~]# ceph-deploy purge node1 node2 node3 node4
[root@node1 ~]# ceph-deploy purgedata node1 node2 node3 node4
Appendix: quick Ceph setup script
Set up the first mon node
1) Edit the Ceph configuration file on node1
vi /etc/ceph/ceph.conf
[global]
fsid = f11240d4-86b1-49ba-aacc-6d3d37b24cc4
mon initial members = node1,node2,node3
mon host = 10.240.240.211,10.240.240.212,10.240.240.213
public network = 10.240.240.0/24
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
osd journal size = 1024
filestore xattr use omap = true
osd pool default size = 3
osd pool default min size = 1
osd crush chooseleaf type = 1
osd_mkfs_type = xfs
max mds = 5
mds max file size = 100000000000000
mds cache size = 1000000
mon osd down out interval = 900
cluster_network = 10.39.102.0/24
[mon]
mon clock drift allowed = .50
2) Then run the following on node1
ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'
ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --set-uid=0 --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow'
ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring
mkdir -p /var/lib/ceph/mon/ceph-node1
mkdir -p /var/lib/ceph/bootstrap-osd/
ceph-authtool -C /var/lib/ceph/bootstrap-osd/ceph.keyring
ceph-mon --mkfs -i node1 --keyring /tmp/ceph.mon.keyring
touch /var/lib/ceph/mon/ceph-node1/done
touch /var/lib/ceph/mon/ceph-node1/sysvinit
/sbin/service ceph -c /etc/ceph/ceph.conf start mon.node1
Set up the second mon node
1) On node2
mkdir /var/lib/ceph/bootstrap-osd/
mkdir -p /var/lib/ceph/mon/ceph-node2
2) On node1
scp /etc/ceph/* node2:/etc/ceph/
scp /var/lib/ceph/bootstrap-osd/ceph.keyring node2:/var/lib/ceph/bootstrap-osd/
scp /tmp/ceph.mon.keyring node2:/tmp/
3) On node2
ceph-mon --mkfs -i node2 --keyring /tmp/ceph.mon.keyring
touch /var/lib/ceph/mon/ceph-node2/done
touch /var/lib/ceph/mon/ceph-node2/sysvinit
/sbin/service ceph -c /etc/ceph/ceph.conf start mon.node2
Set up the third mon node
1) On node3
mkdir /var/lib/ceph/bootstrap-osd/
mkdir -p /var/lib/ceph/mon/ceph-node3
2) On node1
scp /etc/ceph/* node3:/etc/ceph/
scp /var/lib/ceph/bootstrap-osd/ceph.keyring node3:/var/lib/ceph/bootstrap-osd/
scp /tmp/ceph.mon.keyring node3:/tmp/
3) On node3
ceph-mon --mkfs -i node3 --keyring /tmp/ceph.mon.keyring
touch /var/lib/ceph/mon/ceph-node3/done
touch /var/lib/ceph/mon/ceph-node3/sysvinit
/sbin/service ceph -c /etc/ceph/ceph.conf start mon.node3
Add the OSD nodes
Add the first OSD
1) On node1
ceph osd create
mkdir -p /var/lib/ceph/osd/ceph-0
mkfs.xfs -f /dev/sdb
mount /dev/sdb /var/lib/ceph/osd/ceph-0
mount -o remount,user_xattr /var/lib/ceph/osd/ceph-0
ceph-osd -i 0 --mkfs --mkkey
ceph auth add osd.0 osd 'allow *' mon 'allow profile osd' -i /var/lib/ceph/osd/ceph-0/keyring
ceph osd crush add-bucket node1 host
ceph osd crush move node1 root=default
ceph osd crush add osd.0 1.0 host=node1
touch /var/lib/ceph/osd/ceph-0/sysvinit
/etc/init.d/ceph start osd.0
2) Add the fstab entries on node1
[root@node1 ~]# vi /etc/fstab
/dev/sdb /var/lib/ceph/osd/ceph-0 xfs defaults 0 0
/dev/sdb /var/lib/ceph/osd/ceph-0 xfs remount,user_xattr 0 0
Add the second OSD
1) On node2
ceph osd create
mkdir -p /var/lib/ceph/osd/ceph-1
mkfs.xfs -f /dev/sdb
mount /dev/sdb /var/lib/ceph/osd/ceph-1
mount -o remount,user_xattr /var/lib/ceph/osd/ceph-1
ceph-osd -i 1 --mkfs --mkkey
ceph auth add osd.1 osd 'allow *' mon 'allow profile osd' -i /var/lib/ceph/osd/ceph-1/keyring
ceph osd crush add-bucket node2 host
ceph osd crush move node2 root=default
ceph osd crush add osd.1 1.0 host=node2
touch /var/lib/ceph/osd/ceph-1/sysvinit
/etc/init.d/ceph start osd.1
2) Add the fstab entries on node2
[root@node2 ~]# vi /etc/fstab
/dev/sdb /var/lib/ceph/osd/ceph-1 xfs defaults 0 0
/dev/sdb /var/lib/ceph/osd/ceph-1 xfs remount,user_xattr 0 0
Add the third OSD
1) On node3
ceph osd create
mkdir -p /var/lib/ceph/osd/ceph-2
mkfs.xfs -f /dev/sdb
mount /dev/sdb /var/lib/ceph/osd/ceph-2
mount -o remount,user_xattr /var/lib/ceph/osd/ceph-2
ceph-osd -i 2 --mkfs --mkkey
ceph auth add osd.2 osd 'allow *' mon 'allow profile osd' -i /var/lib/ceph/osd/ceph-2/keyring
ceph osd crush add-bucket node3 host
ceph osd crush move node3 root=default
ceph osd crush add osd.2 1.0 host=node3
touch /var/lib/ceph/osd/ceph-2/sysvinit
/etc/init.d/ceph start osd.2
2) Add the fstab entries on node3
[root@node3 ~]# vi /etc/fstab
/dev/sdb /var/lib/ceph/osd/ceph-2 xfs defaults 0 0
/dev/sdb /var/lib/ceph/osd/ceph-2 xfs remount,user_xattr 0 0
Add the fourth OSD
1) On node1
scp /etc/ceph/* node4:/etc/ceph/
2) On node4
ceph osd create
mkdir -p /var/lib/ceph/osd/ceph-3
mkfs.xfs -f /dev/sdb
mount /dev/sdb /var/lib/ceph/osd/ceph-3
mount -o remount,user_xattr /var/lib/ceph/osd/ceph-3
ceph-osd -i 3 --mkfs --mkkey
ceph auth add osd.3 osd 'allow *' mon 'allow profile osd' -i /var/lib/ceph/osd/ceph-3/keyring
ceph osd crush add-bucket node4 host
ceph osd crush move node4 root=default
ceph osd crush add osd.3 1.0 host=node4
touch /var/lib/ceph/osd/ceph-3/sysvinit
/etc/init.d/ceph start osd.3
3) Add the fstab entries on node4
[root@node4 ~]# vi /etc/fstab
/dev/sdb /var/lib/ceph/osd/ceph-3 xfs defaults 0 0
/dev/sdb /var/lib/ceph/osd/ceph-3 xfs remount,user_xattr 0 0
Add the metadata servers
Add the first metadata server
1) On node1
mkdir -p /var/lib/ceph/mds/ceph-node1
touch /root/ceph.bootstrap-mds.keyring
ceph-authtool --import-keyring /var/lib/ceph/bootstrap-mds/ceph.keyring ceph.bootstrap-mds.keyring
ceph --cluster ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/ceph.keyring auth get-or-create mds.node1 osd 'allow rwx' mds 'allow' mon 'allow profile mds' -o /var/lib/ceph/mds/ceph-node1/keyring
touch /var/lib/ceph/mds/ceph-node1/sysvinit
touch /var/lib/ceph/mds/ceph-node1/done
service ceph start mds.node1
Add the second metadata server
1) On node2
mkdir -p /var/lib/ceph/mds/ceph-node2
mkdir -p /var/lib/ceph/bootstrap-mds/
2) On node1
scp /var/lib/ceph/bootstrap-mds/ceph.keyring node2:/var/lib/ceph/bootstrap-mds/
scp /root/ceph.bootstrap-mds.keyring node2:/root/
scp /var/lib/ceph/mds/ceph-node1/sysvinit node2://var/lib/ceph/mds/ceph-node2/
3) On node2
ceph --cluster ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/ceph.keyring auth get-or-create mds.node2 osd 'allow rwx' mds 'allow' mon 'allow profile mds' -o /var/lib/ceph/mds/ceph-node2/keyring
touch /var/lib/ceph/mds/ceph-node2/done
service ceph start mds.node2
Add the third metadata server
1) On node3
mkdir -p /var/lib/ceph/mds/ceph-node3
mkdir -p /var/lib/ceph/bootstrap-mds/
2) On node1
scp /var/lib/ceph/bootstrap-mds/ceph.keyring node3:/var/lib/ceph/bootstrap-mds/
scp /root/ceph.bootstrap-mds.keyring node3:/root/
scp /var/lib/ceph/mds/ceph-node1/sysvinit node3://var/lib/ceph/mds/ceph-node3/
3) On node3
ceph --cluster ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/ceph.keyring auth get-or-create mds.node3 osd 'allow rwx' mds 'allow' mon 'allow profile mds' -o /var/lib/ceph/mds/ceph-node3/keyring
touch /var/lib/ceph/mds/ceph-node3/done
service ceph start mds.node3