1. Environment Configuration
# Configure YUM on all nodes.
# Move the stock repo files out of the way first:
cd /etc/yum.repos.d/
mkdir /tmp/bak
mv * /tmp/bak/

# Configure the CentOS base repo and the EPEL repo:
curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
yum install wget -y
wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo

# Install the YUM priorities plugin:
yum -y install yum-plugin-priorities.noarch

# Configure the Ceph repo:
cat << EOF | tee /etc/yum.repos.d/ceph.repo
[Ceph]
name=Ceph packages for \$basearch
baseurl=http://mirrors.163.com/ceph/rpm-nautilus/el7/\$basearch
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://download.ceph.com/keys/release.asc
priority=1

[Ceph-noarch]
name=Ceph noarch packages
baseurl=http://mirrors.163.com/ceph/rpm-nautilus/el7/noarch
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://download.ceph.com/keys/release.asc
priority=1

[ceph-source]
name=Ceph source packages
baseurl=http://mirrors.163.com/ceph/rpm-nautilus/el7/SRPMS
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://download.ceph.com/keys/release.asc
EOF

# Disable the firewall:
systemctl stop firewalld
systemctl disable firewalld
systemctl status firewalld

# Set the hostnames:
# On the ceph1 node:
hostnamectl --static set-hostname ceph1
# On the ceph2 node:
hostnamectl --static set-hostname ceph2
# On the ceph3 node:
hostnamectl --static set-hostname ceph3

# Configure the hosts file on all nodes. /etc/hosts should contain:
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.0.231 ceph1
192.168.0.232 ceph2
192.168.0.233 ceph3

# Configure NTP on all nodes.
# Install NTP on all cluster and client nodes:
yum -y install ntp ntpdate

# Use ceph1 as the NTP server. On ceph1, edit the NTP configuration:
vi /etc/ntp.conf
# and add the following to make it the NTP server:
restrict 127.0.0.1
restrict ::1
restrict 192.168.0.0 mask 255.255.255.0   # ceph1's subnet and netmask
server 127.127.1.0
fudge 127.127.1.0 stratum 8

# On ceph2, ceph3, and all client nodes, edit the NTP configuration:
vi /etc/ntp.conf
# and add the following to make them clients of ceph1:
server 192.168.0.231

# Then start the service on every node (a sync check is sketched after
# this setup block):
systemctl start ntpd
systemctl enable ntpd
systemctl status ntpd

# SSH setup: generate a key pair on the ceph1 node and distribute the
# public key to every host/client node:
ssh-keygen -t rsa     # press Enter to accept the defaults
for i in {1..3}; do ssh-copy-id ceph$i; done      # answer yes and enter each node's password when prompted
for i in {1..3}; do ssh-copy-id client$i; done

# Disable SELinux on all nodes:
sed -i 's/enforcing/disabled/' /etc/selinux/config
setenforce 0
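Once ntpd is running, it is worth confirming that ceph2/ceph3 actually synchronize against ceph1 before continuing. This is a suggested check, not part of the original walkthrough:

# On ceph2/ceph3: list NTP peers; ceph1 should appear as the selected
# time source (marked with *) once synchronization settles.
ntpq -p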
2. Install the Ceph Software
By default, yum install installs the latest Ceph version currently available in the repos. If you do not want the latest version, you can restrict it in the /etc/yum.conf file.
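One way to do that is an exclude line in the [main] section of /etc/yum.conf. The pattern below is purely illustrative (the original does not give the exact directive); yum-plugin-versionlock is an alternative approach:

# /etc/yum.conf (illustrative): keep yum from pulling Octopus (15.x)
# packages so the Nautilus repo configured above is used instead.
[main]
exclude=ceph*-15.*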
2.1 Install Ceph on all cluster and client nodes
yum -y install ceph

Check the installed version with the ceph -v command:

[root@ceph1 ~]# ceph -v
ceph version 14.2.9 (581f22da52345dba46ee232b73b990f06029a2a0) nautilus (stable)
[root@ceph2 ~]# ceph -v
ceph version 14.2.9 (581f22da52345dba46ee232b73b990f06029a2a0) nautilus (stable)
[root@ceph3 ~]# ceph -v
ceph version 14.2.9 (581f22da52345dba46ee232b73b990f06029a2a0) nautilus (stable)
2.2 Additionally, install ceph-deploy on the ceph1 node
yum -y install ceph-deploy
3. Deploy the MON Nodes
3.1 Create a working directory and generate the configuration file
mkdir cluster
cd cluster
ceph-deploy new ceph1 ceph2 ceph3
[root@ceph1 ~]# cd cluster/
[root@ceph1 cluster]# ceph-deploy new ceph1 ceph2 ceph3
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy new ceph1 ceph2 ceph3
[ceph_deploy.cli][INFO ] ceph-deploy options:
[ceph_deploy.cli][INFO ]  username          : None
[ceph_deploy.cli][INFO ]  func              : <function new at 0x7ffb7dc07de8>
[ceph_deploy.cli][INFO ]  verbose           : False
[ceph_deploy.cli][INFO ]  overwrite_conf    : False
[ceph_deploy.cli][INFO ]  quiet             : False
[ceph_deploy.cli][INFO ]  cd_conf           : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7ffb7d58c6c8>
[ceph_deploy.cli][INFO ]  cluster           : ceph
[ceph_deploy.cli][INFO ]  ssh_copykey       : True
[ceph_deploy.cli][INFO ]  mon               : ['ceph1', 'ceph2', 'ceph3']
[ceph_deploy.cli][INFO ]  public_network    : None
[ceph_deploy.cli][INFO ]  ceph_conf         : None
[ceph_deploy.cli][INFO ]  cluster_network   : None
[ceph_deploy.cli][INFO ]  default_release   : False
[ceph_deploy.cli][INFO ]  fsid              : None
[ceph_deploy.new][DEBUG ] Creating new cluster named ceph
[ceph_deploy.new][INFO ] making sure passwordless SSH succeeds
[ceph1][DEBUG ] connected to host: ceph1
[ceph1][DEBUG ] detect platform information from remote host
[ceph1][DEBUG ] detect machine type
[ceph1][DEBUG ] find the location of an executable
[ceph1][INFO ] Running command: /usr/sbin/ip link show
[ceph1][INFO ] Running command: /usr/sbin/ip addr show
[ceph1][DEBUG ] IP addresses found: [u'192.168.0.231']
[ceph_deploy.new][DEBUG ] Resolving host ceph1
[ceph_deploy.new][DEBUG ] Monitor ceph1 at 192.168.0.231
[ceph_deploy.new][INFO ] making sure passwordless SSH succeeds
[ceph2][DEBUG ] connected to host: ceph1
[ceph2][INFO ] Running command: ssh -CT -o BatchMode=yes ceph2
[ceph2][DEBUG ] connected to host: ceph2
[ceph2][DEBUG ] detect platform information from remote host
[ceph2][DEBUG ] detect machine type
[ceph2][DEBUG ] find the location of an executable
[ceph2][INFO ] Running command: /usr/sbin/ip link show
[ceph2][INFO ] Running command: /usr/sbin/ip addr show
[ceph2][DEBUG ] IP addresses found: [u'192.168.0.232']
[ceph_deploy.new][DEBUG ] Resolving host ceph2
[ceph_deploy.new][DEBUG ] Monitor ceph2 at 192.168.0.232
[ceph_deploy.new][INFO ] making sure passwordless SSH succeeds
[ceph3][DEBUG ] connected to host: ceph1
[ceph3][INFO ] Running command: ssh -CT -o BatchMode=yes ceph3
[ceph3][DEBUG ] connected to host: ceph3
[ceph3][DEBUG ] detect platform information from remote host
[ceph3][DEBUG ] detect machine type
[ceph3][DEBUG ] find the location of an executable
[ceph3][INFO ] Running command: /usr/sbin/ip link show
[ceph3][INFO ] Running command: /usr/sbin/ip addr show
[ceph3][DEBUG ] IP addresses found: [u'192.168.0.233']
[ceph_deploy.new][DEBUG ] Resolving host ceph3
[ceph_deploy.new][DEBUG ] Monitor ceph3 at 192.168.0.233
[ceph_deploy.new][DEBUG ] Monitor initial members are ['ceph1', 'ceph2', 'ceph3']
[ceph_deploy.new][DEBUG ] Monitor addrs are ['192.168.0.231', '192.168.0.232', '192.168.0.233']
[ceph_deploy.new][DEBUG ] Creating a random mon key...
[ceph_deploy.new][DEBUG ] Writing monitor keyring to ceph.mon.keyring...
[ceph_deploy.new][DEBUG ] Writing initial config to ceph.conf...
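At this point ceph-deploy has written ceph.conf into the working directory. Before initializing the monitors it is common (though not shown in the original) to add a public_network entry. A sketch of what cluster/ceph.conf roughly looks like, assuming the 192.168.0.0/24 network used above (the fsid will differ per cluster):

# cluster/ceph.conf (generated by ceph-deploy new)
[global]
fsid = ea192428-05d2-437a-8cce-9d187de82dd5
mon_initial_members = ceph1, ceph2, ceph3
mon_host = 192.168.0.231,192.168.0.232,192.168.0.233
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
public_network = 192.168.0.0/24    # added line (assumption)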
3.2 Initialize the monitors and gather keys
ceph-deploy mon create-initial
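If create-initial succeeds, the working directory typically ends up holding the admin and bootstrap keyrings. The listing below is what ceph-deploy 2.0.1 usually produces, not output captured from this run:

[root@ceph1 cluster]# ls
ceph.bootstrap-mds.keyring  ceph.bootstrap-mgr.keyring  ceph.bootstrap-osd.keyring
ceph.bootstrap-rgw.keyring  ceph.client.admin.keyring   ceph.conf
ceph.mon.keyring            ceph-deploy-ceph.log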
3.3 Copy ceph.client.admin.keyring to every node
ceph-deploy --overwrite-conf admin ceph1 ceph2 ceph3
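A quick way to confirm the keyring actually landed on every node (a suggested check, not from the original):

for i in {1..3}; do ssh ceph$i ls -l /etc/ceph/ceph.client.admin.keyring; done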
3.4 Verify that the configuration succeeded
[root@ceph1 cluster]# ceph -s
  cluster:
    id:     ea192428-05d2-437a-8cce-9d187de82dd5
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum ceph1,ceph2,ceph3 (age 5m)
    mgr: no daemons active
    osd: 0 osds: 0 up, 0 in

  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs:
4. Deploy the MGR Nodes
ceph-deploy mgr create ceph1 ceph2 ceph3
Check whether the MGR daemons were deployed successfully:
ceph -s
[root@ceph1 cluster]# ceph -s
  cluster:
    id:     ea192428-05d2-437a-8cce-9d187de82dd5
    health: HEALTH_WARN
            OSD count 0 < osd_pool_default_size 3

  services:
    mon: 3 daemons, quorum ceph1,ceph2,ceph3 (age 8m)
    mgr: ceph1(active, since 22s), standbys: ceph2, ceph3
    osd: 0 osds: 0 up, 0 in

  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs:
5. Deploy the OSD Nodes
ceph-deploy osd create --data /dev/sdb ceph1
ceph-deploy osd create --data /dev/sdc ceph1
ceph-deploy osd create --data /dev/sdd ceph1
ceph-deploy osd create --data /dev/sdb ceph2
ceph-deploy osd create --data /dev/sdc ceph2
ceph-deploy osd create --data /dev/sdd ceph2
ceph-deploy osd create --data /dev/sdb ceph3
ceph-deploy osd create --data /dev/sdc ceph3
ceph-deploy osd create --data /dev/sdd ceph3
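Equivalently, the nine commands can be collapsed into a loop, assuming every node exposes the same /dev/sdb, /dev/sdc, /dev/sdd disk layout:

# Run from the cluster/ directory on ceph1.
for node in ceph1 ceph2 ceph3; do
  for dev in /dev/sdb /dev/sdc /dev/sdd; do
    ceph-deploy osd create --data "$dev" "$node"
  done
done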
After the OSDs are created, check that the cluster is healthy:
[root@ceph1 cluster]# ceph -s
  cluster:
    id:     ea192428-05d2-437a-8cce-9d187de82dd5
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum ceph1,ceph2,ceph3 (age 14m)
    mgr: ceph1(active, since 6m), standbys: ceph2, ceph3
    osd: 9 osds: 9 up (since 2m), 9 in (since 2m)

  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   9.0 GiB used, 135 GiB / 144 GiB avail
    pgs:
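For a per-host breakdown of the nine OSDs, the following cross-checks can be run as well (suggested commands; their output was not captured in the original run):

ceph osd tree    # CRUSH view: each host should show three OSDs, all up
ceph osd df      # per-OSD utilization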
6. Verify Ceph
Create a storage pool:
ceph osd pool create vdbench 10 10
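Note that on Nautilus a new pool should be tagged with the application that will use it, otherwise ceph -s eventually reports an "application not enabled" warning. The original does not show this step, so the line below is an assumed addition:

# Tag the vdbench pool for RBD use:
ceph osd pool application enable vdbench rbd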
Create a block device:
rbd create image01 --size 200 --pool vdbench --image-format 2 --image-feature layering
rbd ls --pool vdbench
[root@ceph1 cluster]# rbd create image01 --size 200 --pool vdbench --image-format 2 --image-feature layering
[root@ceph1 cluster]# rbd ls --pool vdbench
image01
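To exercise the image end to end (for example before pointing vdbench at it), it can be mapped through the kernel RBD driver. A minimal sketch, assuming the layering-only feature set created above and that the image maps to /dev/rbd0:

rbd map vdbench/image01    # prints the mapped device name, e.g. /dev/rbd0
lsblk /dev/rbd0            # confirm the small test device is present
rbd unmap /dev/rbd0        # detach when finished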