mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.backup
curl -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
mv /etc/yum.repos.d/epel.repo /etc/yum.repos.d/epel.repo.backup
mv /etc/yum.repos.d/epel-testing.repo /etc/yum.repos.d/epel-testing.repo.backup
curl -o /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
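# Optional follow-up (not in the original notes): rebuild the yum cache after switching
# to the Aliyun mirrors
yum clean all
yum makecache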
yum install centos-release-ceph-luminous -y
yum update -y
yum install ceph-deploy -y
yum install ntp ntpdate ntp-doc -y
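# A minimal sketch for keeping clocks in sync across the nodes; it assumes the default
# NTP servers in /etc/ntp.conf are reachable
systemctl enable ntpd
systemctl start ntpd
ntpq -p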
yum install openssh-server -y
# Generate a key pair
ssh-keygen
# Copy the admin node's public key to the other servers
ssh-copy-id {server}
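# Sketch of an ~/.ssh/config on the admin node so ceph-deploy can log in without prompts;
# the "cephuser" deployment account is an assumption, substitute the user you actually created
cat >> ~/.ssh/config <<'EOF'
Host node1
    Hostname node1
    User cephuser
Host node2
    Hostname node2
    User cephuser
Host node3
    Hostname node3
    User cephuser
EOF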
yum install yum-plugin-priorities -y
# Deploy the Ceph cluster from the admin node with ceph-deploy
# Create a deployment directory
mkdir ~/my-cluster
cd ~/my-cluster
# Starting over (clean up the environment if this is not the first Ceph deployment)
ceph-deploy purge {ceph-node} [{ceph-node} ...]
ceph-deploy purgedata {ceph-node} [{ceph-node} ...]
ceph-deploy forgetkeys
rm ceph.*
## Create the cluster
# Create the initial monitor node
# e.g. ceph-deploy new {initial-monitor-node(s)}
ceph-deploy new node1
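# Optional sketch: pin the cluster's public network in the freshly generated ceph.conf so
# the monitors bind to the intended interface (the 192.168.1.0/24 subnet here is an assumption)
echo "public network = 192.168.1.0/24" >> ceph.conf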
# Install the Ceph packages on every node
ceph-deploy install node1 node2 node3
# Initialize the monitor(s) and gather the keys
ceph-deploy mon create-initial
# Once the command above succeeds, the following keyring files appear in the current directory
ceph.client.admin.keyring
ceph.bootstrap-mgr.keyring
ceph.bootstrap-osd.keyring
ceph.bootstrap-mds.keyring
ceph.bootstrap-rgw.keyring
ceph.bootstrap-rbd.keyring
# Distribute the config file and admin keyring to each node
ceph-deploy admin node1 node2 node3
# Deploy a manager daemon (only required from the Luminous release onward)
ceph-deploy mgr create node1
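# Optional: once a mgr is running, Luminous lets you enable the dashboard module
# (run this wherever the admin keyring is available)
ceph mgr module enable dashboard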
# Deploy the OSDs (virtual machines are used here, each with a /dev/vdb volume attached)
ceph-deploy osd create node1:/dev/vdb node2:/dev/vdb node3:/dev/vdb
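# The host:disk form above matches ceph-deploy 1.5.x; with ceph-deploy 2.x (the version
# packaged for Luminous) the equivalent sketch uses --data instead, so use one style or the other
ceph-deploy osd create --data /dev/vdb node1
ceph-deploy osd create --data /dev/vdb node2
ceph-deploy osd create --data /dev/vdb node3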
# Check the cluster; run these on the admin node
ceph health
ceph -s
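# A few other read-only checks that are useful at this point
ceph osd tree
ceph df
ceph mon stat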
# Add a metadata server
ceph-deploy mds create node1
# Add monitors
ceph-deploy mon add node2 node3
# Once new monitors are added, Ceph synchronizes them and they elect a quorum
# Check the quorum status
ceph quorum_status --format json-pretty
# Add managers
# Managers run in active/standby mode; deploying them on several nodes lets a standby take over seamlessly if the active one goes down
ceph-deploy mgr create node2 node3
# Add an RGW instance
# To use the Ceph Object Gateway, an RGW instance has to be deployed
ceph-deploy rgw create node1
# RGW listens on port 7480 by default; the port can be changed by editing ceph.conf
[client]
rgw frontends = civetweb port=80
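# After editing ceph.conf, push it to the gateway host and restart the rgw service there;
# the unit name assumes the default instance id (rgw.node1) that ceph-deploy creates
ceph-deploy --overwrite-conf config push node1
systemctl restart ceph-radosgw@rgw.node1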
# Create a test file to use as object data
echo {Test-data} > testfile.txt
ceph osd pool create mytest 8
# Use rados put with the object name, the test file containing the object data, and the pool name
#rados put {object-name} {file-path} --pool=mytest
rados put test-object-1 testfile.txt --pool=mytest
# Verify that the cluster has stored the object
rados -p mytest ls
# Locate the object
#ceph osd map {pool-name} {object-name}
ceph osd map mytest test-object-1
# Ceph reports the object's location, for example:
osdmap e537 pool 'mytest' (1) object 'test-object-1' -> pg 1.d1743484 (1.4) -> up [1,0] acting [1,0]
# Delete the test object
rados rm test-object-1 --pool=mytest
# Delete the mytest pool (the pool name must be given twice, plus the confirmation flag)
ceph osd pool rm mytest mytest --yes-i-really-really-mean-it
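# On Luminous the monitors refuse pool deletion by default; a sketch of the ceph.conf
# setting that permits it (push the config and restart the mons for it to take effect)
[mon]
mon allow pool delete = true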
# Install LVM
yum install lvm2 -y
# Create a virtual disk backed by an image file
mkdir /ceph && dd if=/dev/zero of=/ceph/ceph-volumes.img bs=1M count=10240 oflag=direct
sgdisk -g --clear /ceph/ceph-volumes.img
vgcreate ceph-volumes $(losetup --show -f /ceph/ceph-volumes.img)
lvcreate -L 9G -n ceph1 ceph-volumes
mkfs.xfs -f /dev/ceph-volumes/ceph1
# Mount it
mkdir -p /var/local/osd1
mount /dev/ceph-volumes/ceph1 /var/local/osd1
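# With a directory-backed OSD like this, the older ceph-deploy 1.5.x flow would prepare and
# activate it roughly as below (a sketch; adjust the node name to wherever the mount lives)
ceph-deploy osd prepare node1:/var/local/osd1
ceph-deploy osd activate node1:/var/local/osd1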