Manual installation of Ceph (reference: https://www.jianshu.com/p/b8f085ca0307)
First, add the monitor node to /etc/hosts:
cat << EOF >> /etc/hosts
172.31.240.49 ceph-mon01
EOF
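A quick check that the name now resolves (an added sanity check, not in the original):
# Should print: 172.31.240.49 ceph-mon01
getent hosts ceph-mon01
Next, configure the Aliyun mirror for the Ceph Nautilus packages: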
cat << EOF > /etc/yum.repos.d/ceph.repo
[ceph]
name=ceph
baseurl=http://mirrors.aliyun.com/ceph/rpm-nautilus/el7/x86_64/
gpgcheck=0
priority=1
[ceph-noarch]
name=cephnoarch
baseurl=http://mirrors.aliyun.com/ceph/rpm-nautilus/el7/noarch/
gpgcheck=0
priority=1
[ceph-source]
name=Ceph source packages
baseurl=http://mirrors.aliyun.com/ceph/rpm-nautilus/el7/SRPMS/
enabled=0
gpgcheck=1
type=rpm-md
gpgkey=http://mirrors.aliyun.com/ceph/keys/release.asc
priority=1
EOF
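A quick sanity check that yum can see the new repositories (added, not in the original):
# The ceph and ceph-noarch repos should be listed
yum repolist enabled | grep -i ceph
Next, create a deployment user with passwordless sudo: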
useradd cephd
echo 'CephIl#i42' | passwd --stdin cephd
echo "cephd ALL = (root) NOPASSWD:ALL" | tee /etc/sudoers.d/cephd
chmod 0440 /etc/sudoers.d/cephd
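To confirm passwordless sudo works for the new user (added check):
# Should print "root" with no password prompt
su - cephd -c 'sudo whoami'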
On ceph-mon01:
mkfs.xfs /dev/sdb
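You can confirm the filesystem landed on the disk (added check):
# /dev/sdb should now show an xfs signature
lsblk -f /dev/sdb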
yum -y install expect
su - cephd
expect << EOF
spawn ssh-keygen -t rsa
expect {
    "Enter file in which to save the key (/home/cephd/.ssh/id_rsa):" { send "\r"; exp_continue }
    "Enter passphrase (empty for no passphrase):" { send "\r"; exp_continue }
    "Enter same passphrase again:" { send "\r"; exp_continue }
}
EOF
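The expect wrapper simply accepts ssh-keygen's default answers; the same result is available non-interactively, which avoids the expect dependency entirely (an equivalent alternative, not from the original):
# Empty passphrase, default key path, same effect as the expect script above
ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa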
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
chmod 400 ~/.ssh/authorized_keys
for i in ceph-mon01; do ssh-copy-id -i ~/.ssh/id_rsa.pub cephd@$i; done  # add more hosts here when expanding the cluster
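Passwordless login should now work (added check):
# Should print the remote hostname without prompting for a password
ssh cephd@ceph-mon01 hostname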
su - cephd
cat << EOF > ~/.ssh/config
Host ceph-mon01
Hostname ceph-mon01
User cephd
EOF
chmod 600 ~/.ssh/config
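With the Host block in place, the short alias can be tested (added check):
# Uses the User/Hostname settings from ~/.ssh/config
ssh ceph-mon01 true && echo "ssh alias OK"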
sudo yum -y install ceph-deploy
ceph-deploy --version
mkdir ~/ceph-cluster
cd ~/ceph-cluster
ceph-deploy install --no-adjust-repos ceph-mon01
ceph --version
cd ~/ceph-cluster
ceph-deploy new --public-network 172.31.240.0/24 ceph-mon01
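`ceph-deploy new` writes the initial cluster files into the working directory; a quick look confirms they exist (added check, based on ceph-deploy's standard behavior):
# Expect ceph.conf, ceph.mon.keyring, and ceph-deploy-ceph.log
ls ~/ceph-cluster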
cd ~/ceph-cluster
ceph-deploy --overwrite-conf mon create-initial
If you see the error: [ERROR ] Some monitors have still not reached quorum
Cause: the monitor node's hostname does not match the entry in /etc/hosts.
Fix: correct the hostname, then clean up the environment with the commands below and redeploy.
su - cephd
ceph-deploy purge ceph-mon01
ceph-deploy purgedata ceph-mon01
ceph-deploy forgetkeys
rm -rf ~/ceph-cluster/*
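After a clean run of mon create-initial, the monitor daemon should be active (added check; the systemd unit name assumes the host is ceph-mon01):
# The ceph-mon unit is named after the host
sudo systemctl status ceph-mon@ceph-mon01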
cd ~/ceph-cluster
ceph-deploy --overwrite-conf admin ceph-mon01
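`ceph-deploy admin` pushes ceph.conf and the admin keyring to /etc/ceph, but the keyring stays root-readable, so plain `ceph` commands as cephd may fail with a permission error. A common lab workaround (not in the original):
# Make the admin keyring readable by non-root users; convenient for a test cluster, not for production
sudo chmod +r /etc/ceph/ceph.client.admin.keyring
ceph -s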
cd ~/ceph-cluster
ceph-deploy osd create ceph-mon01 --data /dev/sdb
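If `osd create` refuses /dev/sdb because of the XFS signature created earlier, zapping the disk and rerunning is the usual fix; afterwards the OSD should appear in the tree (my suggestion, same host and device assumed):
# Only needed if ceph-volume complains the device already has a filesystem
ceph-deploy disk zap ceph-mon01 /dev/sdb
ceph-deploy osd create ceph-mon01 --data /dev/sdb
# The new OSD should show as 'up' under host ceph-mon01
sudo ceph osd tree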
cd ~/ceph-cluster
ceph-deploy mgr create ceph-mon01:ceph-mon01_mgr
systemctl status ceph-mgr@ceph-mon01_mgr
ceph -s
ceph daemon osd.0 config get mon_max_pg_per_osd
ceph osd tree