1. Environment Preparation
(1) Node requirements
==》Minimum hardware requirements per node role
Role        Resource          Minimum                             Recommended
-----------------------------------------------------------------------------------------------------------------
ceph-osd    RAM               ~500MB per daemon                   ~1GB per 1TB of storage per daemon
            Volume Storage    1x storage drive per daemon         >1TB storage drive per daemon
            Journal Storage   5GB (default)                       SSD, >1GB per 1TB of storage per daemon
            Network           2x 1GB Ethernet NICs                2x 10GB Ethernet NICs
-----------------------------------------------------------------------------------------------------------------
ceph-mon    RAM               1GB per daemon                      2GB per daemon
            Disk Space        10GB per daemon                     >20GB per daemon
            Network           2x 1GB Ethernet NICs                2x 10GB Ethernet NICs
-----------------------------------------------------------------------------------------------------------------
ceph-mds    RAM               1GB minimum per daemon              >2GB per daemon
            Disk Space        1MB per daemon                      >1MB per daemon
            Network           2x 1GB Ethernet NICs                2x 10GB Ethernet NICs
==》OS environment
CentOS 7
Kernel: 3.10.0-229.el7.x86_64
==》Lab environment
a) 1x PC (RAM > 6GB, disk > 100GB)
b) VirtualBox
c) CentOS 7.1 (3.10.0-229.el7.x86_64) ISO image
==》Base environment: node layout
Hostname      Role          OS                                  Disk
=====================================================================================================
a) admnode    deploy-node   CentOS7.1(3.10.0-229.el7.x86_64)
b) node1      mon,osd       CentOS7.1(3.10.0-229.el7.x86_64)    Disk (/dev/sdb, capacity: 10G)
c) node2      osd           CentOS7.1(3.10.0-229.el7.x86_64)    Disk (/dev/sdb, capacity: 10G)
d) node3      osd           CentOS7.1(3.10.0-229.el7.x86_64)    Disk (/dev/sdb, capacity: 10G)
==》Configure the yum repositories
cp -r /etc/yum.repos.d/ /etc/yum.repos.d.bak
rm -f /etc/yum.repos.d/CentOS-*
vim /etc/yum.repos.d/ceph.repo
Paste in the following repository definitions (note: to install the Hammer release instead of Infernalis, replace "rpm_infernalis" in the [ceph] baseurl below with "rpm_hammer"):
[epel]
name=Ceph epel packages
baseurl=ftp://193.168.140.67/pub/ceph/epel/
enabled=1
priority=2
gpgcheck=0
[ceph]
name=Ceph packages
baseurl=ftp://193.168.140.67/pub/ceph/rpm_infernalis/
enabled=1
priority=2
gpgcheck=0
[update]
name=update
baseurl=ftp://193.168.140.67/pub/updates/
enabled=1
priority=2
gpgcheck=0
[base]
name=base
baseurl=ftp://193.168.140.67/pub/base/
enabled=1
priority=2
gpgcheck=0
Check the yum repositories:
yum repolist all
2. Ceph Node Installation and Configuration
yum install -y yum-utils epel-release
Run only on the ceph-deploy node -- start
yum install -y ceph-deploy
Run only on the ceph-deploy node -- end
Install NTP and OpenSSH:
yum install -y ntp ntpdate ntp-doc openssh-server
Create the Ceph deploy user; replace every {username} below with a username of your choice:
# sudo useradd -d /home/{username} -m {username}
# sudo passwd {username}
Example:
useradd -d /home/cephadmin -m cephadmin
sudo passwd cephadmin
Make sure the created {username} has sudo privileges:
# echo "{username} ALL = (root) NOPASSWD:ALL" | sudo tee /etc/sudoers.d/{username}
# sudo chmod 0440 /etc/sudoers.d/{username}
Example:
echo "cephadmin ALL = (root) NOPASSWD:ALL" | sudo tee /etc/sudoers.d/cephadmin
chmod 0440 /etc/sudoers.d/cephadmin
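To double-check that the sudoers entry works, you can list the user's sudo rights; for the example user it should report (root) NOPASSWD: ALL:
# sudo -l -U cephadmin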
Set the hostname on each node (e.g. admnode, node1, node2) and map every node's hostname to its IP address on all nodes.
hostnamectl set-hostname admnode
vim /etc/hosts
Append at the end:
<admnode's IP> admnode
<node1's IP> node1
<node2's IP> node2
<node3's IP> node3
Example:
10.167.225.111 admnode
10.167.225.114 node1
10.167.225.116 node2
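Optionally confirm that the names resolve as intended from each node (example hostnames from above):
# getent hosts node1
# ping -c 1 node2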
Run only on the ceph-deploy node -- start
Set up passwordless SSH login.
As the custom (non-root) user, run ssh-keygen and press Enter at every prompt:
ssh-keygen
Copy the generated key to every other node:
# ssh-copy-id {node1's hostname}
# ssh-copy-id {node2's hostname}
# ssh-copy-id {node3's hostname}
Example:
ssh-copy-id node1
ssh node1
exit
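It is also convenient (and suggested in the Ceph preflight guide) to tell SSH on the ceph-deploy node to log in to the other nodes as the deploy user, so that ceph-deploy does not need --username on every call. A minimal ~/.ssh/config sketch, assuming the cephadmin user from the example above:
Host node1
   Hostname node1
   User cephadmin
Host node2
   Hostname node2
   User cephadmin
Host node3
   Hostname node3
   User cephadmin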
Run only on the ceph-deploy node -- end
Enable the network interface at boot:
vim /etc/sysconfig/network-scripts/ifcfg-{iface}
Make sure it contains:
ONBOOT="yes"
Either disable the firewall, or open the required port 6789 in it:
systemctl disable firewalld
systemctl stop firewalld
systemctl status firewalld
firewall-cmd --zone=public --add-port=6789/tcp --permanent
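If firewalld is kept running instead of being disabled, note that --permanent only changes the saved configuration; reload it and verify that the port is open in the running config:
firewall-cmd --reload
firewall-cmd --zone=public --list-ports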
sudo visudo
Locate the "Defaults requiretty" setting.
Change "Defaults requiretty" to "Defaults:{username} !requiretty" (e.g. "Defaults:cephadmin !requiretty") so ceph-deploy can run sudo over SSH without a TTY.
Disable SELinux:
sudo setenforce 0
vim /etc/selinux/config
Change "SELINUX=enforcing" to "SELINUX=permissive".
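setenforce 0 covers the running system and the config-file edit takes effect on the next boot; to confirm the current mode:
getenforce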
sudo yum install -y yum-plugin-priorities
sudo vim /etc/yum/pluginconf.d/priorities.conf
Confirm it contains:
[main]
enabled = 1
Only when installing the Infernalis release -- start
sudo yum install -y systemd
Only when installing the Infernalis release -- end
Only when installing the Hammer release -- start
yum install redhat-lsb
Only when installing the Hammer release -- end
3. Minimal Ceph Storage Cluster Configuration (http://docs.ceph.com/docs/master/start/quick-ceph-deploy/)
Basic environment: node layout
Hostname      Role       Disk
================================================================
a) admnode deploy-node
b) node1 mon1 Disk(/dev/sdb capacity:10G)
c) node2 osd.0 Disk(/dev/sdb capacity:10G)
d) node3 osd.1 Disk(/dev/sdb capacity:10G)
(1) On the admin node, switch to the custom cephadmin user (do not call ceph-deploy via sudo or as root).
(2) As the cephadmin user, create a ceph-cluster directory to hold the files that ceph-deploy outputs.
# mkdir ceph-cluster
# cd ceph-cluster
(3) Create a Cluster
a) In the ceph-cluster directory, create the cluster from the initial monitor node(s) with ceph-deploy:
# ceph-deploy new {initial-monitor-node(s)}
Example:
# ceph-deploy new node1
b) Change the default number of object replicas (osd pool default size) from 3 to 2.
Edit ceph.conf in the ceph-cluster directory and add the following after the [global] section header:
osd pool default size = 2
osd pool default min size = 2
osd pool default pg num = 512
osd pool default pgp num = 512
osd crush chooseleaf type = 1
[osd]
osd journal size = 1024
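For reference, the Ceph placement-group documentation suggests roughly (number of OSDs × 100) / replica size total PGs, rounded up to a power of two; with 2 OSDs and size 2 that works out to (2 × 100) / 2 = 100, rounded up to 128. These defaults only apply to pools created after the setting is in place, so adjust them to your eventual OSD count.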
(4) Install Ceph on every node (because of corporate network restrictions the nodes cannot reach the Internet in bridged mode, so the local yum mirror configured earlier is used and repo adjustment is skipped with --no-adjust-repos):
# ceph-deploy install {ceph-node}[{ceph-node} ...]
Example:
# ceph-deploy install --no-adjust-repos admnode node1 node2
(5) From the admin node, initialize the node(s) defined as initial monitor(s) and gather the keys:
# ceph-deploy mon create-initial
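When this finishes, ceph-deploy gathers the cluster keyrings into the working directory; you should see files such as ceph.client.admin.keyring, ceph.bootstrap-osd.keyring and ceph.bootstrap-mds.keyring (the exact set varies by release) alongside ceph.conf:
# ls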
(6) Add two OSDs
a) List the disks on the cluster nodes, e.g. to locate /dev/sdb:
# ceph-deploy disk list <node hostname>
b) Prepare the OSDs:
# ceph-deploy osd prepare node2:/dev/sdb node3:/dev/sdb
c) Activate the OSDs (note: during prepare, ceph-deploy partitions and formats the disk into a data partition /dev/sdb1 and a journal partition /dev/sdb2, so activation targets the data partition /dev/sdb1, not the whole /dev/sdb):
# ceph-deploy osd activate node2:/dev/sdb1 node3:/dev/sdb1
Note: if OSD activation fails, or an OSD stays in the down state, see the following (a common remedy is also noted after step f) below):
http://docs.ceph.com/docs/master/rados/operations/monitoring-osd-pg/
http://docs.ceph.com/docs/master/rados/troubleshooting/troubleshooting-osd/#osd-not-running
d) Push the configuration file and admin key to the admin node and all other Ceph nodes, so that ceph CLI commands (such as ceph -s) can be run from any node instead of only from a monitor node:
# ceph-deploy admin admnode node1 node2 node3
e) Make sure ceph.client.admin.keyring is readable:
# sudo chmod +r /etc/ceph/ceph.client.admin.keyring
f) If the OSDs were activated successfully, running ceph -s or ceph -w on the mon node shows all placement groups active+clean, similar to:
[root@node1 etc]# ceph -w
cluster 62d61946-b429-4802-b7a7-12289121a022
health HEALTH_OK
monmap e1: 1 mons at {node1=10.167.225.137:6789/0}
election epoch 2, quorum 0 node1
osdmap e9: 2 osds: 2 up, 2 in
pgmap v15: 64 pgs, 1 pools, 0 bytes data, 0 objects
67916 kB used, 18343 MB / 18409 MB avail
64 active+clean
2016-03-08 20:12:00.436008 mon.0 [INF] pgmap v15: 64 pgs: 64 active+clean; 0 bytes data, 67916 kB used, 18343 MB / 18409 MB avail
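Two optional follow-ups at this point: ceph osd tree prints the CRUSH hierarchy and confirms that both OSDs are up and in; and if an activate step failed because the disk had been used before (stale partition table), ceph-deploy disk zap wipes it (destroying all data on that disk) so that prepare/activate can be re-run. Using the example nodes:
# ceph osd tree
# ceph-deploy disk zap node2:/dev/sdb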
4. Full Ceph Storage Cluster Configuration (http://docs.ceph.com/docs/master/start/quick-ceph-deploy/)
Full environment: node layout
Hostname      Role              Disk
================================================================
a) admnode deploy-node
b) node1 mon1,osd.2,mds Disk(/dev/sdb capacity:10G)
c) node2 osd.0,mon2 Disk(/dev/sdb capacity:10G)
d) node3 osd.1,mon3 Disk(/dev/sdb capacity:10G)
(1) Add an OSD on node1
# ceph-deploy osd prepare node1:/dev/sdb
# ceph-deploy osd activate node1:/dev/sdb1
After this succeeds, the cluster status looks like:
[root@node1 etc]# ceph -w
cluster 62d61946-b429-4802-b7a7-12289121a022
health HEALTH_OK
monmap e1: 1 mons at {node1=10.167.225.137:6789/0}
election epoch 2, quorum 0 node1
osdmap e13: 3 osds: 3 up, 3 in
pgmap v23: 64 pgs, 1 pools, 0 bytes data, 0 objects
102032 kB used, 27515 MB / 27614 MB avail
64 active+clean
2016-03-08 21:21:29.930307 mon.0 [INF] pgmap v23: 64 pgs: 64 active+clean; 0 bytes data, 102032 kB used, 27515 MB / 27614 MB avail
(2) Add an MDS on node1 (a metadata server is required in order to use CephFS)
# ceph-deploy mds create node1
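To confirm the MDS registered with the cluster, it can be checked from any node that has the admin keyring; until a CephFS filesystem is created the daemon typically just shows up as standby:
# ceph mds stat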
(3) Add an RGW instance (needed to use the Ceph Object Gateway)
# ceph-deploy rgw create node1
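By default the gateway created by ceph-deploy listens with civetweb on port 7480 of the target node; assuming that default port, a quick check from any machine that can reach node1 is:
# curl http://node1:7480
which should return a small anonymous S3 XML response (ListAllMyBucketsResult).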
(4) Add more monitors. To maintain quorum, the monitor count should be an odd number, so two more MON nodes are added; the MON nodes also need their clocks kept in sync.
4.1 Configure time synchronization between the MON nodes (admnode acts as the NTP server; since it has no Internet access, it serves its local clock to the NTP clients).
a) Configure the LAN NTP server on admnode (using the local clock).
a.1) Edit /etc/ntp.conf and comment out the four "server {0..3}.centos.pool.ntp.org iburst" lines.
Add the two lines "server 127.127.1.0" and "fudge 127.127.1.0 stratum 8" (local clock driver):
# Use public servers from the pool.ntp.org project.
# Please consider joining the pool (http://www.pool.ntp.org/join.html).
#server 0.centos.pool.ntp.org iburst
#server 1.centos.pool.ntp.org iburst
#server 2.centos.pool.ntp.org iburst
#server 3.centos.pool.ntp.org iburst
server 127.127.1.0
fudge  127.127.1.0 stratum 8
a.2) Enable the ntpd service on the admin node:
# sudo systemctl enable ntpd
# sudo systemctl restart ntpd
# sudo systemctl status ntpd
a.3) Check the ntpd status:
# ntpstat
synchronised to local net at stratum 6
time correct to within 12 ms
polling server every 64 s
# ntpq -p
remote refid st t when poll reach delay offset jitter
==============================================================================
*LOCAL(0) .LOCL. 5 l 3 64 377 0.000 0.000 0.000
b) On the other nodes that will run monitor services (node1, node2, node3), configure NTP to synchronize with the NTP server.
b.1) Make sure the ntpd service is stopped:
# sudo systemctl stop ntpd
# sudo systemctl status ntpd
b.2) Synchronize once with ntpdate before starting ntpd, making sure the offset is within 1000 s:
# sudo ntpdate <admnode's IP or hostname>
9 Mar 16:59:26 ntpdate[31491]: adjust time server 10.167.225.136 offset -0.000357 sec
b.3) Edit /etc/ntp.conf and comment out the four "server {0..3}.centos.pool.ntp.org iburst" lines.
Add the NTP server (the admnode IP) as "server 10.167.225.136":
# Use public servers from the pool.ntp.org project.
# Please consider joining the pool (http://www.pool.ntp.org/join.html).
#server 0.centos.pool.ntp.org iburst
#server 1.centos.pool.ntp.org iburst
#server 2.centos.pool.ntp.org iburst
#server 3.centos.pool.ntp.org iburst
server 10.167.225.136
b.4) Start the ntpd service:
# sudo systemctl enable ntpd
# sudo systemctl start ntpd
# sudo systemctl status ntpd
b.5) Check the ntpd status:
# ntpstat
synchronised to NTP server (10.167.225.136) at stratum 7
time correct to within 7949 ms
polling server every 64 s
# ntpq -p
remote refid st t when poll reach delay offset jitter
==============================================================================
*admnode LOCAL(0) 6 u 6 64 1 0.223 -0.301 0.000
4.2 Add the two extra MONs to the cluster
a) Add the new monitor nodes:
# ceph-deploy mon add node2
# ceph-deploy mon add node3
b) After they are installed, the cluster status looks like:
# ceph -s
    cluster 62d61946-b429-4802-b7a7-12289121a022
     health HEALTH_OK
     monmap e3: 3 mons at {node1=10.167.225.137:6789/0,node2=10.167.225.138:6789/0,node3=10.167.225.141:6789/0}
            election epoch 8, quorum 0,1,2 node1,node2,node3
     osdmap e21: 3 osds: 3 up, 3 in
      pgmap v46: 64 pgs, 1 pools, 0 bytes data, 0 objects
            101 MB used, 27513 MB / 27614 MB avail
                  64 active+clean
c) Check the quorum status:
# ceph quorum_status --format json-pretty
Output:
{
    "election_epoch": 8,
    "quorum": [
        0,
        1,
        2
    ],
    "quorum_names": [
        "node1",
        "node2",
        "node3"
    ],
    "quorum_leader_name": "node2",
    "monmap": {
        "epoch": 3,
        "fsid": "62d61946-b429-4802-b7a7-12289121a022",
        "modified": "2016-03-09 17:50:29.370831",
        "created": "0.000000",
        "mons": [
            {
                "rank": 0,
                "name": "node1",
                "addr": "10.167.225.137:6789\/0"
            },
            {
                "rank": 1,
                "name": "node2",
                "addr": "10.167.225.138:6789\/0"
            },
            {
                "rank": 2,
                "name": "node3",
                "addr": "10.167.225.141:6789\/0"
            }
        ]
    }
}