Quickly deploy a Ceph cluster with cephadm

Ceph core components

  • Monitors (ceph-mon): A Ceph Monitor maintains the cluster state maps, including the monitor map, OSD map, MDS map, and CRUSH map; by keeping these maps it tracks the overall health of the cluster. Monitors are also responsible for authentication between daemons and clients. At least three Ceph Monitors are normally required for redundancy and high availability.
  • Managers (ceph-mgr): The Ceph Manager daemon tracks runtime metrics and the current state of the Ceph cluster, including storage utilization, current performance metrics, and system load. It also hosts Python-based modules that manage and expose cluster information, including the Ceph Dashboard and a REST API. At least two Ceph Manager daemons are normally required for redundancy and high availability.
  • Ceph OSDs (object storage daemons, ceph-osd): When an application writes to the Ceph cluster, the data is stored as objects on OSDs; this is the only component in the cluster that stores actual user data. An OSD stores data, handles replication, recovery, and rebalancing, and provides monitoring information to ceph-mon and ceph-mgr by checking the heartbeats of other Ceph OSD daemons. At least three Ceph OSDs are normally required for redundancy and high availability.
  • MDSs (Ceph Metadata Server, ceph-mds): The Ceph Metadata Server tracks the file hierarchy and stores metadata only for the CephFS file system; Ceph block devices and Ceph object storage do not use MDS. The metadata server allows POSIX file system users to run basic commands such as ls and find.

Ceph stores data as objects in logical storage pools. Using the CRUSH algorithm, Ceph computes which placement group should contain a given object, and further computes which Ceph OSD daemon should store that placement group. The CRUSH algorithm is what allows the Ceph storage cluster to scale, rebalance, and recover dynamically.
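
As a quick check of this mapping (once pools and OSDs exist), ceph osd map computes which placement group and which up/acting OSD set CRUSH chooses for any object name in a pool, whether or not that object has actually been written; the pool and object names below are just examples:

# Show the PG and the OSD set CRUSH chooses for an object name in a pool
[root@ceph01 ~]# ceph osd map rbd rbd1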

Other Ceph components

  • RBD: Provides persistent block storage; images are thin-provisioned, resizable, and store their data across multiple OSDs.

  • RGW: Provides the object storage service. It uses librgw and librados to allow applications to connect to the Ceph object store; RGW exposes RESTful APIs compatible with Amazon S3 and OpenStack Swift.

  • CephFS: Provides a POSIX-compliant file system that uses the Ceph storage cluster to store user data. Like RBD and RGW, the CephFS service is implemented as a native interface on top of librados (see the rados example after this list).
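
All three interfaces are built on the same RADOS object store. As a minimal sketch, the librados-based rados CLI can read and write raw objects in a pool directly (the pool and file names here are placeholders):

# Write a local file into a pool as an object, list the pool, then read the object back
[root@ceph01 ~]# rados -p <pool> put test-object /etc/hosts
[root@ceph01 ~]# rados -p <pool> ls
[root@ceph01 ~]# rados -p <pool> get test-object /tmp/test-object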

Install a Ceph cluster with cephadm

  • Ceph octopus 15.2.3
  • Python 3.6
  • Docker 18.09.6
  • Note: OSD disks must be larger than 5 GB
OS version                Hostname  Public IP     Disk                Roles
CentOS 7, kernel 4.4.225  ceph01    192.168.1.71  OSD disk: /dev/sdb  cephadm, ceph-mon, ceph-mgr, ceph-osd, ceph-mds, rgw
CentOS 7, kernel 4.4.225  ceph02    192.168.1.72  OSD disk: /dev/sdb  ceph-mon, ceph-mgr, ceph-osd, ceph-mds, rgw
CentOS 7, kernel 4.4.225  ceph03    192.168.1.73  OSD disk: /dev/sdb  ceph-mon, ceph-mgr, ceph-osd, ceph-mds, rgw
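
The hosts are referred to by name throughout, so every node must be able to resolve the others. A minimal sketch using /etc/hosts (IPs as in the table above; run on every node):

[root@ceph01 ~]# cat >> /etc/hosts << EOF
192.168.1.71 ceph01
192.168.1.72 ceph02
192.168.1.73 ceph03
EOF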

Configure the system environment

# Disable the firewall and SELinux
[root@ceph01 ~]# systemctl disable --now firewalld
[root@ceph01 ~]# setenforce 0
[root@ceph01 ~]# sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
# Configure time synchronization
[root@ceph01 ~]# yum install -y chrony
[root@ceph01 ~]# systemctl enable --now chronyd
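
The firewall, SELinux, and chrony steps should be applied on ceph02 and ceph03 as well. A quick way to confirm that time synchronization is working on each node (the '^*' marker indicates the currently selected time source):

[root@ceph01 ~]# chronyc sources -v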

Install dependencies

[root@ceph01 ~]# wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
[root@ceph01 ~]# yum -y install python3 yum-utils
[root@ceph01 ~]# yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
[root@ceph01 ~]# yum -y install docker-ce-18.09.6 docker-ce-cli-18.09.6
[root@ceph01 ~]# systemctl enable --now docker
[root@ceph01 ~]# curl --silent --remote-name --location https://github.com/ceph/ceph/raw/octopus/src/cephadm/cephadm
[root@ceph01 ~]# chmod +x cephadm
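
cephadm itself only needs python3 and a container runtime on each host; ceph02 and ceph03 also need python3 and docker installed before they are added to the cluster later. A quick sanity check before bootstrapping:

[root@ceph01 ~]# python3 --version
[root@ceph01 ~]# docker --version
[root@ceph01 ~]# ./cephadm --help | head -n 5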

Bootstrap the cluster

# This command generates the ceph yum repo file
[root@ceph01 ~]# ./cephadm add-repo --release octopus

# Back up the ceph yum repo file and point it at the Aliyun mirror
[root@ceph01 ~]# cp /etc/yum.repos.d/ceph.repo{,.back}
[root@ceph01 ~]# sed -i 's#download.ceph.com#mirrors.aliyun.com/ceph#' /etc/yum.repos.d/ceph.repo
[root@ceph01 ~]# yum list | grep ceph

# Install cephadm on the current node (this simply copies cephadm into a directory on the PATH)
[root@ceph01 ~]# ./cephadm install
[root@ceph01 ~]# which cephadm
/usr/sbin/cephadm

# Bootstrap the Ceph cluster (note: if this feels slow, you can pre-pull the image it needs with docker pull docker.io/ceph/ceph:v15)
[root@ceph01 ~]# mkdir -p /etc/ceph
[root@ceph01 ~]# cephadm bootstrap --mon-ip 192.168.1.71
......
        URL: https://ceph01:8443/
        User: admin
        Password: soricu721m

INFO:cephadm:You can access the Ceph CLI with:

        sudo /usr/sbin/cephadm shell --fsid ff2b5380-b5eb-11ea-85a5-000c29177e91 -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring

INFO:cephadm:Please consider enabling telemetry to help improve Ceph:

        ceph telemetry on

For more information see:

        https://docs.ceph.com/docs/master/mgr/telemetry/

INFO:cephadm:Bootstrap complete.

[root@ceph01 ~]# /usr/sbin/cephadm shell --fsid ff2b5380-b5eb-11ea-85a5-000c29177e91 -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring
INFO:cephadm:Using recent ceph image ceph/ceph:v15

[ceph: root@ceph01 /]# exit

# Open https://ceph01:8443/ in a browser to access the Ceph UI; the first login requires changing the default password

# Install the Ceph toolkit, which includes the ceph, rbd, mount.ceph, and other commands
[root@ceph01 ~]# cephadm install ceph-common

Basic usage

# Show the running status of all components in the Ceph cluster
[root@ceph01 ~]# ceph orch ps 
NAME               HOST    STATUS         REFRESHED  AGE  VERSION  IMAGE NAME               IMAGE ID      CONTAINER ID  
crash.ceph01       ceph01  running (83m)  3m ago     83m  15.2.3   docker.io/ceph/ceph:v15  d72755c420bc  6eeecc944c89  
mgr.ceph01.zqqyym  ceph01  running (84m)  3m ago     84m  15.2.3   docker.io/ceph/ceph:v15  d72755c420bc  4bb1b13e5c3b  
mon.ceph01         ceph01  running (84m)  3m ago     84m  15.2.3   docker.io/ceph/ceph:v15  d72755c420bc  ddf141776199

# Show the running status of a specific component type
[root@ceph01 ~]# ceph orch ps --daemon-type mon
NAME        HOST    STATUS         REFRESHED  AGE  VERSION  IMAGE NAME               IMAGE ID      CONTAINER ID  
mon.ceph01  ceph01  running (86m)  5m ago     86m  15.2.3   docker.io/ceph/ceph:v15  d72755c420bc  ddf141776199

# Show the current cluster status
[root@ceph01 ~]# ceph status
  cluster:
    id:     44c6209a-b5d4-11ea-a1b5-000c29177e91
    health: HEALTH_WARN
            Reduced data availability: 1 pg inactive
            OSD count 0 < osd_pool_default_size 3

  services:
    mon: 1 daemons, quorum ceph01 (age 90m)
    mgr: ceph01.zqqyym(active, since 89m)
    osd: 0 osds: 0 up, 0 in

  data:
    pools:   1 pools, 1 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs:     100.000% pgs unknown
             1 unknown

Add hosts to the cluster

[root@ceph01 ~]# ssh-copy-id -f -i /etc/ceph/ceph.pub root@ceph02
[root@ceph01 ~]# ssh-copy-id -f -i /etc/ceph/ceph.pub root@ceph03

# Add the hosts to the cluster; note: the target hosts must already have python3 and docker installed
[root@ceph01 ~]# ceph orch host add ceph02
Added host 'ceph02'
[root@ceph01 ~]# ceph orch host add ceph03
Added host 'ceph03'

# List the hosts currently in the cluster
[root@ceph01 ~]# ceph orch host ls
HOST    ADDR    LABELS  STATUS  
ceph01  ceph01                  
ceph02  ceph02                  
ceph03  ceph03 

# Check whether the cluster has finished expanding (3 crash, 3 mon, 2 mgr daemons)
[root@ceph01 ~]# ceph orch ps 
NAME               HOST    STATUS          REFRESHED  AGE   VERSION  IMAGE NAME               IMAGE ID      CONTAINER ID  
crash.ceph01       ceph01  running (110m)  2m ago     110m  15.2.3   docker.io/ceph/ceph:v15  d72755c420bc  6eeecc944c89  
crash.ceph02       ceph02  running (4m)    2m ago     4m    15.2.3   docker.io/ceph/ceph:v15  d72755c420bc  53c83cfc2470  
crash.ceph03       ceph03  running (3m)    2m ago     3m    15.2.3   docker.io/ceph/ceph:v15  d72755c420bc  91f41bd39008  
mgr.ceph01.zqqyym  ceph01  running (111m)  2m ago     111m  15.2.3   docker.io/ceph/ceph:v15  d72755c420bc  4bb1b13e5c3b  
mgr.ceph03.znmizf  ceph03  running (3m)    2m ago     3m    15.2.3   docker.io/ceph/ceph:v15  d72755c420bc  9d65b425cbff  
mon.ceph01         ceph01  running (111m)  2m ago     111m  15.2.3   docker.io/ceph/ceph:v15  d72755c420bc  ddf141776199  
mon.ceph02         ceph02  running (3m)    2m ago     3m    15.2.3   docker.io/ceph/ceph:v15  d72755c420bc  c3a004194faa  
mon.ceph03         ceph03  running (2m)    2m ago     2m    15.2.3   docker.io/ceph/ceph:v15  d72755c420bc  7a6239a28215
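
By default cephadm keeps deploying monitors (typically up to five) as more hosts become available. If you want to pin the monitor daemons to exactly these three hosts, one option is the same placement syntax used for MDS and RGW below (a hedged example, not required for this setup):

[root@ceph01 ~]# ceph orch apply mon --placement="3 ceph01 ceph02 ceph03"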

Deploy OSDs

[root@ceph01 ~]# ceph orch daemon add osd ceph01:/dev/sdb
Created osd(s) 0 on host 'ceph01'

[root@ceph01 ~]# ceph orch daemon add osd ceph02:/dev/sdb
Created osd(s) 1 on host 'ceph02'

[root@ceph01 ~]# ceph orch daemon add osd ceph03:/dev/sdb
Created osd(s) 2 on host 'ceph03'

[root@ceph01 ~]# ceph orch device ls
HOST    PATH      TYPE   SIZE  DEVICE  AVAIL  REJECT REASONS                                          
ceph01  /dev/sda  hdd   40.0G          False  locked, Insufficient space (<5GB) on vgs, LVM detected  
ceph01  /dev/sdb  hdd   10.0G          False  locked, Insufficient space (<5GB) on vgs, LVM detected  
ceph02  /dev/sda  hdd   40.0G          False  Insufficient space (<5GB) on vgs, locked, LVM detected  
ceph02  /dev/sdb  hdd   10.0G          False  Insufficient space (<5GB) on vgs, locked, LVM detected  
ceph03  /dev/sda  hdd   40.0G          False  LVM detected, Insufficient space (<5GB) on vgs, locked  
ceph03  /dev/sdb  hdd   10.0G          False  LVM detected, Insufficient space (<5GB) on vgs, locked
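
Instead of adding each device by hand, the orchestrator can also be told to create OSDs on every clean, available device it finds; a hedged alternative to the three commands above:

# Let the orchestrator create OSDs on all available, unused devices
[root@ceph01 ~]# ceph orch apply osd --all-available-devices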

Deploy MDS to provide CephFS

# Create a pool for CephFS data
[root@ceph01 ~]# ceph osd pool create cephfs_data 64 64
pool 'cephfs_data' created

# Create a pool for CephFS metadata
[root@ceph01 ~]# ceph osd pool create cephfs_metadata 32 32
pool 'cephfs_metadata' created

# Create the file system, using cephfs_metadata for metadata and cephfs_data for the actual data
[root@ceph01 ~]# ceph fs new cephfs cephfs_metadata cephfs_data
new fs with metadata pool 3 and data pool 2

# List CephFS file systems
[root@ceph01 ~]# ceph fs ls
name: cephfs, metadata pool: cephfs_metadata, data pools: [cephfs_data ]

# Deploy MDS daemons on ceph01, ceph02, and ceph03
[root@ceph01 ~]# ceph orch apply mds cephfs --placement="3 ceph01 ceph02 ceph03"
Scheduled mds.cephfs update...

# Check whether the MDS daemons are running
[root@ceph01 ~]# ceph orch ps --daemon-type mds
NAME                      HOST    STATUS        REFRESHED  AGE  VERSION  IMAGE NAME               IMAGE ID      CONTAINER ID  
mds.cephfs.ceph01.lmrpri  ceph01  running (4m)  4m ago     4m   15.2.3   docker.io/ceph/ceph:v15  d72755c420bc  bcbc19c43089  
mds.cephfs.ceph02.ggapal  ceph02  running (4m)  4m ago     4m   15.2.3   docker.io/ceph/ceph:v15  d72755c420bc  987aa0d80bd9  
mds.cephfs.ceph03.eelzdg  ceph03  running (4m)  4m ago     4m   15.2.3   docker.io/ceph/ceph:v15  d72755c420bc  f3e2de2d5817  

# List all pools in the cluster
[root@ceph01 ~]# ceph osd lspools
1 device_health_metrics
2 cephfs_data
3 cephfs_metadata
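
To confirm that one MDS rank has gone active for the new file system (with the remaining daemons acting as standbys), the following should work:

# Show MDS ranks, standbys, and pool usage for cephfs
[root@ceph01 ~]# ceph fs status cephfs
[root@ceph01 ~]# ceph mds stat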

Deploy RGW

# Create a realm
[root@ceph01 ~]# radosgw-admin realm create --rgw-realm=rgw01 --default 
{
    "id": "e8f17788-0adf-47fd-a920-4d3be7faf604",
    "name": "rgw01",
    "current_period": "436ef150-e9ef-4748-83e9-b572a8812465",
    "epoch": 1
}

# Create a zonegroup
[root@ceph01 ~]# radosgw-admin zonegroup create --rgw-zonegroup=default --master --default
{
    "id": "aa0c224d-5894-40f9-88ce-6d65fef851d1",
    "name": "default",
    "api_name": "default",
    "is_master": "true",
    "endpoints": [],
    "hostnames": [],
    "hostnames_s3website": [],
    "master_zone": "",
    "zones": [],
    "placement_targets": [],
    "default_placement": "",
    "realm_id": "e8f17788-0adf-47fd-a920-4d3be7faf604",
    "sync_policy": {
        "groups": []
    }
}

# Create a zone
[root@ceph01 ~]# radosgw-admin zone create --rgw-zonegroup=default --rgw-zone=cn-hangzho --master --default
{
    "id": "e0cd3501-b5bc-4015-832e-6b6b02708c02",
    "name": "cn-hangzho",
    "domain_root": "cn-hangzho.rgw.meta:root",
    "control_pool": "cn-hangzho.rgw.control",
    "gc_pool": "cn-hangzho.rgw.log:gc",
    "lc_pool": "cn-hangzho.rgw.log:lc",
    "log_pool": "cn-hangzho.rgw.log",
    "intent_log_pool": "cn-hangzho.rgw.log:intent",
    "usage_log_pool": "cn-hangzho.rgw.log:usage",
    "roles_pool": "cn-hangzho.rgw.meta:roles",
    "reshard_pool": "cn-hangzho.rgw.log:reshard",
    "user_keys_pool": "cn-hangzho.rgw.meta:users.keys",
    "user_email_pool": "cn-hangzho.rgw.meta:users.email",
    "user_swift_pool": "cn-hangzho.rgw.meta:users.swift",
    "user_uid_pool": "cn-hangzho.rgw.meta:users.uid",
    "otp_pool": "cn-hangzho.rgw.otp",
    "system_key": {
        "access_key": "",
        "secret_key": ""
    },
    "placement_pools": [
        {
            "key": "default-placement",
            "val": {
                "index_pool": "cn-hangzho.rgw.buckets.index",
                "storage_classes": {
                    "STANDARD": {
                        "data_pool": "cn-hangzho.rgw.buckets.data"
                    }
                },
                "data_extra_pool": "cn-hangzho.rgw.buckets.non-ec",
                "index_type": 0
            }
        }
    ],
    "realm_id": "e8f17788-0adf-47fd-a920-4d3be7faf604"
}

# Deploy RGW daemons on ceph01, ceph02, and ceph03
[root@ceph01 ~]# ceph orch apply rgw rgw01 cn-hangzho --placement="3 ceph01 ceph02 ceph03"
Scheduled rgw.rgw01.cn-hangzho update...

# Check whether the RGW daemons are running on each node
[root@ceph01 ~]# ceph orch ps --daemon-type rgw
NAME                                HOST    STATUS         REFRESHED  AGE  VERSION  IMAGE NAME               IMAGE ID      CONTAINER ID  
rgw.rgw01.cn-hangzho.ceph01.uaglvv  ceph01  running (25s)  23s ago    25s  15.2.3   docker.io/ceph/ceph:v15  d72755c420bc  d7e90e8d4d1b  
rgw.rgw01.cn-hangzho.ceph02.szkeqj  ceph02  running (27s)  21s ago    27s  15.2.3   docker.io/ceph/ceph:v15  d72755c420bc  95171c8f0e4b  
rgw.rgw01.cn-hangzho.ceph03.zphone  ceph03  running (26s)  20s ago    26s  15.2.3   docker.io/ceph/ceph:v15  d72755c420bc  19804d391100
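
The RGW daemons deployed by cephadm listen on port 80 by default (unless SSL is configured), so a quick check that the S3 endpoint answers is an anonymous request, which should return an empty ListAllMyBucketsResult XML document:

[root@ceph01 ~]# curl http://ceph01:80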

Test RBD usage

# Create a pool named rbd
[root@ceph01 ~]# ceph osd pool create rbd 16

# Enable the rbd application on the pool
[root@ceph01 ~]# ceph osd pool application enable rbd rbd

# Create an RBD image 10 GB in size
[root@ceph01 ~]# rbd create rbd1 --size 10240

# Show the image details
[root@ceph01 ~]# rbd --image rbd1 info
rbd image 'rbd1':
        size 10 GiB in 2560 objects
        order 22 (4 MiB objects)
        snapshot_count: 0
        id: 39e7c066a17a
        block_name_prefix: rbd_data.39e7c066a17a
        format: 2
        features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
        op_features: 
        flags: 
        create_timestamp: Wed Jun 24 16:52:53 2020
        access_timestamp: Wed Jun 24 16:52:53 2020
        modify_timestamp: Wed Jun 24 16:52:53 2020

# Switch CRUSH tunables to the hammer profile so the 4.4 kernel RBD client can map the image (newer tunables profiles require a newer kernel)
[root@ceph01 ~]# ceph osd crush tunables hammer
adjusted tunables profile to hammer

# Recalculate CRUSH weights after changing the tunables profile
[root@ceph01 ~]# ceph osd crush reweight-all
reweighted crush hierarchy

# Disable the image features that the default kernel does not support
[root@ceph01 ~]# rbd feature disable rbd1 exclusive-lock object-map fast-diff deep-flatten

# Verify the features have been disabled
[root@ceph01 ~]# rbd --image rbd1 info | grep features
        features: layering
        op_features: 

# Map the image (run on the client that will mount it)
[root@ceph01 ~]# rbd map --image rbd1
/dev/rbd0

# Show the mapping
[root@ceph01 ~]# rbd showmapped 
id  pool  namespace  image  snap  device   
0   rbd              rbd1   -     /dev/rbd0

# Format the device
[root@ceph01 ~]# mkfs.xfs /dev/rbd0

# Create a mount point and mount the RBD device on it
[root@ceph01 ~]# mkdir /mnt/rbd
[root@ceph01 ~]# mount /dev/rbd0 /mnt/rbd/

# Check the mount
[root@ceph01 ~]# df -hl | grep rbd
/dev/rbd0                 10G   33M   10G   1% /mnt/rbd
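
The mapping above does not survive a reboot. One way to make it persistent is the rbdmap service shipped with ceph-common; a minimal sketch using the admin keyring and the image/mount point from above (use a dedicated client key in production):

# Have rbdmap map the image at boot
[root@ceph01 ~]# echo "rbd/rbd1 id=admin,keyring=/etc/ceph/ceph.client.admin.keyring" >> /etc/ceph/rbdmap
[root@ceph01 ~]# systemctl enable rbdmap

# Mount via fstab; noauto lets rbdmap mount it after mapping
[root@ceph01 ~]# echo "/dev/rbd/rbd/rbd1 /mnt/rbd xfs noauto 0 0" >> /etc/fstab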

Test object storage

# Install s3cmd, an S3-compatible command-line client
[root@ceph01 ~]# yum -y install s3cmd

# Create a user
[root@ceph01 ~]# radosgw-admin user create --uid=s3 --display-name="objcet storage" --system

# Get the user's access_key and secret_key
[root@ceph01 ~]# radosgw-admin user info --uid=s3 | grep -E "access_key|secret_key"
            "access_key": "RPRUFOWDK0T4MI4GL27C",
            "secret_key": "32efWJ7O5CGeKJbRdsDuyderNwwLLNOp4cnt13ZS"

# Generate the S3 client configuration (set the parameters shown below; accept the defaults for everything else)
[root@ceph01 ~]# s3cmd --configure
Access Key: RPRUFOWDK0T4MI4GL27C
Secret Key: 32efWJ7O5CGeKJbRdsDuyderNwwLLNOp4cnt13ZS
S3 Endpoint [s3.amazonaws.com]: ceph01
DNS-style bucket+hostname:port template for accessing a bucket [%(bucket)s.s3.amazonaws.com]: %(bucket)s.ceph01
Use HTTPS protocol [Yes]: no
Test access with supplied credentials? [Y/n] y
Save settings? [y/N] y
Configuration saved to '/root/.s3cfg'

# Create a bucket
[root@ceph01 ~]# s3cmd mb s3://bucket
Bucket 's3://bucket/' created

# List all buckets
[root@ceph01 ~]# s3cmd ls
2020-06-28 03:02  s3://bucket
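
A quick round trip to verify reads and writes through the S3 API (the file and object names here are arbitrary):

# Upload a file, list the bucket, then download the object again
[root@ceph01 ~]# s3cmd put /etc/hosts s3://bucket/hosts
[root@ceph01 ~]# s3cmd ls s3://bucket
[root@ceph01 ~]# s3cmd get s3://bucket/hosts /tmp/hosts.s3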

Test CephFS usage

# Create a user for client access to CephFS
[root@ceph01 ~]# ceph auth get-or-create client.cephfs mon 'allow r' mds 'allow r, allow rw path=/' osd 'allow rw pool=cephfs_data' -o ceph.client.cephfs.keyring

# Get the user's key
[root@ceph01 ~]# ceph auth get-key client.cephfs
AQAyGfNeTXKzDhAAkQPKLA72/Rriy9qpczzp8A==

# Create a mount point and mount CephFS on it; this is the kernel-driver mount method, and the file system can also be exported as NFS through NFS Ganesha
[root@ceph01 ~]# mkdir /mnt/cephfs/
[root@ceph01 ~]# mount -t ceph ceph01:/ /mnt/cephfs/ -o name=cephfs,secret=AQAyGfNeTXKzDhAAkQPKLA72/Rriy9qpczzp8A==

# Check the mount
[root@ceph01 ~]# mount | grep cephfs
192.168.1.71:/ on /mnt/cephfs type ceph (rw,relatime,name=cephfs,secret=<hidden>,acl)
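
Passing the key on the command line leaves it in the shell history; for a persistent mount, one option is to store the key in a secret file and add an fstab entry (the paths below are assumptions, not part of the original setup):

# Store the client key in a root-only file and mount via fstab at boot
[root@ceph01 ~]# echo "AQAyGfNeTXKzDhAAkQPKLA72/Rriy9qpczzp8A==" > /etc/ceph/cephfs.secret
[root@ceph01 ~]# chmod 600 /etc/ceph/cephfs.secret
[root@ceph01 ~]# echo "192.168.1.71:/ /mnt/cephfs ceph name=cephfs,secretfile=/etc/ceph/cephfs.secret,_netdev 0 0" >> /etc/fstab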