Highly Available OpenStack (Queens) Cluster - 15. Integrating Glance & Cinder with Ceph

References:

  1. Install Guide: https://docs.openstack.org/install-guide/
  2. OpenStack High Availability Guide: https://docs.openstack.org/ha-guide/index.html
  3. Understanding Pacemaker: http://www.cnblogs.com/sammyliu/p/5025362.html

19. Integrating Glance with Ceph

1. Configure glance-api.conf

# On every node running the glance-api service (all 3 controller nodes), modify glance-api.conf; controller01 is used as the example
# Only the sections involved in the glance/ceph integration are shown below
[root@controller01 ~]# vim /etc/glance/glance-api.conf
# Enable copy-on-write cloning by exposing the image's direct RBD location
[DEFAULT]
show_image_direct_url = True

# Switch the default local-file store to the ceph rbd store;
# note that the pool and user names below must match what was created on the ceph side
[glance_store]
#stores = file,http
#default_store = file
#filesystem_store_datadir = /var/lib/glance/images/
stores = rbd
default_store = rbd
rbd_store_chunk_size = 8
rbd_store_pool = images
rbd_store_user = glance
rbd_store_ceph_conf = /etc/ceph/ceph.conf
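
# For reference, the configuration above assumes the images pool and the client.glance user
# already exist on the ceph side (created in an earlier part of this series); a minimal sketch
# of that setup, with the PG count as an assumption to adjust for your cluster:
[root@controller01 ~]# ceph osd pool create images 128
[root@controller01 ~]# ceph auth get-or-create client.glance mon 'profile rbd' osd 'profile rbd pool=images'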

# After changing the configuration, restart the services
[root@controller01 ~]# systemctl restart openstack-glance-api.service
[root@controller01 ~]# systemctl restart openstack-glance-registry.service
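
# The same restart is needed on the other two controller nodes; one way to do it in a single
# pass (the controller02/controller03 hostnames are assumed from this series' naming):
[root@controller01 ~]# for node in controller02 controller03; do ssh $node "systemctl restart openstack-glance-api.service openstack-glance-registry.service"; done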

2. Upload an image

# After upload, the image is stored by default in the images pool of the ceph cluster, as an RBD image named by its image ID
[root@controller01 ~]# openstack image create "cirros-qcow2" \
 --file ~/cirros-0.3.5-x86_64-disk.img \
 --disk-format qcow2 --container-format bare \
 --public
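
# Note: RBD copy-on-write cloning only works with raw images, so a qcow2 image is stored but
# cannot be COW-cloned; if cloning matters, convert and upload a raw copy first, for example:
[root@controller01 ~]# qemu-img convert -f qcow2 -O raw ~/cirros-0.3.5-x86_64-disk.img ~/cirros-0.3.5-x86_64-disk.raw
[root@controller01 ~]# openstack image create "cirros-raw" --file ~/cirros-0.3.5-x86_64-disk.raw --disk-format raw --container-format bare --public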

# Verify
[root@controller01 ~]# rbd ls images
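
# The image is stored as an RBD image named after its Glance UUID and can be inspected directly, e.g.:
[root@controller01 ~]# rbd info images/$(openstack image show cirros-qcow2 -f value -c id)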

3. Define the pool application type

# Once the images pool is in use, the ceph cluster status changes to HEALTH_WARN
[root@controller01 ~]# ceph -s

# "ceph health detail" explains the warning and suggests the fix:
# the pools have no application type defined; valid types are 'cephfs', 'rbd', 'rgw', etc.
[root@controller01 ~]# ceph health detail

# Fix the volumes and vms pools at the same time
[root@controller01 ~]# ceph osd pool application enable images rbd
[root@controller01 ~]# ceph osd pool application enable volumes rbd
[root@controller01 ~]# ceph osd pool application enable vms rbd

# Verify
[root@controller01 ~]# ceph health detail
[root@controller01 ~]# ceph osd pool application get images
[root@controller01 ~]# ceph osd pool application get volumes
[root@controller01 ~]# ceph osd pool application get vms
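
# With all three pools tagged, the cluster should return to HEALTH_OK:
[root@controller01 ~]# ceph -s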

20. Integrating Cinder with Ceph

1. Configure cinder.conf

# cinder has a pluggable driver architecture and can use multiple storage backends at the same time;
# it is enough to configure the ceph rbd driver in cinder.conf on the nodes running cinder-volume;
# this covers all 3 compute (storage) nodes; compute01 is used as the example;
# only the sections involved in the cinder/ceph integration are shown below
[root@compute01 ~]# vim /etc/cinder/cinder.conf
# use ceph as the backend storage
[DEFAULT]
enabled_backends = ceph

# Add a new [ceph] section;
# note that the pool, user and secret uuid below must match the values created on the ceph side
[ceph]
# ceph rbd driver
volume_driver = cinder.volume.drivers.rbd.RBDDriver
rbd_pool = volumes
rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_flatten_volume_from_snapshot = false
rbd_max_clone_depth = 5
rbd_store_chunk_size = 4
rados_connect_timeout = -1
# if multiple backends are configured, "glance_api_version" must be set in the [DEFAULT] section instead
glance_api_version = 2
rbd_user = cinder
rbd_secret_uuid = 10744136-583f-4a9c-ae30-9bfb3515526b
volume_backend_name = ceph
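
# Note: rbd_secret_uuid must match the libvirt secret holding the client.cinder key on the
# compute nodes (created in an earlier part of this series); it can be checked with:
[root@compute01 ~]# virsh secret-list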

# After changing the configuration, restart the service
[root@compute01 ~]# systemctl restart openstack-cinder-volume.service
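
# Repeat the restart on the remaining compute nodes (the compute02/compute03 hostnames are
# assumed from this series' naming):
[root@compute01 ~]# for node in compute02 compute03; do ssh $node "systemctl restart openstack-cinder-volume.service"; done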

2. Verify

# Check the cinder service status; after cinder-volume is integrated with ceph, its state should be "up";
# equivalent: cinder service-list
[root@controller01 ~]# openstack volume service list

3. Create a volume

1) Set the volume type

# On a controller node, create a volume type for cinder's ceph backend; with multiple backends, the type selects which one to use;
# existing types can be listed with "cinder type-list"
[root@controller01 ~]# cinder type-create ceph

# Set an extra spec on the ceph type: key "volume_backend_name", value "ceph" (matching volume_backend_name in cinder.conf)
[root@controller01 ~]# cinder type-key ceph set volume_backend_name=ceph
[root@controller01 ~]# cinder extra-specs-list
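
# The same binding can also be checked with the unified client:
[root@controller01 ~]# openstack volume type show ceph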

2) Create the volume

# Create the volume;
# the trailing "1" is the size in GB
[root@controller01 ~]# cinder create --volume-type ceph --name ceph-volume1 1

# Check the new volume;
# equivalent: cinder list
[root@controller01 ~]# openstack volume list

# Check the volumes pool of the ceph cluster
[root@controller01 ~]# rbd ls volumes
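
# The volume is backed by an RBD image named volume-<UUID>; it can be inspected directly, e.g.:
[root@controller01 ~]# rbd info volumes/volume-$(openstack volume show ceph-volume1 -f value -c id)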
