glance-api.conf:
[glance_store]
filesystem_store_datadir = /opt/stack/data/glance/images/
|
|
|
[glance_store]
rbd_store_pool = images
rbd_store_user = glance
rbd_store_ceph_conf = /etc/ceph/ceph.conf
stores = file, http, rbd
default_store = rbd
filesystem_store_datadir = /opt/stack/data/glance/images/
|
|
nova.conf 或 nova-cpu.conf:
[libvirt]
live_migration_uri = qemu+ssh://stack@%s/system
cpu_mode = none
virt_type = kvm
|
|
[libvirt]
images_rbd_ceph_conf = /etc/ceph/ceph.conf
images_rbd_pool = vms
images_type = rbd
disk_cachemodes = network=writeback
inject_partition = -2
inject_key = false
rbd_secret_uuid = df0d0b60-047a-45f5-b5be-f7d2b4beadee
rbd_user = cinder
live_migration_uri = qemu+ssh://stack@%s/system
cpu_mode = none
virt_type = kvm
|
|
cinder.conf:
[lvmdriver-1]
image_volume_cache_enabled = True
volume_clear = zero
lvm_type = auto
iscsi_helper = tgtadm
volume_group = stack-volumes-lvmdriver-1
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_backend_name = lvmdriver-1
|
|
cinder.conf:
[ceph]
image_volume_cache_enabled = True
volume_clear = zero
rbd_max_clone_depth = 5
rbd_flatten_volume_from_snapshot = False
rbd_secret_uuid = df0d0b60-047a-45f5-b5be-f7d2b4beadee
rbd_user = cinder
rbd_pool = volumes
rbd_ceph_conf = /etc/ceph/ceph.conf
volume_driver = cinder.volume.drivers.rbd.RBDDriver
volume_backend_name = ceph
|
|
|
ceph.conf 控制節點在配置好後顯示的:
[global]
rbd default features = 1
osd pool default size = 1
osd journal size = 100
osd crush chooseleaf type = 0
filestore_xattr_use_omap = true
auth_client_required = cephx
auth_service_required = cephx
auth_cluster_required = cephx
mon_host = 172.16.1.17
mon_initial_members = controller
fsid = eab37548-7aef-466a-861c-3757a12ce9e8
|
在ceph集羣的admin節點(在該節點用ceph-deploy創建ceph存儲集羣)建立初始化monitor後,會獲得多個祕鑰文件,要將這些祕鑰文件和該節點配置的ceph.conf文件分發到其餘全部節點(其餘ceph節點,計算節點,控制節點...)。fsid是自動配置的。