Ceph pool 資源池管理

Ceph Pool 資源池管理

# 查看 ceph 資源池
ceph osd lspools

# 建立資源池
ceph osd pool create <poolname> <int[0-]> {<int[0-]>} {replicated|erasure} {<erasure_code_profile>} {<rule>} {<int>} {<int>} {<int[0-]>} {<int[0-]>} {<float[0.0-1.0]>} :  create pool

[root@node1 ~]# ceph osd pool create pool_demo 16 16
pool 'pool_demo' created
[root@node1 ~]# ceph osd lspools
1 ceph-demo
2 .rgw.root
3 default.rgw.control
4 default.rgw.meta
5 default.rgw.log
6 cephfs_data
7 cephfs_metadata
8 pool_demo

# 修改資源池屬性
[root@node1 ~]# ceph osd pool get pool_demo size
size: 3
[root@node1 ~]# ceph osd pool get pool_demo pg_num
pg_num: 16
[root@node1 ~]# ceph osd pool get pool_demo pgp_num
pgp_num: 16
[root@node1 ~]# ceph osd pool set pool_demo pg_num  32
set pool 8 pg_num to 32
[root@node1 ~]# ceph osd pool set pool_demo pgp_num  32
set pool 8 pgp_num to 32

# 將資源池關聯到應用程序
ceph osd pool application enable {pool-name} {application-name}
# application-name=rbd|cephfs|rgw

[root@node1 ~]# ceph osd pool application enable pool_demo rbd
enabled application 'rbd' on pool 'pool_demo'
[root@node1 ~]# ceph osd pool application get pool_demo
{
    "rbd": {}
}

# 設置池配額 爲每一個池的最大字節數和/或最大對象數設置池配額
[root@node1 ~]# ceph osd pool get-quota pool_demo
quotas for pool 'pool_demo':
  max objects: N/A
  max bytes  : N/A
[root@node1 ~]# ceph osd pool set-quota pool_demo max objects 100
Invalid command: max not in max_objects|max_bytes
osd pool set-quota <poolname> max_objects|max_bytes <val> :  set object or byte limit on pool
Error EINVAL: invalid command
[root@node1 ~]# ceph osd pool set-quota pool_demo max_objects 100
set-quota max_objects = 100 for pool pool_demo
[root@node1 ~]# ceph osd pool get-quota pool_demo
quotas for pool 'pool_demo':
  max objects: 100 objects
  max bytes  : N/A

# 刪除池
ceph osd pool delete {pool-name} [{pool-name} --yes-i-really-really-mean-it]

若要刪除池,必須在配置中將 mon_allow_pool_delete 標誌設置爲 true,不然 monitor 將拒絕刪除該池。
若是您爲建立的池建立了本身的規則,則在再也不須要池時應考慮刪除這些規則:

ceph osd pool get {pool-name} crush_rule
例如,若是規則爲"123",您能夠檢查其餘池,例如:

ceph osd dump | grep "^pool" | grep "crush_rule 123"
若是沒有其餘池使用該自定義規則,則從羣集中刪除該規則是安全的。

若是您爲再也不存在的池建立具備嚴格權限的用戶,則應考慮刪除這些用戶:

ceph auth ls | grep -C 5 {pool-name}
ceph auth del {user}

# 重命名池
ceph osd pool rename {current-pool-name} {new-pool-name}
若是重命名池,而且具備通過身份驗證的用戶的每一個池功能,則必須使用新的池名稱更新用戶的功能(即大寫字母)。

# 顯示池統計信息
rados df
ceph osd pool stats [{pool-name}]

[root@node1 ~]# rados df
POOL_NAME              USED OBJECTS CLONES COPIES MISSING_ON_PRIMARY UNFOUND DEGRADED RD_OPS      RD WR_OPS      WR USED COMPR UNDER COMPR
.rgw.root           768 KiB       4      0     12                  0       0        0      0     0 B      4   4 KiB        0 B         0 B
ceph-demo           653 MiB      79      0    237                  0       0        0   1684 5.6 MiB    285 225 MiB        0 B         0 B
cephfs_data             0 B       0      0      0                  0       0        0      0     0 B      0     0 B        0 B         0 B
cephfs_metadata     1.5 MiB      22      0     66                  0       0        0      0     0 B     49  17 KiB        0 B         0 B
default.rgw.control     0 B       8      0     24                  0       0        0      0     0 B      0     0 B        0 B         0 B
default.rgw.log         0 B     175      0    525                  0       0        0  63063  61 MiB  42053     0 B        0 B         0 B
default.rgw.meta        0 B       0      0      0                  0       0        0      0     0 B      0     0 B        0 B         0 B
pool_demo               0 B       0      0      0                  0       0        0      0     0 B      0     0 B        0 B         0 B

total_objects    288
total_used       6.7 GiB
total_avail      143 GiB
total_space      150 GiB
[root@node1 ~]# ceph osd pool stats pool_demo
pool pool_demo id 8
  nothing is going on
相關文章
相關標籤/搜索