Ceph version: v13.2.5 (Mimic stable release)
[root@ceph-node1 ceph]# ceph osd pool create k8s 128 128
pool 'k8s' created
[root@ceph-node1 ceph]# ceph osd pool ls
k8s
This environment uses Ceph's admin account directly. In production, of course, you should create separate accounts for clients with different roles, for example: ceph auth get-or-create client.k8s mon 'allow r' osd 'allow rwx pool=k8s' -o ceph.client.k8s.keyring
Get the account's key:
[root@ceph-node1 ceph]# ceph auth get-key client.admin | base64
QVFDMmIrWmNEL3JTS2hBQWwwdmR3eGJGMmVYNUM3SjdDUGZZbkE9PQ==
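If you use the dedicated client.k8s account suggested above instead of admin, its key can be exported the same way (a sketch, assuming that account was actually created):
# hypothetical: requires the client.k8s account created with the command shown earlier
ceph auth get-key client.k8s | base64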
When PVs are created dynamically via a StorageClass, the controller-manager automatically creates the image on Ceph, so we need to make the rbd command available to it.
(1) If the cluster was deployed with kubeadm, the official controller-manager image does not include the rbd command, so we deploy an external provisioner:
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-provisioner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["kube-dns","coredns"]
    verbs: ["list", "get"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-provisioner
subjects:
  - kind: ServiceAccount
    name: rbd-provisioner
    namespace: default
roleRef:
  kind: ClusterRole
  name: rbd-provisioner
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: rbd-provisioner
rules:
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get"]
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: rbd-provisioner
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: rbd-provisioner
subjects:
  - kind: ServiceAccount
    name: rbd-provisioner
    namespace: default
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: rbd-provisioner
spec:
  replicas: 1
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: rbd-provisioner
    spec:
      containers:
        - name: rbd-provisioner
          image: quay.io/external_storage/rbd-provisioner:latest
          env:
            - name: PROVISIONER_NAME
              value: ceph.com/rbd
      serviceAccount: rbd-provisioner
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: rbd-provisioner
kubectl apply -f rbd-provisioner.yaml
Note: the rbd-provisioner image must be compatible with the Ceph version. The latest tag is used here; according to the project's documentation it already supports Ceph Mimic.
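After applying the manifest, it is worth confirming that the provisioner pod is actually running; a minimal check, assuming the default namespace used in the manifest above:
kubectl get pods -l app=rbd-provisioner
kubectl logs deployment/rbd-provisioner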
(2) If the cluster was deployed from binaries, simply install ceph-common on the master nodes.
YUM repository:
[Ceph]
name=Ceph packages for $basearch
baseurl=http://download.ceph.com/rpm-mimic/el7/$basearch
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://download.ceph.com/keys/release.asc
priority=1

[Ceph-noarch]
name=Ceph noarch packages
baseurl=http://download.ceph.com/rpm-mimic/el7/noarch
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://download.ceph.com/keys/release.asc
priority=1

[ceph-source]
name=Ceph source packages
baseurl=http://download.ceph.com/rpm-mimic/el7/SRPMS
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://download.ceph.com/keys/release.asc
priority=1
# Install the client
yum -y install ceph-common-13.2.5
# Copy the keyring file
Copy Ceph's ceph.client.admin.keyring file to the /etc/ceph directory on the master nodes.
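For example, it can be copied over from a Ceph node with scp (a sketch; the master hostname is a placeholder for your own):
# run on ceph-node1; k8s-master01 is a placeholder hostname
scp /etc/ceph/ceph.client.admin.keyring root@k8s-master01:/etc/ceph/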
When a pod is created, the kubelet uses the rbd command to probe and map the Ceph image backing the PV, so the Ceph client (ceph-common-13.2.5) must also be installed on every worker node.
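On each worker this amounts to repeating the repository and keyring steps shown above (a sketch of the assumed procedure):
# on every worker node: configure the same ceph.repo as above, then
yum -y install ceph-common-13.2.5
# and copy /etc/ceph/ceph.client.admin.keyring to the node as well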
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: ceph-sc
  namespace: default
  annotations:
    storageclass.kubernetes.io/is-default-class: "false"
provisioner: ceph.com/rbd
reclaimPolicy: Retain
parameters:
  monitors: 172.16.1.31:6789,172.16.1.32:6789,172.16.1.33:6789
  adminId: admin
  adminSecretName: storage-secret
  adminSecretNamespace: default
  pool: k8s
  fsType: xfs
  userId: admin
  userSecretName: storage-secret
  imageFormat: "2"
  imageFeatures: "layering"
kubectl apply -f storage_class.yaml
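Once applied, the StorageClass should show up in the cluster; a quick check (output omitted):
kubectl get sc ceph-sc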
apiVersion: v1
kind: Secret
metadata:
  name: storage-secret
  namespace: default
data:
  key: QVFDMmIrWmNEL3JTS2hBQWwwdmR3eGJGMmVYNUM3SjdDUGZZbkE9PQ==
type: kubernetes.io/rbd
kubectl apply -f storage_secret.yaml
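The key field is the base64-encoded admin key obtained earlier; you can confirm the Secret exists with:
kubectl get secret storage-secret -o yaml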
Note: the provisioner value must match the PROVISIONER_NAME configured in rbd-provisioner (ceph.com/rbd here).
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: ceph-pvc
  namespace: default
spec:
  storageClassName: ceph-sc
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
kubectl apply -f storage_pvc.yaml
# After the PVC is created, a PV is provisioned automatically:
[root@k8s-master03 ceph]# kubectl get pv
NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                  STORAGECLASS   REASON   AGE
pvc-315991e9-7d4b-11e9-b6cc-0050569ba238   1Gi        RWO            Retain           Bound    default/ceph-sc-test   prom-sc                 13h
# Under normal conditions the PVC is also in the Bound state
[root@k8s-master03 ceph]# kubectl get pvc
NAME           STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
ceph-sc-test   Bound    pvc-315991e9-7d4b-11e9-b6cc-0050569ba238   1Gi        RWO            prom-sc        17s
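On the Ceph side, the dynamically created image can also be seen in the pool (image names are generated per PVC, so yours will differ):
# run on a Ceph node
rbd ls k8s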
apiVersion: v1
kind: Pod
metadata:
  name: ceph-pod1
spec:
  nodeName: k8s-node02
  containers:
    - name: nginx
      image: nginx:1.14
      volumeMounts:
        - name: ceph-rdb-vol1
          mountPath: /usr/share/nginx/html
          readOnly: false
  volumes:
    - name: ceph-rdb-vol1
      persistentVolumeClaim:
        claimName: ceph-pvc
kubectl apply -f storage_pod.yaml
# Check the pod status
[root@k8s-master03 ceph]# kubectl get pods -o wide
NAME        READY   STATUS    RESTARTS   AGE     IP            NODE         NOMINATED NODE   READINESS GATES
ceph-pod1   1/1     Running   0          3d23h   10.244.4.75   k8s-node02   <none>           <none>
# Enter the container to inspect the mounts; the rbd device is mounted at /usr/share/nginx/html.
[root@k8s-master03 ceph]# kubectl exec -it ceph-pod1 -- /bin/bash
root@ceph-pod1:/# df -hT
/dev/rbd0   xfs   1014M   33M   982M   4%   /usr/share/nginx/html
# Add a test file under the mount directory
root@ceph-pod1:/# cat /usr/share/nginx/html/index.html
hello ceph!
# On Ceph, check which node has the image mapped; currently it is 172.16.1.22, i.e. k8s-node02.
[root@ceph-node1 ~]# rbd status k8s/kubernetes-dynamic-pvc-2410765c-7dec-11e9-aa80-26a98c3bc9e4
Watchers:
        watcher=172.16.1.22:0/264870305 client.24553 cookie=18446462598732840961
# Now delete this pod
[root@k8s-master03 ceph]# kubectl delete -f storage_pod.yaml
pod "ceph-pod1" deleted
# Edit the manifest storage_pod.yaml to schedule the pod onto k8s-node01 and apply it, as sketched below.
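The only change needed is the nodeName field in the pod spec; a minimal sketch of the edit and re-apply:
# storage_pod.yaml (only the changed field shown)
spec:
  nodeName: k8s-node01    # was k8s-node02

kubectl apply -f storage_pod.yaml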
# Shortly after, check the pod status; the pod is now running on k8s-node01.
[root@k8s-master01 ~]# kubectl get pods -o wide
NAME        READY   STATUS    RESTARTS   AGE   IP            NODE         NOMINATED NODE   READINESS GATES
ceph-pod1   1/1     Running   0          34s   10.244.3.28   k8s-node01   <none>           <none>
# On Ceph, check the image's watcher again; it is now 172.16.1.21, i.e. k8s-node01.
[root@ceph-node1 ~]# rbd status k8s/kubernetes-dynamic-pvc-2410765c-7dec-11e9-aa80-26a98c3bc9e4
Watchers:
        watcher=172.16.1.21:0/1812501701 client.114340 cookie=18446462598732840963
# Enter the container and verify the file is still there and nothing was lost, which shows the pod kept using the same image after moving to another node.
[root@k8s-master03 ceph]# kubectl exec -it ceph-pod1 -- /bin/bash
root@ceph-pod1:/# cat /usr/share/nginx/html/index.html
hello ceph!