centos 7.5
kernel 4.18.7-1.el7.elrepo.x86_64
docker 18.06
kubernetes v1.12.2, deployed with kubeadm
Network: canal
DNS: coredns
Cluster members:
192.168.1.1 kube-master
192.168.1.2 kube-node1
192.168.1.3 kube-node2
192.168.1.4 kube-node3
192.168.1.5 kube-node4
Every node has a spare 200 GB disk prepared for Ceph: /dev/sdb
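Before starting, it is worth confirming on every node that /dev/sdb really exists and carries no leftover partition or filesystem signatures, since Rook will skip disks that look used. A minimal check (wipefs is destructive and only needed if the disk was used before):

lsblk /dev/sdb
# only if old signatures are present and the data is disposable:
# wipefs -a /dev/sdb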
cat <<EOF > /etc/sysctl.d/ceph.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl --system
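The net.bridge.* keys only exist once the br_netfilter module is loaded (on a kubeadm-initialized node it normally already is). A quick verification sketch:

modprobe br_netfilter
sysctl net.ipv4.ip_forward net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables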
# Unless noted otherwise, all commands are run on the master
cd $HOME
git clone https://github.com/rook/rook.git
cd rook
cd cluster/examples/kubernetes/ceph
kubectl apply -f operator.yaml
# Wait a moment after the apply.
# The operator creates two pods on every host in the cluster: rook-discover and rook-ceph-agent
kubectl -n rook-ceph-system get pod -o wide
kubectl label nodes {kube-node1,kube-node2,kube-node3} ceph-mon=enabled
kubectl label nodes {kube-node1,kube-node2,kube-node3} ceph-osd=enabled
# mgr can only run on a single node; this is a limitation of running Ceph on Kubernetes
kubectl label nodes kube-node1 ceph-mgr=enabled
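A quick sanity check that the labels landed on the intended nodes:

kubectl get nodes -L ceph-mon,ceph-osd,ceph-mgr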
Detailed documentation for the official configuration file: https://rook.io/docs/rook/v0.8/ceph-cluster-crd.html
A few places in the file deserve attention:
apiVersion: v1
kind: Namespace
metadata:
  name: rook-ceph
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: rook-ceph-cluster
  namespace: rook-ceph
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: rook-ceph-cluster
  namespace: rook-ceph
rules:
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: [ "get", "list", "watch", "create", "update", "delete" ]
---
# Allow the operator to create resources in this cluster's namespace
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: rook-ceph-cluster-mgmt
  namespace: rook-ceph
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: rook-ceph-cluster-mgmt
subjects:
- kind: ServiceAccount
  name: rook-ceph-system
  namespace: rook-ceph-system
---
# Allow the pods in this namespace to work with configmaps
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: rook-ceph-cluster
  namespace: rook-ceph
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: rook-ceph-cluster
subjects:
- kind: ServiceAccount
  name: rook-ceph-cluster
  namespace: rook-ceph
---
apiVersion: ceph.rook.io/v1beta1
kind: Cluster
metadata:
  name: rook-ceph
  namespace: rook-ceph
spec:
  cephVersion:
    # The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw).
    # v12 is luminous, v13 is mimic, and v14 is nautilus.
    # RECOMMENDATION: In production, use a specific version tag instead of the general v13 flag, which pulls the latest release
    # and could result in different versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/.
    image: ceph/ceph:v13
    # Whether to allow unsupported versions of Ceph. Currently only luminous and mimic are supported.
    # After nautilus is released, Rook will be updated to support nautilus.
    # Do not set to true in production.
    allowUnsupported: false
  # The path on the host where configuration files will be persisted. If not specified, a kubernetes emptyDir will be created (not recommended).
  # Important: if you reinstall the cluster, make sure you delete this directory from each host or else the mons will fail to start on the new cluster.
  # In Minikube, the '/data' directory is configured to persist across reboots. Use "/data/rook" in Minikube environment.
  dataDirHostPath: /var/lib/rook
  # The service account under which to run the daemon pods in this cluster if the default account is not sufficient (OSDs)
  serviceAccount: rook-ceph-cluster
  # set the amount of mons to be started
  # count defines how many ceph-mon instances run; the default of three is fine here
  mon:
    count: 3
    allowMultiplePerNode: true
  # enable the ceph dashboard for viewing cluster status
  dashboard:
    enabled: true
    # serve the dashboard under a subpath (useful when you are accessing the dashboard via a reverse proxy)
    # urlPrefix: /ceph-dashboard
  network:
    # toggle to use hostNetwork, i.e. the host's network for communication
    # using the host network supposedly lets hosts outside the cluster mount Ceph,
    # but I have not tried it; if you are interested, try setting it to true
    # this cluster is only consumed from inside Kubernetes, so I leave it as false
    hostNetwork: false
  # To control where various services will be scheduled by kubernetes, use the placement configuration sections below.
  # The example under 'all' would have all services scheduled on kubernetes nodes labeled with 'role=storage-node' and
  # tolerate taints with a key of 'storage-node'.
  placement:
#    all:
#      nodeAffinity:
#        requiredDuringSchedulingIgnoredDuringExecution:
#          nodeSelectorTerms:
#          - matchExpressions:
#            - key: role
#              operator: In
#              values:
#              - storage-node
#      podAffinity:
#      podAntiAffinity:
#      tolerations:
#      - key: storage-node
#        operator: Exists
#   The above placement information can also be specified for mon, osd, and mgr components
#    mon:
#    osd:
#    mgr:
#   nodeAffinity: selecting nodes by label restricts which nodes these pods can be scheduled on
#   pinning them is recommended so these pods do not wander around the cluster
    mon:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
          - matchExpressions:
            - key: ceph-mon
              operator: In
              values:
              - enabled
    osd:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
          - matchExpressions:
            - key: ceph-osd
              operator: In
              values:
              - enabled
    mgr:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
          - matchExpressions:
            - key: ceph-mgr
              operator: In
              values:
              - enabled
  resources:
#   The requests and limits set here, allow the mgr pod to use half of one CPU core and 1 gigabyte of memory
#    mgr:
#      limits:
#        cpu: "500m"
#        memory: "1024Mi"
#      requests:
#        cpu: "500m"
#        memory: "1024Mi"
#   The above example requests/limits can also be added to the mon and osd components
#    mon:
#    osd:
  storage: # cluster level storage configuration and selection
    useAllNodes: false
    useAllDevices: false
    deviceFilter:
    location:
    config:
      # The default and recommended storeType is dynamically set to bluestore for devices and filestore for directories.
      # Set the storeType explicitly only if it is required not to use the default.
      # storeType: bluestore
      # databaseSizeMB: "1024" # this value can be removed for environments with normal sized disks (100 GB or larger)
      # journalSizeMB: "1024" # this value can be removed for environments with normal sized disks (20 GB or larger)
    # Cluster level list of directories to use for storage. These values will be set for all nodes that have no `directories` set.
    # directories:
    # - path: /rook/storage-dir
    # Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named
    # nodes below will be used as storage resources. Each node's 'name' field should match their 'kubernetes.io/hostname' label.
    # Recommended way to configure the disks:
    # name: the node to use; it must match the node's kubernetes.io/hostname label, i.e. the name shown by "kubectl get nodes"
    # devices: the disks to turn into OSDs
    # - name: "sdb" means /dev/sdb becomes an OSD
    nodes:
    - name: "kube-node1"
      devices:
      - name: "sdb"
    - name: "kube-node2"
      devices:
      - name: "sdb"
    - name: "kube-node3"
      devices:
      - name: "sdb"
#      directories: # specific directories to use for storage can be specified for each node
#      - path: "/rook/storage-dir"
#      resources:
#        limits:
#          cpu: "500m"
#          memory: "1024Mi"
#        requests:
#          cpu: "500m"
#          memory: "1024Mi"
#    - name: "172.17.4.201"
#      devices: # specific devices to use for storage can be specified for each node
#      - name: "sdb"
#      - name: "sdc"
#      config: # configuration can be specified at the node level which overrides the cluster level config
#        storeType: filestore
#    - name: "172.17.4.301"
#      deviceFilter: "^sd."
kubectl apply -f cluster.yaml
# cluster.yaml creates its resources in the rook-ceph namespace
# watch the pods in that namespace and you will see them come up one after another
kubectl -n rook-ceph get pod -o wide -w
# once every pod is Running you are done
# note that the pods land exactly on the hosts we labelled earlier
kubectl -n rook-ceph get pod -o wide
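To inspect the Ceph cluster itself, the same examples directory ships a toolbox manifest; a sketch, assuming the file and the resulting pod are named toolbox.yaml and rook-ceph-tools as in the rook v0.8 examples:

kubectl apply -f toolbox.yaml
kubectl -n rook-ceph exec -it rook-ceph-tools -- ceph status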
Switch to the other hosts and take a look at the disks
lsblk
lsblk
kubectl -n rook-ceph get service
# you can see the dashboard listening on port 8443
kubectl apply -f dashboard-external-https.yaml
# check which port the NodePort ended up on
ss -tanl
kubectl -n rook-ceph get service
MGR_POD=`kubectl get pod -n rook-ceph | grep mgr | awk '{print $1}'`
kubectl -n rook-ceph logs $MGR_POD | grep password
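With the NodePort and the password you can try the dashboard from a browser or curl; a reachability sketch, where 31631 is only a placeholder for whatever port your external service actually got (the dashboard login user is typically admin):

curl -k https://192.168.1.2:31631
# -k is needed because the dashboard serves a self-signed certificate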
apiVersion: ceph.rook.io/v1beta1
kind: Pool
metadata:
  # this name becomes the pool name in Ceph once the pool is created
  name: replicapool
  namespace: rook-ceph
spec:
  replicated:
    size: 1
    # size: the number of copies of the data kept in the pool; 1 means a single copy with no redundancy
  failureDomain: osd
  # failureDomain: the failure domain for the data chunks
  # host: each chunk is placed on a different host
  # osd: each chunk is placed on a different OSD
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ceph # the StorageClass name that PVCs reference
provisioner: ceph.rook.io/block
parameters:
  pool: replicapool
  # Specify the namespace of the rook cluster from which to create volumes.
  # If not specified, it will use `rook` as the default namespace of the cluster.
  # This is also the namespace where the cluster will be
  clusterNamespace: rook-ceph
  # Specify the filesystem type of the volume. If not specified, it will use `ext4`.
  fstype: xfs
# set the reclaim policy to Retain (instead of the default Delete)
reclaimPolicy: Retain
kubectl apply -f storageclass.yaml
kubectl get storageclasses.storage.k8s.io -n rook-ceph
kubectl describe storageclasses.storage.k8s.io -n rook-ceph
cat << EOF > nginx.yaml
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nginx-pvc
spec:
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
  storageClassName: ceph
---
apiVersion: v1
kind: Service
metadata:
  name: nginx
spec:
  selector:
    app: nginx
  ports:
  - port: 80
    name: nginx-port
    targetPort: 80
    protocol: TCP
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      name: nginx
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx
        ports:
        - containerPort: 80
        volumeMounts:
        - mountPath: /html
          name: http-file
      volumes:
      - name: http-file
        persistentVolumeClaim:
          claimName: nginx-pvc
EOF
kubectl apply -f nginx.yaml
kubectl get pv,pvc
# check that the nginx pod is running as well
kubectl get pod
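To confirm the volume is actually writable from inside the pod, a small test sketch (the app=nginx label selector comes from the manifest above):

NGINX_POD=`kubectl get pod -l app=nginx -o jsonpath='{.items[0].metadata.name}'`
kubectl exec $NGINX_POD -- sh -c 'echo hello-ceph > /html/index.html && cat /html/index.html'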
kubectl delete -f nginx.yaml
kubectl get pv,pvc
# the pod and the PVC are gone, but the PV is still there!!!
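That is the reclaimPolicy: Retain from the StorageClass at work: the PV is kept until you remove it yourself. A cleanup sketch once the data is definitely no longer needed (the PV name is a placeholder; deleting the PV only removes the Kubernetes object, the RBD image behind it has to be cleaned up on the Ceph side separately):

kubectl get pv
kubectl delete pv <pv-name>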
kubectl label nodes kube-node4 ceph-osd=enabled
# add node4 to the existing configuration
cd $HOME/rook/cluster/examples/kubernetes/ceph/
vi cluster.yaml
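The change itself just mirrors the existing entries in the nodes list, for example:

    - name: "kube-node4"
      devices:
      - name: "sdb"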
kubectl apply -f cluster.yaml
# watch the rook-ceph namespace; the cluster picks up node4 automatically
kubectl -n rook-ceph get pod -o wide -w
kubectl -n rook-ceph get pod -o wide
lsblk
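From the Ceph side the new OSD should show up as well; a check via the toolbox pod mentioned earlier (assuming it is deployed and named rook-ceph-tools):

kubectl -n rook-ceph exec -it rook-ceph-tools -- ceph osd tree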
kubectl label nodes kube-node3 ceph-osd-
# remove node3's entry
cd $HOME/rook/cluster/examples/kubernetes/ceph/
vi cluster.yaml
kubectl apply -f cluster.yaml
# watch the rook-ceph namespace
kubectl -n rook-ceph get pod -o wide -w
kubectl -n rook-ceph get pod -o wide
# finally, remember to delete the /var/lib/rook directory on the host
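On the removed node itself, a cleanup sketch so the disk can be reused later (destructive; only run it once the OSD data is definitely not needed anymore):

rm -rf /var/lib/rook
wipefs -a /dev/sdb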
Official answer: https://rook.io/docs/rook/v0.8/common-issues.html
# Fix:
# drain the node
kubectl drain <node-name> --ignore-daemonsets --delete-local-data
# then bring it back
kubectl uncordon <node-name>