參考文檔:
Heketi是一個提供RESTful API管理GlusterFS卷的框架,便於管理員對GlusterFS進行操作:
Kubernetes與GlusterFS集羣已提前部署完成,請參考:
注意:GlusterFS只需要安裝並啓動即可,不必組建受信存儲池(trusted storage pools)
Hostname |
IP |
Remark |
kubenode1 |
172.30.200.21 |
|
kubenode2 |
172.30.200.22 |
|
kubenode3 |
172.30.200.23 |
|
heketi |
172.30.200.80 |
selinux disabled |
glusterfs01 |
172.30.200.81 |
|
glusterfs02 |
172.30.200.82 |
|
glusterfs03 |
172.30.200.83 |
# 設置iptables,heketi默認以tcp8080端口提供RESTful API服務; [root@heketi ~]# vim /etc/sysconfig/iptables -A INPUT -p tcp -m state --state NEW -m tcp --dport 8080 -j ACCEPT [root@heketi ~]# service iptables restart
# 添加gluster yum源,默認yum源中無相關package; # heketi:heketi服務; # heketi-client:heketi客戶端/命令行工具 [root@heketi ~]# yum install -y centos-release-gluster [root@heketi ~]# yum install -y heketi heketi-client
# 注意紅色字體是修改部分 [root@heketi ~]# vim /etc/heketi/heketi.json { # 默認端口tcp8080 "_port_comment": "Heketi Server Port Number", "port": "8080", # 默認值false,不須要認證 "_use_auth": "Enable JWT authorization. Please enable for deployment", "use_auth": true, "_jwt": "Private keys for access", "jwt": { "_admin": "Admin has access to all APIs", "admin": { "key": "admin@123" }, "_user": "User only has access to /volumes endpoint", "user": { "key": "user@123" } }, "_glusterfs_comment": "GlusterFS Configuration", "glusterfs": { "_executor_comment": [ "Execute plugin. Possible choices: mock, ssh", "mock: This setting is used for testing and development.", " It will not send commands to any node.", "ssh: This setting will notify Heketi to ssh to the nodes.", " It will need the values in sshexec to be configured.", "kubernetes: Communicate with GlusterFS containers over", " Kubernetes exec api." ], # mock:測試環境下建立的volume沒法掛載; # kubernetes:在GlusterFS由kubernetes建立時採用 "executor": "ssh", "_sshexec_comment": "SSH username and private key file information", "sshexec": { "keyfile": "/etc/heketi/heketi_key", "user": "root", "port": "22", "fstab": "/etc/fstab" }, "_kubeexec_comment": "Kubernetes configuration", "kubeexec": { "host" :"https://kubernetes.host:8443", "cert" : "/path/to/crt.file", "insecure": false, "user": "kubernetes username", "password": "password for kubernetes user", "namespace": "OpenShift project or Kubernetes namespace", "fstab": "Optional: Specify fstab file on node. Default is /etc/fstab" }, "_db_comment": "Database file name", "db": "/var/lib/heketi/heketi.db", "_loglevel_comment": [ "Set log level. Choices are:", " none, critical, error, warning, info, debug", "Default is warning" ], # 默認設置爲debug,不設置時的默認值便是warning; # 日誌信息輸出在/var/log/message "loglevel" : "warning" } }
# 選擇ssh執行器,heketi服務器須要免密登錄GlusterFS集羣的各節點; # -t:祕鑰類型; # -q:安靜模式; # -f:指定生成祕鑰的目錄與名字,注意與heketi.json的ssh執行器中"keyfile"值一致; # -N:祕鑰密碼,」」即爲空 [root@heketi ~]# ssh-keygen -t rsa -q -f /etc/heketi/heketi_key -N "" # heketi服務由heketi用戶啓動,heketi用戶須要有新生成key的讀賦權,不然服務沒法啓動 [root@heketi ~]# chown heketi:heketi /etc/heketi/heketi_key # 分發公鑰; # -i:指定公鑰 [root@heketi ~]# ssh-copy-id -i /etc/heketi/heketi_key.pub root@172.30.200.81 [root@heketi ~]# ssh-copy-id -i /etc/heketi/heketi_key.pub root@172.30.200.82 [root@heketi ~]# ssh-copy-id -i /etc/heketi/heketi_key.pub root@172.30.200.83
# 經過yum安裝heketi,默認的systemd文件有1處錯誤; # /usr/lib/systemd/system/heketi.service文件的」-config=/etc/heketi/heketi.json」應該修改成」--config=/etc/heketi/heketi.json」; # 不然啓動時報」Error: unknown shorthand flag: 'c' in -config=/etc/heketi/heketi.json「錯,致使服務沒法啓動 [root@heketi ~]# systemctl enable heketi [root@heketi ~]# systemctl restart heketi [root@heketi ~]# systemctl status heketi
# 驗證 [root@heketi ~]# curl http://localhost:8080/hello
# 經過topology.json文件定義組建GlusterFS集羣; # topology指定了層級關係:clusters-->nodes-->node/devices-->hostnames/zone; # node/hostnames字段的manage填寫主機ip,指管理通道,在heketi服務器不能經過hostname訪問GlusterFS節點時間不能填寫hostname; # node/hostnames字段的storage填寫主機ip,指存儲數據通道,與manage能夠不同; # node/zone字段指定了node所處的故障域,heketi經過跨故障域建立副本,提升數據高可用性質,如能夠經過rack的不一樣區分zone值,建立跨機架的故障域; # devices字段指定GlusterFS各節點的盤符(能夠是多塊盤),必須是未建立文件系統的裸設備 [root@heketi ~]# vim /etc/heketi/topology.json { "clusters": [ { "nodes": [ { "node": { "hostnames": { "manage": [ "172.30.200.81" ], "storage": [ "172.30.200.81" ] }, "zone": 1 }, "devices": [ "/dev/sdb" ] }, { "node": { "hostnames": { "manage": [ "172.30.200.82" ], "storage": [ "172.30.200.82" ] }, "zone": 2 }, "devices": [ "/dev/sdb" ] }, { "node": { "hostnames": { "manage": [ "172.30.200.83" ], "storage": [ "172.30.200.83" ] }, "zone": 3 }, "devices": [ "/dev/sdb" ] } ] } ] }
# GlusterFS集羣各節點的glusterd服務已正常啓動,但沒必要組建受信存儲池; # heketi-cli命令行也可手動逐層添加cluster,node,device,volume等; # 「--server http://localhost:8080」:localhost執行heketi-cli時,可不指定; # 」--user admin --secret admin@123 「:heketi.json中設置了認證,執行heketi-cli時須要帶上認證信息,不然報」Error: Invalid JWT token: Unknown user」錯 [root@heketi ~]# heketi-cli --server http://localhost:8080 --user admin --secret admin@123 topology load --json=/etc/heketi/topology.json
# 查看heketi topology信息,此時volume與brick等未建立; # 經過」heketi-cli cluster info「能夠查看集羣相關信息; # 經過」heketi-cli node info「能夠查看節點相關信息; # 經過」heketi-cli device info「能夠查看device相關信息 [root@heketi ~]# heketi-cli --user admin --secret admin@123 topology info
kubernetes共享存儲供應模式:
基於StorageClass的動態存儲供應總體過程以下圖所示:
# provisioner:表示存儲分配器,須要根據後端存儲的不一樣而變動; # reclaimPolicy: 默認即」Delete」,刪除pvc後,相應的pv及後端的volume,brick(lvm)等一塊兒刪除;設置爲」Retain」時則保留數據,須要手工處理 # resturl:heketi API服務提供的url; # restauthenabled:可選參數,默認值爲」false」,heketi服務開啓認證時必須設置爲」true」; # restuser:可選參數,開啓認證時設置相應用戶名; # secretNamespace:可選參數,開啓認證時能夠設置爲使用持久化存儲的namespace; # secretName:可選參數,開啓認證時,須要將heketi服務的認證密碼保存在secret資源中; # clusterid:可選參數,指定集羣id,也能夠是1個clusterid列表,格式爲」id1,id2」; # volumetype:可選參數,設置卷類型及其參數,若是未分配卷類型,則有分配器決定卷類型;如」volumetype: replicate:3」表示3副本的replicate卷,」volumetype: disperse:4:2」表示disperse卷,其中‘4’是數據,’2’是冗餘校驗,」volumetype: none」表示distribute卷# [root@kubenode1 ~]# mkdir -p heketi [root@kubenode1 ~]# cd heketi/ [root@kubenode1 heketi]# vim gluster-heketi-storageclass.yaml apiVersion: storage.k8s.io/v1 kind: StorageClass metadata: name: gluster-heketi-storageclass provisioner: kubernetes.io/glusterfs reclaimPolicy: Delete parameters: resturl: "http://172.30.200.80:8080" restauthenabled: "true" restuser: "admin" secretNamespace: "default" secretName: "heketi-secret" volumetype: "replicate:2" # 生成secret資源,其中」key」值須要轉換爲base64編碼格式 [root@kubenode1 heketi]# echo -n "admin@123" | base64 # 注意name/namespace與storageclass資源中定義一致; # 密碼必須有「kubernetes.io/glusterfs」 type [root@kubenode1 heketi]# cat heketi-secret.yaml apiVersion: v1 kind: Secret metadata: name: heketi-secret namespace: default data: # base64 encoded password. E.g.: echo -n "mypassword" | base64 key: YWRtaW5AMTIz type: kubernetes.io/glusterfs
# 建立secret資源 [root@kubenode1 heketi]# kubectl create -f heketi-secret.yaml # 建立storageclass資源; # 注意:storageclass資源建立後不可變動,如修改只能刪除後重建 [root@kubenode1 heketi]# kubectl create -f gluster-heketi-storageclass.yaml
# 查看storageclass資源 [root@kubenode1 heketi]# kubectl describe storageclass gluster-heketi-storageclass
# 注意「storageClassName」的對應關係 [root@kubenode1 heketi]# vim gluster-heketi-pvc.yaml kind: PersistentVolumeClaim apiVersion: v1 metadata: name: gluster-heketi-pvc spec: storageClassName: gluster-heketi-storageclass # ReadWriteOnce:簡寫RWO,讀寫權限,且只能被單個node掛載; # ReadOnlyMany:簡寫ROX,只讀權限,容許被多個node掛載; # ReadWriteMany:簡寫RWX,讀寫權限,容許被多個node掛載; accessModes: - ReadWriteOnce resources: requests: # 注意格式,不能寫「GB」 storage: 1Gi # 建立pvc資源 [root@kubenode1 heketi]# kubectl create -f gluster-heketi-pvc.yaml
# 查看PVC,狀態爲」Bound」; # 「Capacity」爲2G,是由於同步建立meta數據 [root@kubenode1 heketi]# kubectl describe pvc gluster-heketi-pvc
# 查看PV詳細信息,除容量,引用storageclass信息,狀態,回收策略等外,同時給出GlusterFS的Endpoint與path; [root@kubenode1 heketi]# kubectl get pv [root@kubenode1 heketi]# kubectl describe pv pvc-532cb8c3-cfc6-11e8-8fde-005056bfa8ba
# 查看endpoints資源,能夠從pv信息中獲取,固定格式:glusterfs-dynamic-PVC_NAME; # endpoints資源中指定了掛載存儲時的具體地址 [root@kubenode1 heketi]# kubectl describe endpoints glusterfs-dynamic-gluster-heketi-pvc
# volume與brick已經建立; # 主掛載點(通訊)在glusterfs01節點,其他兩個節點備選; # 兩副本的狀況下,glusterfs03節點並未建立brick [root@heketi ~]# heketi-cli --user admin --secret admin@123 topology info
# 以glusterfs01節點爲例 [root@glusterfs01 ~]# lsblk
[root@glusterfs01 ~]# df -Th
# 查看volume的具體信息:2副本的replicate卷; # 另有」vgscan」,」vgdisplay」也可查看邏輯卷組信息等 [root@glusterfs01 ~]# gluster volume list [root@glusterfs01 ~]# gluster volume info vol_308342f1ffff3aea7ec6cc72f6d13cd7
# 設置1個volume被pod引用,volume的類型爲」persistentVolumeClaim」 [root@kubenode1 heketi]# vim gluster-heketi-pod.yaml kind: Pod apiVersion: v1 metadata: name: gluster-heketi-pod spec: containers: - name: gluster-heketi-container image: busybox command: - sleep - "3600" volumeMounts: - name: gluster-heketi-volume mountPath: "/pv-data" readOnly: false volumes: - name: gluster-heketi-volume persistentVolumeClaim: claimName: gluster-heketi-pvc # 建立pod [root@kubenode1 heketi]# kubectl create -f gluster-heketi-pod.yaml
# 在容器的掛載目錄中建立文件 [root@kubenode1 heketi]# kubectl exec -it gluster-heketi-pod /bin/sh / # cd /pv-data /pv-data # echo "This is a file!" >> a.txt /pv-data # echo "This is b file!" >> b.txt /pv-data # ls
# 在GlusterFS節點對應掛載目錄查看建立的文件; # 掛載目錄經過」df -Th」或」lsblk」獲取 [root@glusterfs01 ~]# df -Th [root@glusterfs01 ~]# cd /var/lib/heketi/mounts/vg_af339b60319a63a77b05ddbec1b21bbe/brick_d712f1543476c4198d3869c682cdaa9a/brick/ [root@glusterfs01 brick]# ls [root@glusterfs01 brick]# cat a.txt [root@glusterfs01 brick]# cat b.txt
# 刪除Pod應用後,再刪除pvc [root@kubenode1 heketi]# kubectl delete -f gluster-heketi-pod.yaml [root@kubenode1 heketi]# kubectl delete -f gluster-heketi-pvc.yaml # k8s資源 [root@kubenode1 heketi]# kubectl get pvc [root@kubenode1 heketi]# kubectl get pv [root@kubenode1 heketi]# kubectl get endpoints
# heketi [root@heketi ~]# heketi-cli --user admin --secret admin@123 topology info
# GlusterFS節點 [root@glusterfs01 ~]# lsblk [root@glusterfs01 ~]# df -Th [root@glusterfs01 ~]# gluster volume list