# Complete reference for a Pod definition file in YAML format:
apiVersion: v1        # required; API version, e.g. v1
kind: Pod             # required; Pod
metadata:             # required; metadata
  name: string        # required; Pod name
  namespace: string   # required; namespace the Pod belongs to
  labels:             # custom labels
    - name: string    # custom label name
  annotations:        # custom annotation list
    - name: string
spec:                 # required; detailed definition of the containers in the Pod
  containers:         # required; list of containers in the Pod
  - name: string      # required; container name
    image: string     # required; container image name
    imagePullPolicy: [Always | Never | IfNotPresent]  # image pull policy: Always always pulls the image; IfNotPresent prefers the local image and pulls only if it is missing; Never uses only the local image
    command: [string] # container start command list; if not specified, the command baked into the image is used
    args: [string]    # argument list for the start command
    workingDir: string      # working directory of the container
    volumeMounts:           # storage volumes mounted into the container
    - name: string          # name of a shared volume defined on the Pod; must be a volume name defined in the volumes[] section
      mountPath: string     # absolute path where the volume is mounted inside the container; should be shorter than 512 characters
      readOnly: boolean     # whether to mount it read-only
    ports:                  # list of ports to expose
    - name: string          # port name
      containerPort: int    # port the container listens on
      hostPort: int         # port the container's host listens on; defaults to the same value as containerPort
      protocol: string      # port protocol, TCP or UDP; defaults to TCP
    env:                    # environment variables to set before the container runs
    - name: string          # environment variable name
      value: string         # environment variable value
    resources:              # resource limits and requests
      limits:               # resource limits
        cpu: string         # CPU limit in cores; mapped to the docker run --cpu-shares parameter
        memory: string      # memory limit, e.g. in MiB/GiB; mapped to the docker run --memory parameter
      requests:             # resource requests
        cpu: string         # CPU request; initial amount available when the container starts
        memory: string      # memory request; initial amount available when the container starts
    livenessProbe:          # health check for each container in the Pod; the container is restarted automatically after several probes get no response. The methods are exec, httpGet and tcpSocket; set only one of them per container
      exec:                 # health check of type exec
        command: [string]   # command or script to run for the exec probe
      httpGet:              # health check of type httpGet; path and port must be specified
        path: string
        port: number
        host: string
        scheme: string
        httpHeaders:
        - name: string
          value: string
      tcpSocket:            # health check of type tcpSocket
        port: number
      initialDelaySeconds: 0  # seconds to wait after the container has started before the first probe
      timeoutSeconds: 0       # timeout in seconds to wait for a probe response; defaults to 1 second
      periodSeconds: 0        # how often to probe, in seconds; defaults to once every 10 seconds
      successThreshold: 0
      failureThreshold: 0
    securityContext:
      privileged: false
  restartPolicy: [Always | Never | OnFailure]  # restart policy of the Pod: Always means the kubelet restarts it no matter how it terminated; OnFailure restarts it only when it exits with a non-zero code; Never means it is never restarted
  nodeSelector: object      # schedule the Pod onto nodes carrying these labels, specified as key: value pairs
  imagePullSecrets:         # names of the secrets used when pulling the image, specified as name: secretName
  - name: string
  hostNetwork: false        # whether to use host networking; defaults to false; true means the Pod uses the host's network
  volumes:                  # list of shared volumes defined on this Pod
  - name: string            # shared volume name (there are many volume types)
    emptyDir: {}            # emptyDir volume: a temporary directory that shares the Pod's lifecycle; the value is empty
    hostPath:               # hostPath volume: mounts a directory of the host the Pod runs on
      path: string          # directory on the host that will be mounted into the container
    secret:                 # secret volume: mounts a Secret object defined in the cluster into the container
      secretName: string
      items:
      - key: string
        path: string
    configMap:              # configMap volume: mounts a predefined ConfigMap object into the container
      name: string
      items:
      - key: string
        path: string
http://blog.itpub.net/28916011/viewspace-2214692/
In the previous section, the Pods we created were defined directly through resource manifests. If you delete such a Pod by hand, it is not recreated automatically; Pods created this way are called autonomous (self-managed) Pods.
In production, autonomous Pods are rarely used.
Next we look at another kind of Pod: controller-managed Pods. A controller strictly enforces the desired number of Pods according to the defined policy: as soon as it finds too few Pods, it immediately creates new ones; as soon as it finds too many, it kills the extra Pods.
Pod controllers: ReplicaSet controller, Deployment controller (must be mastered), DaemonSet controller, Job controller.
ReplicaSet controller: creates the specified number of Pod replicas on the user's behalf and guarantees that the number of replicas matches the user's expectation; it also supports automatic scaling up and down. A ReplicaSet consists of three components: 1. the desired number of Pod replicas; 2. a label selector (which identifies the Pods it manages); 3. a Pod template (if there are fewer Pods than desired, new Pods are created from this template).
Deployment controller: a Deployment controls Pods by controlling ReplicaSets. Deployments support rolling updates, rollbacks and declarative configuration. A Deployment only cares about the group, not about individual Pods.
DaemonSet controller: ensures that every node in the cluster runs exactly one replica of the Pod (without a DaemonSet, a node can run multiple replicas of a Pod). If a new node is added to the cluster, a Pod replica is automatically created on it as well.
Job controller: for workloads that run only once and exit normally when they finish; the Pod is rebuilt only if it did not complete.
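For reference (not part of the original walkthrough), a minimal Job manifest might look like the sketch below; the name, image and command are hypothetical examples:

apiVersion: batch/v1
kind: Job
metadata:
  name: pi-job                      # hypothetical name
spec:
  template:
    spec:
      containers:
      - name: pi
        image: perl:5.34            # hypothetical image
        command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]
      restartPolicy: OnFailure      # rebuild the Pod only if it exits with a failure
  backoffLimit: 4                   # stop retrying after 4 failed attempts

Once the command exits with code 0, the Job is marked complete and the Pod is not recreated.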
StatefulSet controller: manages stateful applications; each Pod replica is managed individually and has its own unique identity.
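A rough sketch of a StatefulSet, just to show the stable-identity idea (the names are hypothetical, and it also requires a headless Service referenced by serviceName, omitted here):

apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: web                  # hypothetical name
spec:
  serviceName: web           # headless Service that gives each Pod a stable DNS name
  replicas: 2
  selector:
    matchLabels:
      app: web
  template:
    metadata:
      labels:
        app: web
    spec:
      containers:
      - name: nginx
        image: nginx:alpine  # hypothetical image

The Pods are created as web-0 and web-1 and keep those names across restarts.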
From version 1.2 through 1.7, Kubernetes supported TPR (ThirdPartyResource, third-party resources). From Kubernetes 1.8 onward this is replaced by CRD (CustomResourceDefinition, user-defined resources).
[root@master manifests]# kubectl explain replicaset
[root@master manifests]# kubectl explain rs                 # rs is short for replicaset
[root@master manifests]# kubectl explain rs.spec.template
[root@master manifests]# kubectl get deploy
NAME           DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
myapp          2         2         2            0           10d
mytomcat       3         3         3            3           10d
nginx-deploy   1         1         1            1           13d
[root@master manifests]# kubectl delete deploy myapp
deployment.extensions "myapp" deleted
[root@master manifests]# kubectl delete deploy nginx-deploy
deployment.extensions "nginx-deploy" deleted
[root@master manifests]# cat rs-demo.yaml
apiVersion: apps/v1
kind: ReplicaSet
metadata:
  name: myapp
  namespace: default
spec:                 # this is the controller's spec
  replicas: 2         # number of replicas
  selector:           # label selector; for help: kubectl explain rs.spec.selector
    matchLabels:
      app: myapp
      release: canary
  template:           # Pod template; for help: kubectl explain rs.spec.template
    metadata:         # kubectl explain rs.spec.template.metadata
      name: myapp-pod
      labels:         # must match the selector defined above
        app: myapp
        release: canary
        environment: qa
    spec:             # this is the Pod's spec
      containers:
      - name: myapp-container
        image: ikubernetes/nginx:latest
        ports:
        - name: http
          containerPort: 80
[root@master manifests]# kubectl create -f rs-demo.yaml
replicaset.apps/myapp created
[root@master manifests]# kubectl get rs
NAME    DESIRED   CURRENT   READY   AGE
myapp   2         2         2       3m
The READY column above shows 2, which means both Pod replicas managed by the ReplicaSet are running normally.
[root@master manifests]# kubectl get pods --show-labels
NAME          READY   STATUS             RESTARTS   AGE   LABELS
myapp-6kncv   1/1     Running            0          15m   app=myapp,environment=qa,release=canary
myapp-rbqjz   1/1     Running            0          15m   app=myapp,environment=qa,release=canary
pod-demo      0/2     CrashLoopBackOff   2552       9d    app=myapp,tier=frontend
These are the two Pods created by the ReplicaSet controller.
[root@master manifests]# kubectl describe pods myapp-6kncv
IP:             10.244.2.44
[root@master manifests]# curl 10.244.2.44
Hello MyApp | Version: v1 | <a href="hostname.html">Pod Name</a>
Edit the ReplicaSet's live configuration (this is not the file we wrote by hand; it is the object maintained by the apiserver):
[root@master manifests]# kubectl edit rs myapp
Change replicas to 5 in the editor; the change takes effect immediately after saving.
[root@master manifests]# kubectl get pods --show-labels
NAME                   READY   STATUS              RESTARTS   AGE   LABELS
client                 0/1     Error               0          11d   run=client
liveness-httpget-pod   1/1     Running             3          5d    <none>
myapp-6kncv            1/1     Running             0          31m   app=myapp,environment=qa,release=canary
myapp-c64mb            1/1     Running             0          3s    app=myapp,environment=qa,release=canary
myapp-fsrsg            1/1     Running             0          3s    app=myapp,environment=qa,release=canary
myapp-ljczj            0/1     ContainerCreating   0          3s    app=myapp,environment=qa,release=canary
myapp-rbqjz            1/1     Running             0          31m   app=myapp,environment=qa,release=canary
Likewise, you can use kubectl edit rs myapp to upgrade the version: change image: to ikubernetes/myapp:v2 and the ReplicaSet now records version v2.
[root@master manifests]# kubectl get rs -o wide
NAME    DESIRED   CURRENT   READY   AGE   CONTAINERS        IMAGES                 SELECTOR
myapp   5         5         5       1h    myapp-container   ikubernetes/myapp:v2   app=myapp,release=canary
However, a Pod only actually runs v2 after it has been rebuilt, for example when Pods are deleted and recreated.
With the Deployment controller we can update the Pod version dynamically.
The idea: first create a v2 ReplicaSet, then delete the Pods of the v1 ReplicaSet one by one, so that the automatically recreated Pods come up as v2. Once all Pods are v2, the v1 ReplicaSet is not deleted, so if a problem is found in v2 we can still roll back to v1.
By default, a Deployment keeps 10 historical ReplicaSet revisions.
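If you want to keep a different number of revisions, the limit can be declared in the Deployment spec; a small sketch (the value 5 is just an example):

spec:
  revisionHistoryLimit: 5   # keep only the 5 most recent ReplicaSet revisions (the default is 10)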
[root@master manifests]# kubectl explain deploy
[root@master manifests]# kubectl explain deploy.spec
[root@master manifests]# kubectl explain deploy.spec.strategy    # update strategy
[root@master ~]# kubectl delete rs myapp
[root@master manifests]# cat deploy-demo.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myapp-deploy
  namespace: default
spec:
  replicas: 2
  selector:            # label selector
    matchLabels:       # labels to match
      app: myapp
      release: canary
  template:
    metadata:
      labels:
        app: myapp     # must match the selector above
        release: canary
    spec:
      containers:
      - name: myapp
        image: ikubernetes/myapp:v1
        ports:
        - name: http
          containerPort: 80
[root@master manifests]# kubectl apply -f deploy-demo.yaml
deployment.apps/myapp-deploy created
apply performs declarative creation and updates.
[root@master manifests]# kubectl get deploy
NAME           DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
myapp-deploy   2         2         2            2           1m
[root@master ~]# kubectl get rs
NAME                      DESIRED   CURRENT   READY   AGE
myapp-deploy-69b47bc96d   2         2         2       17m
The ReplicaSet above was created automatically by the Deployment.
[root@master ~]# kubectl get pods
NAME                            READY   STATUS    RESTARTS   AGE
myapp-deploy-69b47bc96d-7jnwx   1/1     Running   0          19m
myapp-deploy-69b47bc96d-btskk   1/1     Running   0          19m
Modify deploy-demo.yaml, change replicas to 3, then run kubectl apply -f deploy-demo.yaml again to make the change in the file take effect.
[root@master ~]# kubectl describe deploy myapp-deploy
[root@master ~]# kubectl get pods -l app=myapp -w
-l filters by label.
-w watches the listed resources continuously.
[root@master ~]# kubectl get rs -o wide
NAME                      DESIRED   CURRENT   READY   AGE   CONTAINERS   IMAGES                 SELECTOR
myapp-deploy-69b47bc96d   2         2         2       1h    myapp        ikubernetes/myapp:v1   app=myapp,pod-template-hash=2560367528,release=canary
View the rolling-update history:
[root@master ~]# kubectl rollout history deployment myapp-deploy
deployments "myapp-deploy"
REVISION  CHANGE-CAUSE
1         <none>
Next we scale the Deployment to 5 replicas. We could use vim deploy-demo.yaml and change replicas to 5, but there is another way: the patch method, shown below.
[root@master manifests]# kubectl patch deployment myapp-deploy -p '{"spec":{"replicas":5}}'
deployment.extensions/myapp-deploy patched
[root@master manifests]# kubectl get deploy
NAME           DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
myapp-deploy   5         5         5            5           2h
[root@master manifests]# kubectl get pods
NAME                            READY   STATUS    RESTARTS   AGE
myapp-deploy-69b47bc96d-7jnwx   1/1     Running   0          2h
myapp-deploy-69b47bc96d-8gn7v   1/1     Running   0          59s
myapp-deploy-69b47bc96d-btskk   1/1     Running   0          2h
myapp-deploy-69b47bc96d-p5hpd   1/1     Running   0          59s
myapp-deploy-69b47bc96d-zjv4p   1/1     Running   0          59s
mytomcat-5f8c6fdcb-9krxn        1/1     Running   0          8h
Next, modify the update strategy:
[root@master manifests]# kubectl patch deployment myapp-deploy -p '{"spec":{"strategy":{"rollingUpdate":{"maxSurge":1,"maxUnavailable":0}}}}'
deployment.extensions/myapp-deploy patched
strategy: the update strategy.
maxSurge: at most how many Pods may exist above the desired replica count during an update.
maxUnavailable: at most how many Pods may be unavailable during an update.
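Instead of patching, the same strategy can also be written directly into deploy-demo.yaml; a sketch of the relevant part:

spec:
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 1         # at most 1 Pod above the desired replica count during an update
      maxUnavailable: 0   # no Pod may be unavailable during an update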
[root@master manifests]# kubectl describe deployment myapp-deploy
RollingUpdateStrategy:  0 max unavailable, 1 max surge
Next we use the set image command to upgrade the myapp image to v3 and immediately mark the myapp-deploy Deployment as paused. A resource paused with the pause command is no longer reconciled by the controller; it can be resumed with the "kubectl rollout resume" command.
[root@master manifests]# kubectl set image deployment myapp-deploy myapp=ikubernetes/myapp:v3 && kubectl rollout pause deployment myapp-deploy
[root@master ~]# kubectl get pods -l app=myapp -w
Resume the paused rollout:
[root@master ~]# kubectl rollout resume deployment myapp-deploy
deployment.extensions/myapp-deploy resumed
You can see the update continue (one old Pod is deleted, one new Pod is created, and so on):
[root@master manifests]# kubectl rollout status deployment myapp-deploy
Waiting for deployment "myapp-deploy" rollout to finish: 2 out of 5 new replicas have been updated...
Waiting for deployment spec update to be observed...
Waiting for deployment spec update to be observed...
Waiting for deployment "myapp-deploy" rollout to finish: 2 out of 5 new replicas have been updated...
Waiting for deployment "myapp-deploy" rollout to finish: 3 out of 5 new replicas have been updated...
Waiting for deployment "myapp-deploy" rollout to finish: 3 out of 5 new replicas have been updated...
Waiting for deployment "myapp-deploy" rollout to finish: 4 out of 5 new replicas have been updated...
Waiting for deployment "myapp-deploy" rollout to finish: 4 out of 5 new replicas have been updated...
Waiting for deployment "myapp-deploy" rollout to finish: 4 out of 5 new replicas have been updated...
Waiting for deployment "myapp-deploy" rollout to finish: 1 old replicas are pending termination...
Waiting for deployment "myapp-deploy" rollout to finish: 1 old replicas are pending termination...
deployment "myapp-deploy" successfully rolled out
[root@master manifests]# kubectl get rs -o wide
NAME                      DESIRED   CURRENT   READY   AGE   CONTAINERS   IMAGES                 SELECTOR
myapp-deploy-69b47bc96d   0         0         0       6h    myapp        ikubernetes/myapp:v1   app=myapp,pod-template-hash=2560367528,release=canary
myapp-deploy-6bdcd6755d   5         5         5       3h    myapp        ikubernetes/myapp:v3   app=myapp,pod-template-hash=2687823118,release=canary
mytomcat-5f8c6fdcb        3         3         3       12h   mytomcat     tomcat                 pod-template-hash=194729876,run=mytomcat
Above you can see that myapp now has two ReplicaSets, one for v1 and one for v3.
[root@master manifests]# kubectl rollout history deployment myapp-deploy
deployments "myapp-deploy"
REVISION  CHANGE-CAUSE
1         <none>
2         <none>
There are two revisions in the update history.
Now we roll back from v3 to the previous revision (if no revision is specified, the previous one is used).
[root@master manifests]# kubectl rollout undo deployment myapp-deploy --to-revision=1
deployment.extensions/myapp-deploy
You can see that the original revision 1 has been re-recorded as revision 3:
[root@master manifests]# kubectl rollout history deployment myapp-deploy
deployments "myapp-deploy"
REVISION  CHANGE-CAUSE
2         <none>
3         <none>
You can see that the working version is v1 again, i.e. the Deployment has been rolled back to v1.
[root@master manifests]# kubectl get rs -o wide
NAME                      DESIRED   CURRENT   READY   AGE   CONTAINERS   IMAGES                 SELECTOR
myapp-deploy-69b47bc96d   5         5         5       6h    myapp        ikubernetes/myapp:v1   app=myapp,pod-template-hash=2560367528,release=canary
myapp-deploy-6bdcd6755d   0         0         0       3h    myapp        ikubernetes/myapp:v3   app=myapp,pod-template-hash=2687823118,release=canary
The available filebeat tags can be seen at https://hub.docker.com/r/ikubernetes/filebeat/tags/:
[root@node1 manifests]# docker pull ikubernetes/filebeat:5.6.5-alpine
[root@node2 manifests]# docker pull ikubernetes/filebeat:5.6.5-alpine
Pull the filebeat image on both node1 and node2.
[root@node1 ~]# docker image inspect ikubernetes/filebeat:5.6.5-alpine
[root@master manifests]# kubectl explain pods.spec.containers.env
[root@master manifests]# cat ds-demo.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: myapp-ds
  namespace: default
spec:
  selector:            # label selector
    matchLabels:       # labels to match
      app: filebeat
      release: stable
  template:
    metadata:
      labels:
        app: filebeat  # must match the selector above
        release: stable
    spec:
      containers:
      - name: myapp
        image: ikubernetes/myapp:v1
        env:
        - name: REDIS_HOST
          value: redis.default.svc.cluster.local   # DNS name of the redis Service (created later)
        - name: REDIS_LOG_LEVEL
          value: info
[root@master manifests]# kubectl apply -f ds-demo.yaml
daemonset.apps/myapp-ds created
You can see that myapp-ds is running, and that there are two myapp-ds Pods because we have two worker nodes. The master node does not run a myapp-ds Pod because the master carries a taint; unless you add a toleration for that taint to the Pod, as sketched below, it cannot be scheduled onto the master.
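A hedged sketch of such a toleration in the DaemonSet's Pod template (the taint key shown is the one kubeadm placed on masters in this Kubernetes generation):

spec:
  template:
    spec:
      tolerations:
      - key: node-role.kubernetes.io/master   # taint that kubeadm places on the master node
        operator: Exists
        effect: NoSchedule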
[root@master manifests]# kubectl get pods
NAME             READY   STATUS    RESTARTS   AGE
myapp-ds-5tmdd   1/1     Running   0          1m
myapp-ds-dkmjj   1/1     Running   0          1m
[root@master ~]# kubectl logs myapp-ds-dkmjj
[root@master manifests]# kubectl delete -f ds-demo.yaml
[root@master manifests]# cat ds-demo.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: redis
  namespace: default
spec:
  replicas: 1
  selector:
    matchLabels:
      app: redis
      role: logstor            # log-storage role
  template:
    metadata:
      labels:
        app: redis
        role: logstor
    spec:                      # this is the Pod's spec
      containers:
      - name: redis
        image: redis:4.0-alpine
        ports:
        - name: redis
          containerPort: 6379
---                            # separate resource definitions with three dashes
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: filebeat-ds
  namespace: default
spec:
  selector:                    # label selector
    matchLabels:               # labels to match
      app: filebeat
      release: stable
  template:
    metadata:
      labels:
        app: filebeat          # must match the selector above
        release: stable
    spec:
      containers:
      - name: filebeat
        image: ikubernetes/filebeat:5.6.6-alpine
        env:
        - name: REDIS_HOST     # environment variable name; value holds its value
          value: redis.default.svc.cluster.local   # DNS name of the redis Service created below
        - name: REDIS_LOG_LEVEL
          value: info
[root@master manifests]# kubectl create -f ds-demo.yaml
deployment.apps/redis created
daemonset.apps/filebeat-ds created
[root@master manifests]# kubectl expose deployment redis --port=6379    # create a Service with the expose command; the other way is to create the Service from a manifest, as sketched below
service/redis exposed
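For comparison, an equivalent manifest-based Service might look like this sketch (the selector mirrors the labels of the redis Deployment above):

apiVersion: v1
kind: Service
metadata:
  name: redis
  namespace: default
spec:
  selector:
    app: redis
    role: logstor
  ports:
  - port: 6379
    targetPort: 6379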
[root@master manifests]# kubectl get svc    # svc is short for service
NAME    TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)    AGE
redis   ClusterIP   10.106.138.181   <none>        6379/TCP   48s
[root@master manifests]# kubectl get pods
NAME                     READY   STATUS    RESTARTS   AGE
filebeat-ds-hgbhr        1/1     Running   0          9h
filebeat-ds-xc7v7        1/1     Running   0          9h
redis-5b5d6fbbbd-khws2   1/1     Running   0          33m
[root@master manifests]# kubectl exec -it redis-5b5d6fbbbd-khws2 -- /bin/sh
/data # netstat -tnl
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address           Foreign Address         State
tcp        0      0 0.0.0.0:6379            0.0.0.0:*               LISTEN
tcp        0      0 :::6379                 :::*                    LISTEN
/data # nslookup redis.default.svc.cluster.local    # DNS resolves the Service name to an IP
nslookup: can't resolve '(null)': Name does not resolve

Name:      redis.default.svc.cluster.local
Address 1: 10.106.138.181 redis.default.svc.cluster.local
/data # redis-cli -h redis.default.svc.cluster.local
redis.default.svc.cluster.local:6379> keys *
(empty list or set)
redis.default.svc.cluster.local:6379>
[root@master manifests]# kubectl exec -it filebeat-ds-pnk8b -- /bin/sh
/ # ps aux
PID   USER     TIME   COMMAND
    1 root       0:00 /usr/local/bin/filebeat -e -c /etc/filebeat/filebeat.yml
   15 root       0:00 /bin/sh
   22 root       0:00 ps aux
/ # cat /etc/filebeat/filebeat.yml
filebeat.registry_file: /var/log/containers/filebeat_registry
filebeat.idle_timeout: 5s
filebeat.spool_size: 2048

logging.level: info

filebeat.prospectors:
- input_type: log
  paths:
    - "/var/log/containers/*.log"
    - "/var/log/docker/containers/*.log"
    - "/var/log/startupscript.log"
    - "/var/log/kubelet.log"
    - "/var/log/kube-proxy.log"
    - "/var/log/kube-apiserver.log"
    - "/var/log/kube-controller-manager.log"
    - "/var/log/kube-scheduler.log"
    - "/var/log/rescheduler.log"
    - "/var/log/glbc.log"
    - "/var/log/cluster-autoscaler.log"
  symlinks: true
  json.message_key: log
  json.keys_under_root: true
  json.add_error_key: true
  multiline.pattern: '^\s'
  multiline.match: after
  document_type: kube-logs
  tail_files: true
  fields_under_root: true

output.redis:
  hosts: ${REDIS_HOST:?No Redis host configured. Use env var REDIS_HOST to set host.}
  key: "filebeat"
/ # printenv
REDIS_HOST=redis.default.svc.cluster.local
/ # nslookup redis.default.svc.cluster.local
nslookup: can't resolve '(null)': Name does not resolve

Name:      redis.default.svc.cluster.local
Address 1: 10.106.138.181 redis.default.svc.cluster.local
DaemonSets also support rolling updates.
[root@master manifests]# kubectl set image daemonsets filebeat-ds filebeat=ikubernetes/filebeat:5.5.7-alpine
Explanation: "daemonsets filebeat-ds" means the DaemonSet is named filebeat-ds;
"filebeat=ikubernetes/filebeat:5.5.7-alpine" means the container named filebeat is set to the image ikubernetes/filebeat:5.5.7-alpine.
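How aggressively a DaemonSet rolls out such an image change is controlled by spec.updateStrategy; a sketch of the rolling-update form (the value 1 is just an example):

spec:
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1   # update the Pod on at most one node at a time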