1) CPU and memory: master at least 1C2G, 2C4G recommended; node at least 1C2G
2) Linux: kernel 3.10 or later, CentOS 7/RHEL 7 recommended
3) Docker: version 1.9 or later, 1.12+ recommended
4) etcd: version 2.0 or later, 3.0+ recommended
Official Kubernetes releases on GitHub: https://github.com/kubernetes/kubernetes/releases
deploy node ---- x1 : the node that runs this ansible playbook
etcd nodes ----- x3 : note that an etcd cluster must have an odd number of members (1, 3, 5, 7, ...)
master nodes --- x2 : the count can grow with the actual cluster size; an additional master VIP (virtual IP) must be planned
lb nodes ------- x2 : two load-balancer nodes, running haproxy+keepalived
node nodes ----- x3 : the nodes that actually carry workloads; raise machine specs and add nodes as needed
Machine plan:
<table>
<tr>
<th>IP</th>
<th>Hostname</th>
<th>Role</th>
<th>OS</th>
</tr>
<tr>
<td>192.168.2.10</td>
<td>master</td>
<td>deploy, master1, lb1, etcd</td>
<td rowspan="6">CentOS 7.5 x86_64</td>
</tr>
<tr>
<td>192.168.2.11</td>
<td>node1</td>
<td>etcd, node</td>
</tr>
<tr>
<td>192.168.2.12</td>
<td>node2</td>
<td>etcd, node</td>
</tr>
<tr>
<td>192.168.2.13</td>
<td>node3</td>
<td>node</td>
</tr>
<tr>
<td>192.168.2.14</td>
<td>master2</td>
<td>master2, lb2</td>
</tr>
<tr>
<td>192.168.2.16</td>
<td></td>
<td>vip</td>
</tr>
</table>
On all six machines, run:
yum install epel-release
yum update
yum install python
yum install -y python-pip git
pip install pip --upgrade -i http://mirrors.aliyun.com/pypi/simple/ --trusted-host mirrors.aliyun.com
pip install --no-cache-dir ansible -i http://mirrors.aliyun.com/pypi/simple/ --trusted-host mirrors.aliyun.com
Here is the SSH key distribution script I have used for years:
#!/bin/bash
keypath=/root/.ssh
[ -d ${keypath} ] || mkdir -p ${keypath}
rpm -q expect &> /dev/null || yum install expect -y
ssh-keygen -t rsa -f /root/.ssh/id_rsa -P ""
password=centos
for host in `seq 10 14`; do
expect <<EOF
set timeout 5
spawn ssh-copy-id 192.168.2.$host
expect {
    "yes/no" { send "yes\n";exp_continue }
    "password" { send "$password\n" }
}
expect eof
EOF
done
Run the script and the deploy node copies its key to the target hosts automatically:
[root@master ~]# sh sshkey.sh
git clone https://github.com/gjmzj/kubeasz.git
mkdir -p /etc/ansible
mv kubeasz/* /etc/ansible/
Download the binaries from Baidu Netdisk: https://pan.baidu.com/s/1c4RFaA#list/path=%2F
Download the tarball for whatever version you need; here I download 1.12.
After some fiddling, I finally got the k8s.1-12-1.tar.gz tarball onto the deploy node.
tar zxvf k8s.1-12-1.tar.gz
mv bin/* /etc/ansible/bin/
Example:
[root@master ~]# rz
rz waiting to receive.
Starting zmodem transfer.  Press Ctrl+C to cancel.
Transferring k8s.1-12-1.tar.gz...
  100%  234969 KB  58742 KB/sec  00:00:04  0 Errors
[root@master ~]# ls
anaconda-ks.cfg  ifcfg-ens192.bak  k8s.1-12-1.tar.gz  kubeasz
[root@master ~]# tar zxf k8s.1-12-1.tar.gz
[root@master ~]# ls
anaconda-ks.cfg  bin  ifcfg-ens192.bak  k8s.1-12-1.tar.gz  kubeasz
[root@master ~]# mv bin /etc/ansible/
mv: overwrite '/etc/ansible/bin/readme.md'? y
cd /etc/ansible/
cp example/hosts.m-masters.example hosts   // edit the contents to match your environment
[deploy]
192.168.2.10 NTP_ENABLED=no

# 'etcd' cluster must have odd member(s) (1,3,5,...)
# variable 'NODE_NAME' is the distinct name of a member in 'etcd' cluster
[etcd]
192.168.2.10 NODE_NAME=etcd1
192.168.2.11 NODE_NAME=etcd2
192.168.2.12 NODE_NAME=etcd3

[kube-master]
192.168.2.10
192.168.2.14

# 'loadbalance' node, with 'haproxy+keepalived' installed
[lb]
192.168.2.10 LB_IF="eth0" LB_ROLE=backup   # replace 'eth0' with the node's network interface
192.168.2.14 LB_IF="eth0" LB_ROLE=master

[kube-node]
192.168.2.11
192.168.2.12
192.168.2.13

[vip]
192.168.2.15
After editing hosts, test connectivity:
[root@master ansible]# ansible all -m ping
192.168.2.11 | SUCCESS => {
    "changed": false,
    "ping": "pong"
}
192.168.2.14 | SUCCESS => {
    "changed": false,
    "ping": "pong"
}
192.168.2.12 | SUCCESS => {
    "changed": false,
    "ping": "pong"
}
192.168.2.10 | SUCCESS => {
    "changed": false,
    "ping": "pong"
}
192.168.2.13 | SUCCESS => {
    "changed": false,
    "ping": "pong"
}
192.168.2.15 | SUCCESS => {
    "changed": false,
    "ping": "pong"
}
ansible-playbook 01.prepare.yml
ansible-playbook 02.etcd.yml
Check the health of the etcd nodes:
Run:
for ip in 10 11 12 ; do ETCDCTL_API=3 etcdctl --endpoints=https://192.168.2.$ip:2379 --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem endpoint health; done
Output:
https://192.168.2.10:2379 is healthy: successfully committed proposal: took = 857.393µs
https://192.168.2.11:2379 is healthy: successfully committed proposal: took = 1.0619ms
https://192.168.2.12:2379 is healthy: successfully committed proposal: took = 1.19245ms
Alternatively, add /etc/ansible/bin to the PATH:
[root@master ansible]# vim /etc/profile.d/k8s.sh
export PATH=$PATH:/etc/ansible/bin
[root@master ansible]# for ip in 10 11 12 ; do ETCDCTL_API=3 etcdctl --endpoints=https://192.168.2.$ip:2379 --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem endpoint health; done
https://192.168.2.10:2379 is healthy: successfully committed proposal: took = 861.891µs
https://192.168.2.11:2379 is healthy: successfully committed proposal: took = 1.061687ms
https://192.168.2.12:2379 is healthy: successfully committed proposal: took = 909.274µs
ansible-playbook 03.docker.yml
ansible-playbook 04.kube-master.yml
kubectl get componentstatus   // check cluster status
NAME                 STATUS    MESSAGE              ERROR
controller-manager   Healthy   ok
scheduler            Healthy   ok
etcd-0               Healthy   {"health":"true"}
etcd-1               Healthy   {"health":"true"}
etcd-2               Healthy   {"health":"true"}
ansible-playbook 05.kube-node.yml
Check the nodes:
[root@master ansible]# kubectl get nodes
NAME           STATUS                     ROLES    AGE    VERSION
192.168.2.10   Ready,SchedulingDisabled   master   112s   v1.12.1
192.168.2.11   Ready                      node     17s    v1.12.1
192.168.2.12   Ready                      node     17s    v1.12.1
192.168.2.13   Ready                      node     17s    v1.12.1
192.168.2.14   Ready,SchedulingDisabled   master   112s   v1.12.1
ansible-playbook 06.network.yml
kubectl get pod -n kube-system   // list pods in the kube-system namespace; the flannel pods show up here
[root@master ansible]# kubectl get pod -n kube-system
NAME                    READY   STATUS    RESTARTS   AGE
kube-flannel-ds-5d574   1/1     Running   0          47s
kube-flannel-ds-6kpnm   1/1     Running   0          47s
kube-flannel-ds-f2nfs   1/1     Running   0          47s
kube-flannel-ds-gmbmv   1/1     Running   0          47s
kube-flannel-ds-w5st7   1/1     Running   0          47s
ansible-playbook 07.cluster-addon.yml
Check the services in the kube-system namespace:
[root@master ~]# kubectl get svc -n kube-system
NAME                   TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)         AGE
kube-dns               ClusterIP   10.68.0.2       <none>        53/UDP,53/TCP   10h
kubernetes-dashboard   NodePort    10.68.119.108   <none>        443:35065/TCP   10h
metrics-server         ClusterIP   10.68.235.9     <none>        443/TCP         10h
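The dashboard service above is a NodePort (443:35065), so besides the apiserver proxy it is reachable directly on any node IP. A quick reachability check (a sketch; the NodePort is cluster-specific and taken from the output above):

curl -k https://192.168.2.11:35065/   # -k because the dashboard uses a self-signed certificate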
That completes the step-by-step deployment. Next, look up the login token for the dashboard:
[root@master ~]# kubectl get secret -n kube-system|grep admin
admin-user-token-4zdgw   kubernetes.io/service-account-token   3   9h
[root@master ~]# kubectl describe secret admin-user-token-4zdgw -n kube-system
Name:         admin-user-token-4zdgw
Namespace:    kube-system
Labels:       <none>
Annotations:  kubernetes.io/service-account.name: admin-user
              kubernetes.io/service-account.uid: 72378c78-ee7d-11e8-a2a7-000c2931fb97

Type:  kubernetes.io/service-account-token

Data
====
ca.crt:     1346 bytes
namespace:  11 bytes
token:      eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLTR6ZGd3Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiI3MjM3OGM3OC1lZTdkLTExZTgtYTJhNy0wMDBjMjkzMWZiOTciLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.J0MjCSAP00RDvQgG1xBPvAYVo1oycXfoBh0dqdCzX1ByILyCHUqqxixuQdfE-pZqP15u6UV8OF3lGI_mHs5DBvNK0pCfaRICSo4SXSihJHKl_j9Bbozq9PjQ5d7CqHOFoXk04q0mWpJ5o0rJ6JX6Psx93Ch0uaXPPMLtzL0kolIF0j1tCFnsob8moczH06hfzo3sg8h0YCXyO6Z10VT7GMuLlwiG8XgWcplm-vcPoY_AWHnLV3RwAJH0u1q0IrMprvgTCuHighTaSjPeUe2VsXMhDpocJMoHQOoHirQKmiIAnanbIm4N1TO_5R1cqh-_gH7-MH8xefgWXoSrO-fo2w
After passing the basic-auth login, paste the token above into the dashboard login page.
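If you would rather not copy the secret name by hand, a convenience one-liner can print the token (a sketch, assuming the admin-user ServiceAccount created by the cluster-addon step above):

kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}') | grep '^token'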
You can also query the secret itself:
[root@master ~]# kubectl get secret -n kube-system
NAME                               TYPE                                  DATA   AGE
admin-user-token-4zdgw             kubernetes.io/service-account-token   3      10h
coredns-token-98zvm                kubernetes.io/service-account-token   3      10h
default-token-zk5rj                kubernetes.io/service-account-token   3      10h
flannel-token-4gmtz                kubernetes.io/service-account-token   3      10h
kubernetes-dashboard-certs         Opaque                                0      10h
kubernetes-dashboard-key-holder    Opaque                                2      10h
kubernetes-dashboard-token-lcsd6   kubernetes.io/service-account-token   3      10h
metrics-server-token-j4s2c         kubernetes.io/service-account-token   3      10h
[root@master ~]# kubectl get secret/admin-user-token-4zdgw -n kube-system -o yaml
apiVersion: v1
data:
  ca.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUR0akNDQXA2Z0F3SUJBZ0lVVFB3YVdFR0gyT2kwaHlVeGlJWnhFSUF3UFpVd0RRWUpLb1pJaHZjTkFRRUwKQlFBd1lURUxNQWtHQTFVRUJoTUNRMDR4RVRBUEJnTlZCQWdUQ0VoaGJtZGFhRzkxTVFzd0NRWURWUVFIRXdKWQpVekVNTUFvR0ExVUVDaE1EYXpoek1ROHdEUVlEVlFRTEV3WlRlWE4wWlcweEV6QVJCZ05WQkFNVENtdDFZbVZ5CmJtVjBaWE13SGhjTk1UZ3hNVEl5TVRZeE1EQXdXaGNOTXpNeE1URTRNVFl4TURBd1dqQmhNUXN3Q1FZRFZRUUcKRXdKRFRqRVJNQThHQTFVRUNCTUlTR0Z1WjFwb2IzVXhDekFKQmdOVkJBY1RBbGhUTVF3d0NnWURWUVFLRXdOcgpPSE14RHpBTkJnTlZCQXNUQmxONWMzUmxiVEVUTUJFR0ExVUVBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKCktvWklodmNOQVFFQkJRQURnZ0VQQURDQ0FRb0NnZ0VCQU5zV1NweVVRcGYvWDFCaHNtUS9NUDVHVE0zcUFjWngKV3lKUjB0VEtyUDVWNStnSjNZWldjK01HSzlrY3h6OG1RUUczdldvNi9ENHIyZ3RuREVWaWxRb1dlTm0rR3hLSwpJNjkzczNlS2ovM1dIdGloOVA4TWp0RktMWnRvSzRKS09kUURYeGFHLzJNdzJEMmZnbzNJT2VDdlZzR0F3Qlc4ClYxMDh3dUVNdTIzMnhybFdSSFFWaTNyc0dmN3pJbkZzSFNOWFFDbXRMMHhubERlYnZjK2c2TWRtcWZraVZSdzIKNTFzZGxnbmV1aEFqVFJaRkYvT0lFWE4yUjIyYTJqZVZDbWNySEcvK2orU0tzTlpmeVVCb216NGRUcmRsV0JEUQpob3ZzSGkrTEtJVGNxZHBQV3MrZmxIQjlaL1FRUnM5MTZEREpxMHRWNFV6MEY0YjRsemJXaGdrQ0F3RUFBYU5tCk1HUXdEZ1lEVlIwUEFRSC9CQVFEQWdFR01CSUdBMVVkRXdFQi93UUlNQVlCQWY4Q0FRSXdIUVlEVlIwT0JCWUUKRklaN3NZczRjV0xtYnlwVUEwWUhGanc3Mk5jV01COEdBMVVkSXdRWU1CYUFGSVo3c1lzNGNXTG1ieXBVQTBZSApGanc3Mk5jV01BMEdDU3FHU0liM0RRRUJDd1VBQTRJQkFRQ2Eyell1NmVqMlRURWcyN1VOeGh4U0ZMaFJLTHhSClg5WnoyTmtuVjFQMXhMcU8xSHRUSmFYajgvL0wxUmdkQlRpR3JEOXNENGxCdFRRMmF2djJFQzZKeXJyS0xVelUKSWNiUXNpU0h4NkQ3S1FFWjFxQnZkNWRKVDluai9mMG94SjlxNDVmZTBJbWNiUndKWnA2WDJKbWtQSWZyYjYreQo2YUFTbzhaakliTktQN1Z1WndIQ1RPQUwzeUhVR2lJTEJtT1hKNldGRDlpTWVFMytPZE95ZHIwYzNvUmRXVW1aCkI1andlN2x2MEtVc2Y1SnBTS0JCbzZ3bkViNXhMdDRSYjBMa2RxMXZLTGFOMXUvbXFFc1ltbUk3MmRuaUdLSTkKakdDdkRqNVREaW55T1RQU005Vi81RE5OTFlLQkExaDRDTmVBRjE1RWlCay9EU055SzIrUTF3TVgKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
  namespace: a3ViZS1zeXN0ZW0=
  token: ZXlKaGJHY2lPaUpTVXpJMU5pSXNJbXRwWkNJNklpSjkuZXlKcGMzTWlPaUpyZFdKbGNtNWxkR1Z6TDNObGNuWnBZMlZoWTJOdmRXNTBJaXdpYTNWaVpYSnVaWFJsY3k1cGJ5OXpaWEoyYVdObFlXTmpiM1Z1ZEM5dVlXMWxjM0JoWTJVaU9pSnJkV0psTFhONWMzUmxiU0lzSW10MVltVnlibVYwWlhNdWFXOHZjMlZ5ZG1salpXRmpZMjkxYm5RdmMyVmpjbVYwTG01aGJXVWlPaUpoWkcxcGJpMTFjMlZ5TFhSdmEyVnVMVFI2WkdkM0lpd2lhM1ZpWlhKdVpYUmxjeTVwYnk5elpYSjJhV05sWVdOamIzVnVkQzl6WlhKMmFXTmxMV0ZqWTI5MWJuUXVibUZ0WlNJNkltRmtiV2x1TFhWelpYSWlMQ0pyZFdKbGNtNWxkR1Z6TG1sdkwzTmxjblpwWTJWaFkyTnZkVzUwTDNObGNuWnBZMlV0WVdOamIzVnVkQzUxYVdRaU9pSTNNak0zT0dNM09DMWxaVGRrTFRFeFpUZ3RZVEpoTnkwd01EQmpNamt6TVdaaU9UY2lMQ0p6ZFdJaU9pSnplWE4wWlcwNmMyVnlkbWxqWldGalkyOTFiblE2YTNWaVpTMXplWE4wWlcwNllXUnRhVzR0ZFhObGNpSjkuSjBNakNTQVAwMFJEdlFnRzF4QlB2QVlWbzFveWNYZm9CaDBkcWRDelgxQnlJTHlDSFVxcXhpeHVRZGZFLXBacVAxNXU2VVY4T0YzbEdJX21IczVEQnZOSzBwQ2ZhUklDU280U1hTaWhKSEtsX2o5QmJvenE5UGpRNWQ3Q3FIT0ZvWGswNHEwbVdwSjVvMHJKNkpYNlBzeDkzQ2gwdWFYUFBNTHR6TDBrb2xJRjBqMXRDRm5zb2I4bW9jekgwNmhmem8zc2c4aDBZQ1h5TzZaMTBWVDdHTXVMbHdpRzhYZ1djcGxtLXZjUG9ZX0FXSG5MVjNSd0FKSDB1MXEwSXJNcHJ2Z1RDdUhpZ2hUYVNqUGVVZTJWc1hNaERwb2NKTW9IUU9vSGlyUUttaUlBbmFuYkltNE4xVE9fNVIxY3FoLV9nSDctTUg4eGVmZ1dYb1NyTy1mbzJ3
kind: Secret
metadata:
  annotations:
    kubernetes.io/service-account.name: admin-user
    kubernetes.io/service-account.uid: 72378c78-ee7d-11e8-a2a7-000c2931fb97
  creationTimestamp: 2018-11-22T17:38:38Z
  name: admin-user-token-4zdgw
  namespace: kube-system
  resourceVersion: "977"
  selfLink: /api/v1/namespaces/kube-system/secrets/admin-user-token-4zdgw
  uid: 7239bb01-ee7d-11e8-8c5c-000c29fd1c0f
type: kubernetes.io/service-account-token
A ServiceAccount is a kind of account, but not one meant for cluster users (administrators, operators, and so on); it is used by the processes running inside the cluster's Pods.
[root@master ~]# kubectl get serviceaccount --all-namespaces
NAMESPACE     NAME                   SECRETS   AGE
default       default                1         10h
kube-public   default                1         10h
kube-system   admin-user             1         10h
kube-system   coredns                1         10h
kube-system   default                1         10h
kube-system   flannel                1         10h
kube-system   kubernetes-dashboard   1         10h
kube-system   metrics-server         1         10h
[root@master ~]# kubectl describe serviceaccount/default -n kube-system
Name:                default
Namespace:           kube-system
Labels:              <none>
Annotations:         <none>
Image pull secrets:  <none>
Mountable secrets:   default-token-zk5rj
Tokens:              default-token-zk5rj
Events:              <none>
[root@master ~]# kubectl get secret/default-token-zk5rj -n kube-system
NAME                  TYPE                                  DATA   AGE
default-token-zk5rj   kubernetes.io/service-account-token   3      10h
[root@master ~]# kubectl get secret/default-token-zk5rj -n kube-system -o yaml
apiVersion: v1
data:
  ca.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUR0akNDQXA2Z0F3SUJBZ0lVVFB3YVdFR0gyT2kwaHlVeGlJWnhFSUF3UFpVd0RRWUpLb1pJaHZjTkFRRUwKQlFBd1lURUxNQWtHQTFVRUJoTUNRMDR4RVRBUEJnTlZCQWdUQ0VoaGJtZGFhRzkxTVFzd0NRWURWUVFIRXdKWQpVekVNTUFvR0ExVUVDaE1EYXpoek1ROHdEUVlEVlFRTEV3WlRlWE4wWlcweEV6QVJCZ05WQkFNVENtdDFZbVZ5CmJtVjBaWE13SGhjTk1UZ3hNVEl5TVRZeE1EQXdXaGNOTXpNeE1URTRNVFl4TURBd1dqQmhNUXN3Q1FZRFZRUUcKRXdKRFRqRVJNQThHQTFVRUNCTUlTR0Z1WjFwb2IzVXhDekFKQmdOVkJBY1RBbGhUTVF3d0NnWURWUVFLRXdOcgpPSE14RHpBTkJnTlZCQXNUQmxONWMzUmxiVEVUTUJFR0ExVUVBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKCktvWklodmNOQVFFQkJRQURnZ0VQQURDQ0FRb0NnZ0VCQU5zV1NweVVRcGYvWDFCaHNtUS9NUDVHVE0zcUFjWngKV3lKUjB0VEtyUDVWNStnSjNZWldjK01HSzlrY3h6OG1RUUczdldvNi9ENHIyZ3RuREVWaWxRb1dlTm0rR3hLSwpJNjkzczNlS2ovM1dIdGloOVA4TWp0RktMWnRvSzRKS09kUURYeGFHLzJNdzJEMmZnbzNJT2VDdlZzR0F3Qlc4ClYxMDh3dUVNdTIzMnhybFdSSFFWaTNyc0dmN3pJbkZzSFNOWFFDbXRMMHhubERlYnZjK2c2TWRtcWZraVZSdzIKNTFzZGxnbmV1aEFqVFJaRkYvT0lFWE4yUjIyYTJqZVZDbWNySEcvK2orU0tzTlpmeVVCb216NGRUcmRsV0JEUQpob3ZzSGkrTEtJVGNxZHBQV3MrZmxIQjlaL1FRUnM5MTZEREpxMHRWNFV6MEY0YjRsemJXaGdrQ0F3RUFBYU5tCk1HUXdEZ1lEVlIwUEFRSC9CQVFEQWdFR01CSUdBMVVkRXdFQi93UUlNQVlCQWY4Q0FRSXdIUVlEVlIwT0JCWUUKRklaN3NZczRjV0xtYnlwVUEwWUhGanc3Mk5jV01COEdBMVVkSXdRWU1CYUFGSVo3c1lzNGNXTG1ieXBVQTBZSApGanc3Mk5jV01BMEdDU3FHU0liM0RRRUJDd1VBQTRJQkFRQ2Eyell1NmVqMlRURWcyN1VOeGh4U0ZMaFJLTHhSClg5WnoyTmtuVjFQMXhMcU8xSHRUSmFYajgvL0wxUmdkQlRpR3JEOXNENGxCdFRRMmF2djJFQzZKeXJyS0xVelUKSWNiUXNpU0h4NkQ3S1FFWjFxQnZkNWRKVDluai9mMG94SjlxNDVmZTBJbWNiUndKWnA2WDJKbWtQSWZyYjYreQo2YUFTbzhaakliTktQN1Z1WndIQ1RPQUwzeUhVR2lJTEJtT1hKNldGRDlpTWVFMytPZE95ZHIwYzNvUmRXVW1aCkI1andlN2x2MEtVc2Y1SnBTS0JCbzZ3bkViNXhMdDRSYjBMa2RxMXZLTGFOMXUvbXFFc1ltbUk3MmRuaUdLSTkKakdDdkRqNVREaW55T1RQU005Vi81RE5OTFlLQkExaDRDTmVBRjE1RWlCay9EU055SzIrUTF3TVgKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
  namespace: a3ViZS1zeXN0ZW0=
  token: ZXlKaGJHY2lPaUpTVXpJMU5pSXNJbXRwWkNJNklpSjkuZXlKcGMzTWlPaUpyZFdKbGNtNWxkR1Z6TDNObGNuWnBZMlZoWTJOdmRXNTBJaXdpYTNWaVpYSnVaWFJsY3k1cGJ5OXpaWEoyYVdObFlXTmpiM1Z1ZEM5dVlXMWxjM0JoWTJVaU9pSnJkV0psTFhONWMzUmxiU0lzSW10MVltVnlibVYwWlhNdWFXOHZjMlZ5ZG1salpXRmpZMjkxYm5RdmMyVmpjbVYwTG01aGJXVWlPaUprWldaaGRXeDBMWFJ2YTJWdUxYcHJOWEpxSWl3aWEzVmlaWEp1WlhSbGN5NXBieTl6WlhKMmFXTmxZV05qYjNWdWRDOXpaWEoyYVdObExXRmpZMjkxYm5RdWJtRnRaU0k2SW1SbFptRjFiSFFpTENKcmRXSmxjbTVsZEdWekxtbHZMM05sY25acFkyVmhZMk52ZFc1MEwzTmxjblpwWTJVdFlXTmpiM1Z1ZEM1MWFXUWlPaUpoTkRKaE9EUmlaQzFsWlRkakxURXhaVGd0T0dNMVl5MHdNREJqTWpsbVpERmpNR1lpTENKemRXSWlPaUp6ZVhOMFpXMDZjMlZ5ZG1salpXRmpZMjkxYm5RNmEzVmlaUzF6ZVhOMFpXMDZaR1ZtWVhWc2RDSjkuSTBqQnNkVk1udUw1Q2J2VEtTTGtvcFFyd1h4NTlPNWt0YnVJUHVaemVNTjJjdmNvTE9icS1Xa0NRWWVaaDEwdUFsWVBUbnAtTkxLTFhLMUlrQVpab3dzcllKVmJsQmdQVmVOUDhtOWJ4dk5HXzlMVjcyNGNOaU1aT2pfQ0ExREJEVF91eHlXWlF0eUEwZ0RpeTBRem1zMnZrVEpaZFNHQUZ6V2NVdjA1QWlsdUxaUUhLZmMyOWpuVGJERUhxT2U1UXU2cjRXd05qLTA0SE5qUzFpMHpzUGFkbmR0bzVSaUgtcThaSTVVT3hsNGYyUXlTMlJrWmdtV0tEM2tRaVBWUHpLZDRqRmJsLWhHN3VhQjdBSUVwcHBaUzVYby1USEFhRjJTSi1SUUJfenhDTG42QUZhU0EwcVhrYWhGYmpET0s0OTlZRTVlblJrNkpIRmZVWnR0YmlB
kind: Secret
metadata:
  annotations:
    kubernetes.io/service-account.name: default
    kubernetes.io/service-account.uid: a42a84bd-ee7c-11e8-8c5c-000c29fd1c0f
  creationTimestamp: 2018-11-22T17:32:53Z
  name: default-token-zk5rj
  namespace: kube-system
  resourceVersion: "175"
  selfLink: /api/v1/namespaces/kube-system/secrets/default-token-zk5rj
  uid: a42daa94-ee7c-11e8-8c5c-000c29fd1c0f
type: kubernetes.io/service-account-token
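A minimal illustration of the same mechanics (demo-sa is a hypothetical name; in v1.12, creating a ServiceAccount automatically generates a token secret for it):

kubectl create serviceaccount demo-sa        # hypothetical ServiceAccount name
kubectl describe serviceaccount demo-sa      # 'Mountable secrets' points at the generated token secret
kubectl get secret | grep demo-sa            # the auto-generated token secret itself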
Installing all steps in one go has the same effect as the step-by-step installation:
ansible-playbook 90.setup.yml
Check cluster info:
[root@master ~]# kubectl cluster-info
Kubernetes master is running at https://192.168.2.16:8443
CoreDNS is running at https://192.168.2.16:8443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
kubernetes-dashboard is running at https://192.168.2.16:8443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy
Check node/pod resource usage:
kubectl top node
kubectl top pod --all-namespaces
a) Create an nginx service
kubectl run nginx --image=nginx --expose --port=80
b) Create an alpine test pod
kubectl run b1 -it --rm --image=alpine /bin/sh   // drops you into a shell inside the alpine pod
nslookup nginx.default.svc.cluster.local   // output:
Address 1: 10.68.167.102 nginx.default.svc.cluster.local
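Still inside the alpine pod, you can also confirm the service answers over HTTP (a sketch; busybox wget ships with the alpine image):

wget -qO- http://nginx.default.svc.cluster.local | head -n 4   # should print the top of the nginx welcome page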
Adding a node:
1) Set up passwordless SSH from the deploy node to the new node
ssh-copy-id <new node IP>
2) Edit /etc/ansible/hosts
[new-node]
192.168.2.15
3) Run the installation playbook
ansible-playbook /etc/ansible/20.addnode.yml
4) Verify
kubectl get node
kubectl get pod -n kube-system -o wide
5) Follow-up
Edit /etc/ansible/hosts and move all the IPs under [new-node] into the [kube-node] group, as shown below.
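For example, the groups would end up looking like this (a sketch, assuming the single new node above):

[kube-node]
192.168.2.11
192.168.2.12
192.168.2.13
192.168.2.15

[new-node]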
Adding a master node (omitted):
https://github.com/gjmzj/kubeasz/blob/master/docs/op/AddMaster.md
Upgrading the cluster:
1) Back up etcd
ETCDCTL_API=3 etcdctl snapshot save backup.db
Inspect the backup file:
ETCDCTL_API=3 etcdctl --write-out=table snapshot status backup.db
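The bare snapshot save above assumes a local etcd endpoint. When running etcdctl elsewhere, point it at an endpoint with the cluster certificates (a sketch, reusing the certificate paths from the health check earlier):

ETCDCTL_API=3 etcdctl --endpoints=https://192.168.2.10:2379 \
  --cacert=/etc/kubernetes/ssl/ca.pem \
  --cert=/etc/etcd/ssl/etcd.pem \
  --key=/etc/etcd/ssl/etcd-key.pem \
  snapshot save backup.db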
2) Go to the root directory of this project, kubeasz
cd /dir/to/kubeasz
Pull the latest code:
git pull origin master
3) Download the Kubernetes binary package for the target version (Baidu Netdisk: https://pan.baidu.com/s/1c4RFaA#list/path=%2F), unpack it, and replace the binaries under /etc/ansible/bin/
4) Docker upgrade (omitted); unless there is a specific need, frequent Docker upgrades are not recommended
5) If a service interruption is acceptable, run:
ansible-playbook -t upgrade_k8s,restart_dockerd 22.upgrade.yml
6) If even a brief interruption is unacceptable, do the following:
a)
ansible-playbook -t upgrade_k8s 22.upgrade.yml
b) On every node, one at a time:
kubectl cordon <node> && kubectl drain <node>   // migrate the workload pods off the node
systemctl restart docker
kubectl uncordon <node>   // let pods come back
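For example, a concrete sequence for one node (a sketch using node 192.168.2.11 from this cluster; drain flags as named in v1.12):

kubectl cordon 192.168.2.11
kubectl drain 192.168.2.11 --ignore-daemonsets --delete-local-data   # evict workload pods
ssh 192.168.2.11 systemctl restart docker
kubectl uncordon 192.168.2.11                                        # allow pods to schedule back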
Backup and restore:
1) How backup and restore work:
Backup saves the data of the running etcd cluster to a file on disk; restore loads that backup file back into the etcd cluster and then rebuilds the whole cluster from it.
2) For a cluster created with the kubeasz project, besides the etcd data you also need to back up the CA certificate files and the ansible hosts file.
3)手動操做步驟:
mkdir -p ~/backup/k8s   // create the backup directory
ETCDCTL_API=3 etcdctl snapshot save ~/backup/k8s/snapshot.db   // back up the etcd data
cp /etc/kubernetes/ssl/ca* ~/backup/k8s/   // back up the CA certificates
Run on the deploy node:
ansible-playbook /etc/ansible/99.clean.yml   // simulate a cluster crash
The recovery steps are as follows (on the deploy node):
a) Restore the CA certificates
mkdir -p /etc/kubernetes/ssl /backup/k8s
cp ~/backup/k8s/* /backup/k8s/
cp /backup/k8s/ca* /etc/kubernetes/ssl/
b) Rebuild the cluster
Only the first five playbooks need to run; everything else is kept in etcd.
cd /etc/ansible
ansible-playbook 01.prepare.yml
ansible-playbook 02.etcd.yml
ansible-playbook 03.docker.yml
ansible-playbook 04.kube-master.yml
ansible-playbook 05.kube-node.yml
c) Restore the etcd data
Stop the service:
ansible etcd -m service -a 'name=etcd state=stopped'
Clear the data files:
ansible etcd -m file -a 'name=/var/lib/etcd/member/ state=absent'
Log in to each etcd node and, referring to that node's /etc/systemd/system/etcd.service unit file, substitute the {{ }} variables below, then run:
cd /backup/k8s/
ETCDCTL_API=3 etcdctl snapshot restore snapshot.db \
  --name etcd1 \
  --initial-cluster etcd1=https://192.168.2.10:2380,etcd2=https://192.168.2.11:2380,etcd3=https://192.168.2.12:2380 \
  --initial-cluster-token etcd-cluster-0 \
  --initial-advertise-peer-urls https://192.168.2.10:2380
This generates a {{ NODE_NAME }}.etcd directory:
cp -r etcd1.etcd/member /var/lib/etcd/
systemctl restart etcd
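For example, on the second etcd node the restore would look like this (a sketch; node names and peer URLs follow the plan above):

cd /backup/k8s/
ETCDCTL_API=3 etcdctl snapshot restore snapshot.db \
  --name etcd2 \
  --initial-cluster etcd1=https://192.168.2.10:2380,etcd2=https://192.168.2.11:2380,etcd3=https://192.168.2.12:2380 \
  --initial-cluster-token etcd-cluster-0 \
  --initial-advertise-peer-urls https://192.168.2.11:2380
cp -r etcd2.etcd/member /var/lib/etcd/
systemctl restart etcd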
d) Rebuild the network from the deploy node
ansible-playbook /etc/ansible/tools/change_k8s_network.yml
4) If you would rather not restore by hand, ansible can do it automatically.
First take a one-click backup:
ansible-playbook /etc/ansible/23.backup.yml
Check that files exist under /etc/ansible/roles/cluster-backup/files:
tree /etc/ansible/roles/cluster-backup/files/   // output:
├── ca   # cluster CA backup
|   ├── ca-config.json
|   ├── ca.csr
|   ├── ca-csr.json
|   ├── ca-key.pem
|   └── ca.pem
├── hosts   # ansible hosts backup
|   ├── hosts   # most recent backup
|   └── hosts-201807231642
|── readme.md
└── snapshot   # etcd data backup
    ├── snapshot-201807231642.db
    └── snapshot.db   # most recent backup
Simulate a failure:
ansible-playbook /etc/ansible/99.clean.yml
Edit /etc/ansible/roles/cluster-restore/defaults/main.yml to point at the etcd snapshot backup you want to restore; if left unmodified, the most recent one is used.
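For example (a sketch; db_to_restore is the variable the kubeasz cluster-restore role used at the time of writing):

# /etc/ansible/roles/cluster-restore/defaults/main.yml
db_to_restore: "snapshot-201807231642.db"   # name a specific snapshot instead of the default snapshot.db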
恢復操做:
ansible-playbook /etc/ansible/24.restore.yml
ansible-playbook /etc/ansible/tools/change_k8s_network.yml
OS-level security hardening for all cluster nodes:
ansible-playbook roles/os-harden/os-harden.yml
For details, see the os-harden project.
References:
This document is based on https://github.com/gjmzj/kubeasz. Further reading on deploying a cluster with kubeadm: https://blog.frognew.com/2018/08/kubeadm-install-kubernetes-1.11.html