ansible自動部署k8s(集羣環境全爲ubuntu18.04)
node
root@k8s-master1:~# vim /etc/netplan/01-netcfg.yaml network: version: 2 renderer: networkd ethernets: eth0: dhcp4: no addresses: [192.168.30.10/24] gateway4: 192.168.30.2 nameservers: addresses: [192.168.30.2]
root@k8s-master1:~# netplan apply
root@k8s-master1:~# ip a 1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000 link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 inet 127.0.0.1/8 scope host lo valid_lft forever preferred_lft forever inet6 ::1/128 scope host valid_lft forever preferred_lft forever 2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000 link/ether 00:0c:29:83:c3:2e brd ff:ff:ff:ff:ff:ff inet 192.168.30.10/24 brd 192.168.30.255 scope global eth0 valid_lft forever preferred_lft forever inet6 fe80::20c:29ff:fe83:c32e/64 scope link valid_lft forever preferred_lft forever root@k8s-master1:~# ping www.baidu.com PING www.a.shifen.com (61.135.169.125) 56(84) bytes of data. 64 bytes from 61.135.169.125 (61.135.169.125): icmp_seq=1 ttl=128 time=7.14 ms 64 bytes from 61.135.169.125 (61.135.169.125): icmp_seq=2 ttl=128 time=7.39 ms 64 bytes from 61.135.169.125 (61.135.169.125): icmp_seq=3 ttl=128 time=7.11 ms ……
root@k8s-master1:~# vim /etc/apt/sources.list deb http://mirrors.aliyun.com/ubuntu/ bionic main restricted universe multiverse deb-src http://mirrors.aliyun.com/ubuntu/ bionic main restricted universe multiverse deb http://mirrors.aliyun.com/ubuntu/ bionic-security main restricted universe multiverse deb-src http://mirrors.aliyun.com/ubuntu/ bionic-security main restricted universe multiverse deb http://mirrors.aliyun.com/ubuntu/ bionic-updates main restricted universe multiverse deb-src http://mirrors.aliyun.com/ubuntu/ bionic-updates main restricted universe multiverse deb http://mirrors.aliyun.com/ubuntu/ bionic-proposed main restricted universe multiverse deb-src http://mirrors.aliyun.com/ubuntu/ bionic-proposed main restricted universe multiverse deb http://mirrors.aliyun.com/ubuntu/ bionic-backports main restricted universe multiverse deb-src http://mirrors.aliyun.com/ubuntu/ bionic-backports main restricted universe multiverse
root@k8s-master1:~# apt-get update
root@k8s-master1:~# apt-get upgrade
root@k8s-master1:~# apt-get install python2.7 python-pip -y root@k8s-master1:~# ln -s /usr/bin/python2.7 /usr/bin/python #建立一個軟鏈接
curl -fsSL https://get.docker.com | bash -s docker --mirror Aliyun #在各節點執行此命令安裝docker 10)全部須要在harbor倉庫拉取鏡像的節點,須要編輯名字解析hosts文件或指定配置好的DNS服務 root@k8s-master1:~#vim /etc/hosts …… 192.168.30.40 www.harbor.com ……
[root@harbor1 src]# pwd /usr/local/src [root@harbor1 src]# ll -rw-r--r-- 1 root root 580059210 Jul 5 16:47 harbor-offline-installer-v1.7.5.tgz
[root@harbor1 src]# tar xvf harbor-offline-installer-v1.7.5.tgz
[root@harbor1 src]# cd harbor/ [root@harbor1 harbor]# ll total 572840 drwxr-xr-x 3 root root 23 Aug 14 19:08 common -rw-r--r-- 1 root root 939 Apr 1 12:07 docker-compose.chartmuseum.yml -rw-r--r-- 1 root root 975 Apr 1 12:07 docker-compose.clair.yml -rw-r--r-- 1 root root 1434 Apr 1 12:07 docker-compose.notary.yml -rw-r--r-- 1 root root 5608 Apr 1 12:07 docker-compose.yml -rw-r--r-- 1 root root 8033 Apr 1 12:07 harbor.cfg -rw-r--r-- 1 root root 585234819 Apr 1 12:08 harbor.v1.7.5.tar.gz -rwxr-xr-x 1 root root 5739 Apr 1 12:07 install.sh -rw-r--r-- 1 root root 11347 Apr 1 12:07 LICENSE -rw-r--r-- 1 root root 1263409 Apr 1 12:07 open_source_license -rwxr-xr-x 1 root root 36337 Apr 1 12:07 prepare
[root@harbor1 harbor]# mkdir certs [root@harbor1 certs]# openssl genrsa -out harbor_ca.key #生成證書私鑰 Generating RSA private key, 2048 bit long modulus ...................+++ ............................................................................................................................+++ e is 65537 (0x10001) [root@harbor1 certs]# ll #查看生成的證書私鑰 total 4 -rw-r--r-- 1 root root 1679 Aug 14 19:20 harbor_ca.key [root@harbor1 harbor]# openssl req -x509 -new -nodes -key harbor_ca.key -subj /CN=www.harbor.com -days 3650 -out harbor_ca.crt #生成證書文件
[root@harbor1 harbor]# vim harbor.cfg hostname = www.harbor.com #訪問harbor服務器的域名 ui_url_protocol = https #強制指定爲https加密 ssl_cert = /usr/local/src/harbor/certs/harbor_ca.crt #指定公鑰文件(即自簽名證書)路徑 ssl_cert_key = /usr/local/src/harbor/certs/harbor_ca.key #指定私鑰文件路徑 harbor_admin_password = 123456 #登陸harbor倉庫的密碼 ……
[root@harbor1 harbor]# vim /etc/hosts …… 192.168.30.40 www.harbor.com
[root@harbor1 harbor]# curl -fsSL https://get.docker.com | bash -s docker --mirror Aliyun #安裝docker [root@harbor1 harbor]# apt-get install docker-compose -y #安裝docker-compose
[root@harbor1 ~]# systemctl start docker [root@harbor1 ~]# systemctl enable docker
[root@harbor1 harbor]# pwd /usr/local/src/harbor [root@harbor1 harbor]# ./install.sh
[root@k8s-master1 ~]#mkdir -p /etc/docker/certs.d/www.harbor.com/ #先在k8s-master1節點建立docker下的存放harbor服務的證書目錄 [root@harbor1 ~]# scp /usr/local/src/harbor/certs/harbor_ca.crt 192.168.30.10:/etc/docker/certs.d/www.harbor.com/ #在harbor節點複製證書至master節點
[root@k8s-master1 ~]# docker login www.harbor.com Username: admin Password: Login Succeeded root@k8s-master1:~# cat /root/.docker/config.json #以上登陸成功後會自動生成此harbor服務的認證文件,下次登陸就無需密碼 { "auths": { "www.harbor.com": { "auth": "YWRtaW46MTIzNDU2" } }, "HttpHeaders": { "User-Agent": "Docker-Client/19.03.1 (linux)" }
[root@k8s-master1 ~]# docker pull alpine Using default tag: latest Trying to pull repository docker.io/library/alpine ... latest: Pulling from docker.io/library/alpine 050382585609: Pull complete Digest: sha256:6a92cd1fcdc8d8cdec60f33dda4db2cb1fcdcacf3410a8e05b3741f44a9b5998 Status: Downloaded newer image for docker.io/alpine:latest
[root@k8s-master1 ~]# docker tag alpine:latest www.harbor.com/base-images/alpine:v1 #修改tag版本號 [root@k8s-master1 ~]# docker push www.harbor.com/base-images/alpine:v1 #推送至harbor倉庫 The push refers to a repository [www.harbor.com/base-images/alpine] 1bfeebd65323: Pushed v1: digest: sha256:57334c50959f26ce1ee025d08f136c2292c128f84e7b229d1b0da5dac89e9866 size: 528
[root@k8s-master1 ~]# apt-get install sshpass -y
[root@k8s-master1 ~]# ssh-keygen The key's randomart image is: +---[RSA 2048]----+ | +oo.=X=o. | |. o ==oBo. | |.. +.o*.= | | .o..ooBooo | | . ..=.SE | | o =++.. | | . o.o | | | | | +----[SHA256]-----+
[root@harbor1 ~]# scp /usr/local/src/harbor/certs/harbor_ca.crt 192.168.30.10:/etc/docker/certs.d/www.harbor.com
[root@k8s-master1 data]# vim scp_pass_crt.sh
#!/bin/bash
# Distribute the SSH public key and the Harbor CA certificate /
# docker login credentials to every cluster node.
# Target host list (word-splitting of ${IP} below is intentional).
IP="192.168.30.10 \
192.168.30.11 \
192.168.30.40 \
192.168.30.50 \
192.168.30.51 \
192.168.30.52 \
192.168.30.60 \
192.168.30.61 \
192.168.30.62"
for node in ${IP};do
  # NOTE: ssh-copy-id options must come BEFORE the hostname; the original
  # placed -o after ${node}, which recent OpenSSH versions reject.
  if sshpass -p 123456 ssh-copy-id -o StrictHostKeyChecking=no "${node}";then
    echo "${node} 祕鑰copy完成"
    echo "${node} 祕鑰copy完成,準備環境初始化....."
    # Create the docker cert directory for the Harbor domain on the node.
    ssh "${node}" "mkdir /etc/docker/certs.d/www.harbor.com -p"
    echo "Harbor 證書目錄建立成功!"
    # Copy the Harbor CA cert and the docker auth file so each node can
    # pull from the private registry without logging in again.
    scp /etc/docker/certs.d/www.harbor.com/harbor_ca.crt "${node}":/etc/docker/certs.d/www.harbor.com/harbor_ca.crt
    scp -r /root/.docker "${node}":/root/ #複製docker登陸harbor倉庫生成的認證文件至各節點
    echo "Harbor 證書拷貝成功!"
  else
    echo "${node} 祕鑰copy失敗"
  fi
done
[root@k8s-master1 data]# bash scp_pass_crt.sh
[root@keepalive_haproxy ~]# vim /etc/keepalived/keepalived.conf ! Configuration File for keepalived global_defs { notification_email { acassen@firewall.loc failover@firewall.loc sysadmin@firewall.loc } notification_email_from Alexandre.Cassen@firewall.loc smtp_server 192.168.200.1 smtp_connect_timeout 30 router_id keep1 vrrp_skip_check_adv_addr # vrrp_strict vrrp_iptables vrrp_garp_interval 0 vrrp_gna_interval 0 } vrrp_instance VI_1 { state MASTER interface eth0 virtual_router_id 51 priority 100 advert_int 1 authentication { auth_type PASS auth_pass 1111 } virtual_ipaddress { 192.168.30.24 dev eth0 label eth0:0 } unicast_src_ip 192.168.30.20 unicast_peer{ 192.168.30.21 } }
…… listen stats mode http bind 0.0.0.0:9999 stats enable log global stats uri /haproxy-status stats auth admin:123456 listen web_port bind 192.168.30.24:6443 mode tcp log global server web1 192.168.30.10:6443 check inter 3000 fall 2 rise 5 #先暫時調度一個master節點 #server web2 192.168.30.11:6443 check inter 3000 fall 2 rise 5
[root@k8s-master1 ~]# apt-get install ansible -y #安裝ansible（集羣爲ubuntu 18.04，使用apt-get而非yum）
[root@k8s-master1 ~]# git clone -b 0.6.1 https://github.com/easzlab/kubeasz
[root@k8s-master1 ~]# mv /etc/ansible/* /temp_file/
[root@k8s-master1 ~]# mv kubeasz/* /etc/ansible/
[root@k8s-master1 ansible]# cp example/hosts.m-masters.example ./hosts #
[root@k8s-master1 ansible]# vim hosts # 集羣部署節點:通常爲運行ansible 腳本的節點 # 變量 NTP_ENABLED (=yes/no) 設置集羣是否安裝 chrony 時間同步 [deploy] 192.168.30.10 NTP_ENABLED=yes #集羣安裝chrony 時間同步 # etcd集羣請提供以下NODE_NAME,注意etcd集羣必須是1,3,5,7...奇數個節點 [etcd] 192.168.30.50 NODE_NAME=etcd1 192.168.30.51 NODE_NAME=etcd2 192.168.30.52 NODE_NAME=etcd3 [new-etcd] # 預留組,後續添加etcd節點使用 #192.168.1.x NODE_NAME=etcdx [kube-master] 192.168.30.10 [new-master] # 預留組,後續添加master節點使用 #192.168.1.5 [kube-node] 192.168.30.60 192.168.30.61 [new-node] # 預留組,後續添加node節點使用 #192.168.30.xx # 參數 NEW_INSTALL:yes表示新建,no表示使用已有harbor服務器 # 若是不使用域名,能夠設置 HARBOR_DOMAIN="" [harbor] #192.168.1.8 HARBOR_DOMAIN="harbor.yourdomain.com" NEW_INSTALL=no # 負載均衡(目前已支持多於2節點,通常2節點就夠了) 安裝 haproxy+keepalived #[lb] #192.168.1.1 LB_ROLE=backup #192.168.1.2 LB_ROLE=master #【可選】外部負載均衡,用於自有環境負載轉發 NodePort 暴露的服務等 #[ex-lb] #192.168.1.6 LB_ROLE=backup EX_VIP=192.168.1.250 #192.168.1.7 LB_ROLE=master EX_VIP=192.168.1.250 [all:vars] # ---------集羣主要參數--------------- #集羣部署模式:allinone, single-master, multi-master DEPLOY_MODE=multi-master #集羣主版本號,目前支持: v1.8, v1.9, v1.10,v1.11, v1.12, v1.13 K8S_VER="v1.13" # 集羣 MASTER IP即 LB節點VIP地址,爲區別與默認apiserver端口,設置VIP監聽的服務端口8443 # 公有云上請使用雲負載均衡內網地址和監聽端口 MASTER_IP="192.168.30.24" KUBE_APISERVER="https://{{ MASTER_IP }}:6443" # 集羣網絡插件,目前支持calico, flannel, kube-router, cilium CLUSTER_NETWORK="calico" # 服務網段 (Service CIDR),注意不要與內網已有網段衝突 SERVICE_CIDR="10.20.0.0/16" # POD 網段 (Cluster CIDR),注意不要與內網已有網段衝突 CLUSTER_CIDR="172.20.0.0/16" # 服務端口範圍 (NodePort Range) NODE_PORT_RANGE="30000-60000" # kubernetes 服務 IP (預分配,通常是 SERVICE_CIDR 中第一個IP) CLUSTER_KUBERNETES_SVC_IP="10.20.0.1" # 集羣 DNS 服務 IP (從 SERVICE_CIDR 中預分配) CLUSTER_DNS_SVC_IP="10.20.0.2" # 集羣 DNS 域名 CLUSTER_DNS_DOMAIN="jie.local." # 集羣basic auth 使用的用戶名和密碼 BASIC_AUTH_USER="admin" BASIC_AUTH_PASS="123456" # ---------附加參數-------------------- #默認二進制文件目錄 bin_dir="/usr/bin" #證書目錄 ca_dir="/etc/kubernetes/ssl" #部署目錄,即 ansible 工做目錄,建議不要修改 base_dir="/etc/ansible"
[root@k8s-master1 ~]# ll /data -rw-r--r-- 1 root root 219910183 Jul 6 09:07 k8s.1-13-5.tar.gz
[root@k8s-master1 ~]# tar xvf k8s.1-13-5.tar.gz
[root@k8s-master1 bin]# mv /data/bin/* /etc/ansible/bin/
[root@k8s-master1 bin]# ./kubelet --version Kubernetes v1.13.5
[root@k8s-master1 ansible]# ansible-playbook 01.prepare.yml changed: [192.168.30.10] => (item=cfssljson) changed: [192.168.30.10] => (item=kubectl) TASK [deploy : 讀取ca證書stat信息] ******************************************************************************************** ok: [192.168.30.10] TASK [deploy : 準備CA配置文件] ************************************************************************************************ changed: [192.168.30.10] TASK [deploy : 準備CA簽名請求] ************************************************************************************************ changed: [192.168.30.10] TASK [deploy : 生成 CA 證書和私鑰] ********************************************************************************************* changed: [192.168.30.10] TASK [deploy : 準備kubectl使用的admin 證書籤名請求] ******************************************************************************** changed: [192.168.30.10] TASK [deploy : 建立 admin證書與私鑰] ******************************************************************************************* changed: [192.168.30.10] TASK [deploy : 設置集羣參數] ************************************************************************************************** changed: [192.168.30.10] TASK [deploy : 設置客戶端認證參數] *********************************************************************************************** changed: [192.168.30.10] TASK [deploy : 設置上下文參數] ************************************************************************************************* changed: [192.168.30.10] TASK [deploy : 選擇默認上下文] ************************************************************************************************* changed: [192.168.30.10] TASK [deploy : 準備kube-proxy 證書籤名請求] ************************************************************************************* changed: [192.168.30.10] TASK [deploy : 建立 kube-proxy證書與私鑰] ************************************************************************************** changed: [192.168.30.10] TASK [deploy : 設置集羣參數] 
************************************************************************************************** changed: [192.168.30.10] TASK [deploy : 設置客戶端認證參數] *********************************************************************************************** changed: [192.168.30.10] TASK [deploy : 設置上下文參數] ************************************************************************************************* changed: [192.168.30.10] TASK [deploy : 選擇默認上下文] ************************************************************************************************* changed: [192.168.30.10] TASK [deploy : 移動 kube-proxy.kubeconfig] ******************************************************************************** changed: [192.168.30.10] TASK [deploy : 安裝 rsync] ************************************************************************************************ ok: [192.168.30.10] [WARNING]: Could not match supplied host pattern, ignoring: lb PLAY [kube-master,kube-node,deploy,etcd,lb] ***************************************************************************** TASK [prepare : 刪除centos/redhat默認安裝] ************************************************************************************ changed: [192.168.30.60] changed: [192.168.30.61] changed: [192.168.30.10] changed: [192.168.30.51] changed: [192.168.30.50] changed: [192.168.30.52] TASK [prepare : 添加EPEL倉庫] *********************************************************************************************** changed: [192.168.30.60] changed: [192.168.30.61] changed: [192.168.30.51] changed: [192.168.30.50] changed: [192.168.30.10] changed: [192.168.30.52] TASK [prepare : 安裝基礎軟件包] ************************************************************************************************ changed: [192.168.30.51] changed: [192.168.30.61] changed: [192.168.30.10] changed: [192.168.30.50] changed: [192.168.30.60] changed: [192.168.30.52] TASK [prepare : 臨時關閉 selinux] ******************************************************************************************* changed: 
[192.168.30.60] changed: [192.168.30.61] changed: [192.168.30.50] changed: [192.168.30.10] changed: [192.168.30.51] changed: [192.168.30.52] TASK [prepare : 永久關閉 selinux] ******************************************************************************************* ok: [192.168.30.51] ok: [192.168.30.50] ok: [192.168.30.60] ok: [192.168.30.61] ok: [192.168.30.10] ok: [192.168.30.52] TASK [prepare : 禁用系統 swap] ********************************************************************************************** changed: [192.168.30.10] changed: [192.168.30.60] changed: [192.168.30.61] changed: [192.168.30.52] changed: [192.168.30.51] changed: [192.168.30.50] TASK [prepare : 刪除fstab swap 相關配置] ************************************************************************************** ok: [192.168.30.60] ok: [192.168.30.61] ok: [192.168.30.10] ok: [192.168.30.50] ok: [192.168.30.51] ok: [192.168.30.52] TASK [prepare : 加載內核模塊] ************************************************************************************************* ok: [192.168.30.10] => (item=br_netfilter) changed: [192.168.30.51] => (item=br_netfilter) changed: [192.168.30.61] => (item=br_netfilter) changed: [192.168.30.60] => (item=br_netfilter) changed: [192.168.30.50] => (item=br_netfilter) changed: [192.168.30.51] => (item=ip_vs) changed: [192.168.30.61] => (item=ip_vs) changed: [192.168.30.51] => (item=ip_vs_rr) changed: [192.168.30.50] => (item=ip_vs) changed: [192.168.30.60] => (item=ip_vs) changed: [192.168.30.10] => (item=ip_vs) changed: [192.168.30.61] => (item=ip_vs_rr) changed: [192.168.30.51] => (item=ip_vs_wrr) changed: [192.168.30.50] => (item=ip_vs_rr) changed: [192.168.30.60] => (item=ip_vs_rr) changed: [192.168.30.61] => (item=ip_vs_wrr) changed: [192.168.30.51] => (item=ip_vs_sh) changed: [192.168.30.10] => (item=ip_vs_rr) changed: [192.168.30.50] => (item=ip_vs_wrr) changed: [192.168.30.60] => (item=ip_vs_wrr) changed: [192.168.30.61] => (item=ip_vs_sh) changed: [192.168.30.51] => (item=nf_conntrack_ipv4) 
changed: [192.168.30.60] => (item=ip_vs_sh) changed: [192.168.30.50] => (item=ip_vs_sh) changed: [192.168.30.61] => (item=nf_conntrack_ipv4) changed: [192.168.30.10] => (item=ip_vs_wrr) ok: [192.168.30.51] => (item=nf_conntrack) changed: [192.168.30.60] => (item=nf_conntrack_ipv4) ok: [192.168.30.61] => (item=nf_conntrack) changed: [192.168.30.50] => (item=nf_conntrack_ipv4) ok: [192.168.30.60] => (item=nf_conntrack) ok: [192.168.30.50] => (item=nf_conntrack) changed: [192.168.30.10] => (item=ip_vs_sh) changed: [192.168.30.52] => (item=br_netfilter) ok: [192.168.30.10] => (item=nf_conntrack_ipv4) changed: [192.168.30.52] => (item=ip_vs) changed: [192.168.30.52] => (item=ip_vs_rr) ok: [192.168.30.10] => (item=nf_conntrack) changed: [192.168.30.52] => (item=ip_vs_wrr) changed: [192.168.30.52] => (item=ip_vs_sh) changed: [192.168.30.52] => (item=nf_conntrack_ipv4) ok: [192.168.30.52] => (item=nf_conntrack) TASK [prepare : 啓用systemd自動加載模塊服務] ************************************************************************************** ok: [192.168.30.61] ok: [192.168.30.51] ok: [192.168.30.50] ok: [192.168.30.60] ok: [192.168.30.52] ok: [192.168.30.10] TASK [prepare : 增長內核模塊開機加載配置] ******************************************************************************************* changed: [192.168.30.60] changed: [192.168.30.61] changed: [192.168.30.50] changed: [192.168.30.51] changed: [192.168.30.52] changed: [192.168.30.10] TASK [prepare : 設置系統參數] ************************************************************************************************* changed: [192.168.30.60] changed: [192.168.30.61] changed: [192.168.30.50] changed: [192.168.30.51] changed: [192.168.30.52] changed: [192.168.30.10] TASK [prepare : 生效系統參數] ************************************************************************************************* changed: [192.168.30.60] changed: [192.168.30.61] changed: [192.168.30.10] changed: [192.168.30.50] changed: [192.168.30.51] changed: [192.168.30.52] TASK [prepare : 設置系統 
ulimits] ******************************************************************************************* changed: [192.168.30.60] changed: [192.168.30.61] changed: [192.168.30.50] changed: [192.168.30.51] changed: [192.168.30.52] changed: [192.168.30.10] TASK [prepare : prepare some dirs] ************************************************************************************** ok: [192.168.30.60] => (item=/usr/bin) ok: [192.168.30.61] => (item=/usr/bin) ok: [192.168.30.10] => (item=/usr/bin) ok: [192.168.30.51] => (item=/usr/bin) ok: [192.168.30.50] => (item=/usr/bin) changed: [192.168.30.60] => (item=/etc/kubernetes/ssl) changed: [192.168.30.61] => (item=/etc/kubernetes/ssl) changed: [192.168.30.50] => (item=/etc/kubernetes/ssl) changed: [192.168.30.51] => (item=/etc/kubernetes/ssl) ok: [192.168.30.10] => (item=/etc/kubernetes/ssl) ok: [192.168.30.52] => (item=/usr/bin) changed: [192.168.30.52] => (item=/etc/kubernetes/ssl) TASK [prepare : 分發證書工具 CFSSL] ******************************************************************************************* changed: [192.168.30.60] => (item=cfssl) changed: [192.168.30.61] => (item=cfssl) changed: [192.168.30.51] => (item=cfssl) changed: [192.168.30.50] => (item=cfssl) ok: [192.168.30.10] => (item=cfssl) changed: [192.168.30.60] => (item=cfssl-certinfo) changed: [192.168.30.61] => (item=cfssl-certinfo) changed: [192.168.30.51] => (item=cfssl-certinfo) changed: [192.168.30.50] => (item=cfssl-certinfo) changed: [192.168.30.60] => (item=cfssljson) changed: [192.168.30.51] => (item=cfssljson) changed: [192.168.30.61] => (item=cfssljson) changed: [192.168.30.50] => (item=cfssljson) ok: [192.168.30.10] => (item=cfssl-certinfo) changed: [192.168.30.52] => (item=cfssl) ok: [192.168.30.10] => (item=cfssljson) changed: [192.168.30.52] => (item=cfssl-certinfo) changed: [192.168.30.52] => (item=cfssljson) TASK [prepare : 設置本地 bin 目錄權限] ****************************************************************************************** ok: [192.168.30.10] 
TASK [prepare : 寫入環境變量$PATH] ******************************************************************************************** changed: [192.168.30.60] changed: [192.168.30.61] changed: [192.168.30.50] changed: [192.168.30.10] changed: [192.168.30.51] changed: [192.168.30.52] PLAY [lb] *************************************************************************************************************** skipping: no hosts matched PLAY RECAP ************************************************************************************************************** 192.168.30.10 : ok=38 changed=29 unreachable=0 failed=0 skipped=16 rescued=0 ignored=0 192.168.30.50 : ok=17 changed=13 unreachable=0 failed=0 skipped=14 rescued=0 ignored=0 192.168.30.51 : ok=17 changed=13 unreachable=0 failed=0 skipped=14 rescued=0 ignored=0 192.168.30.52 : ok=17 changed=13 unreachable=0 failed=0 skipped=14 rescued=0 ignored=0 192.168.30.60 : ok=17 changed=13 unreachable=0 failed=0 skipped=14 rescued=0 ignored=0 192.168.30.61 : ok=17 changed=13 unreachable=0 failed=0 skipped=14 rescued=0 ignored=0
[root@k8s-master1 ansible]# bin/etcdctl -v etcdctl version: 3.3.10 API version: 2
[root@k8s-master1 templates]# tail -30 /var/log/messages …… Aug 20 22:45:24 k8s-master1 kube-apiserver: E0820 22:45:24.933021 25576 watcher.go:208] watch chan error: etcdserver: mvcc: required revision has been compacted Aug 20 22:45:24 k8s-master1 kube-controller-manager: W0820 22:45:24.933345 25595 reflector.go:256] k8s.io/client-go/informers/factory.go:132: watch of *v1beta1.Event ended with: The resourceVersion for the provided watch is too old. ……
github官網搜索etcd並點擊進入
找到etcd-3.2.24這個版本
python
點擊Assets資產下載etcd-v3.2.24-linux-amd64.tar.gz壓縮文件
[root@k8s-master1 etcd]# pwd /data/etcd [root@k8s-master1 etcd]# tar xvf etcd-v3.2.24-linux-amd64.tar.gz [root@k8s-master1 etcd]# cp etcd-v3.2.24-linux-amd64/etcd* /etc/ansible/bin/ [root@k8s-master1 ansible]# pwd /etc/ansible [root@k8s-master1 ansible]# bin/etcdctl -v #執行命令檢查etcd版本 etcdctl version: 3.2.24 #版本已更改 API version: 2
[root@k8s-master1 ansible]# ansible-playbook 02.etcd.yml
ok: [192.168.30.51] => (item=/etc/kubernetes/ssl)
changed: [192.168.30.52] => (item=/etc/etcd/ssl)
changed: [192.168.30.50] => (item=/etc/etcd/ssl)
changed: [192.168.30.51] => (item=/etc/etcd/ssl)
changed: [192.168.30.52] => (item=/var/lib/etcd)
changed: [192.168.30.50] => (item=/var/lib/etcd)
changed: [192.168.30.51] => (item=/var/lib/etcd)
TASK [etcd : 下載etcd二進制文件] ****************************************************** changed: [192.168.30.51] => (item=etcd) changed: [192.168.30.50] => (item=etcd) changed: [192.168.30.52] => (item=etcd) changed: [192.168.30.50] => (item=etcdctl) changed: [192.168.30.52] => (item=etcdctl) changed: [192.168.30.51] => (item=etcdctl) TASK [etcd : 分發證書相關] *********************************************************** changed: [192.168.30.50 -> 192.168.30.10] => (item=ca.pem) changed: [192.168.30.52 -> 192.168.30.10] => (item=ca.pem) changed: [192.168.30.51 -> 192.168.30.10] => (item=ca.pem) changed: [192.168.30.52 -> 192.168.30.10] => (item=ca-key.pem) changed: [192.168.30.51 -> 192.168.30.10] => (item=ca-key.pem) changed: [192.168.30.50 -> 192.168.30.10] => (item=ca-key.pem) changed: [192.168.30.52 -> 192.168.30.10] => (item=ca.csr) changed: [192.168.30.50 -> 192.168.30.10] => (item=ca.csr) changed: [192.168.30.51 -> 192.168.30.10] => (item=ca.csr) changed: [192.168.30.52 -> 192.168.30.10] => (item=ca-config.json) changed: [192.168.30.50 -> 192.168.30.10] => (item=ca-config.json) changed: [192.168.30.51 -> 192.168.30.10] => (item=ca-config.json) TASK [etcd : 讀取etcd證書stat信息] *************************************************** ok: [192.168.30.50] ok: [192.168.30.51] ok: [192.168.30.52] TASK [etcd : 建立etcd證書請求] ******************************************************* changed: [192.168.30.50] changed: [192.168.30.51] changed: [192.168.30.52] TASK [etcd : 建立 etcd證書和私鑰] ***************************************************** changed: [192.168.30.52] changed: [192.168.30.51] changed: [192.168.30.50] TASK [etcd : 建立etcd的systemd unit文件] ******************************************** changed: [192.168.30.50] changed: [192.168.30.51] changed: [192.168.30.52] TASK [etcd : 開機啓用etcd服務] ******************************************************* changed: [192.168.30.50] changed: [192.168.30.51] changed: [192.168.30.52] TASK [etcd : 開啓etcd服務] 
********************************************************* changed: [192.168.30.52] changed: [192.168.30.51] changed: [192.168.30.50] TASK [etcd : 以輪詢的方式等待服務同步完成] *************************************************** changed: [192.168.30.50] changed: [192.168.30.51] changed: [192.168.30.52] PLAY RECAP ********************************************************************* 192.168.30.50 : ok=11 changed=9 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 192.168.30.51 : ok=11 changed=9 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 192.168.30.52 : ok=11 changed=9 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
[root@k8s-etcd-1 ~]# systemctl status etcd
git
[root@k8s-etcd-1 ~]# export NODE_IPS="192.168.30.50 192.168.30.51 192.168.30.52" #聲明變量 [root@k8s-etcd-1 ~]# for ip in ${NODE_IPS};do ETCDCTL_API=3 /usr/bin/etcdctl --endpoints=https://${ip}:2379 --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem endpoint health;done https://192.168.30.50:2379 is healthy: successfully committed proposal: took = 1.867785ms #必須都看到 successfully健康成功檢測信息 https://192.168.30.51:2379 is healthy: successfully committed proposal: took = 3.900401ms #必須都看到 successfully健康成功檢測信息 https://192.168.30.52:2379 is healthy: successfully committed proposal: took = 3.015816ms #必須都看到 successfully健康成功檢測信息
[root@k8s-master1 ansible]# ansible-playbook 03.docker.yml
[root@k8s-master1 ansible]# ansible-playbook 04.kube-master.yml
TASK [kube-master : 分發證書相關] *****
ok: [192.168.30.10 -> 192.168.30.10] => (item=admin.pem)
ok: [192.168.30.10 -> 192.168.30.10] => (item=admin-key.pem)
ok: [192.168.30.10 -> 192.168.30.10] => (item=ca.pem)
ok: [192.168.30.10 -> 192.168.30.10] => (item=ca-key.pem)
ok: [192.168.30.10 -> 192.168.30.10] => (item=ca.csr)
ok: [192.168.30.10 -> 192.168.30.10] => (item=ca-config.json)github
TASK [kube-master : 建立 kubernetes 證書籤名請求] ******************************************************************************************************* ok: [192.168.30.10] TASK [kube-master : 建立 kubernetes 證書和私鑰] ******************************************************************************************************** changed: [192.168.30.10] TASK [kube-master : 建立 aggregator proxy證書籤名請求] ************************************************************************************************** ok: [192.168.30.10] TASK [kube-master : 建立 aggregator-proxy證書和私鑰] *************************************************************************************************** changed: [192.168.30.10] TASK [kube-master : 建立 basic-auth.csv] ********************************************************************************************************** ok: [192.168.30.10] TASK [kube-master : 建立kube-apiserver的systemd unit文件] ******************************************************************************************** ok: [192.168.30.10] TASK [kube-master : 建立kube-controller-manager的systemd unit文件] *********************************************************************************** ok: [192.168.30.10] TASK [kube-master : 建立kube-scheduler的systemd unit文件] ******************************************************************************************** ok: [192.168.30.10] TASK [kube-master : enable master 服務] *********************************************************************************************************** changed: [192.168.30.10] TASK [kube-master : 啓動 master 服務] *************************************************************************************************************** changed: [192.168.30.10] TASK [kube-master : 以輪詢的方式等待master服務啓動完成] ******************************************************************************************************* changed: [192.168.30.10 -> 192.168.30.10] TASK [kube-node : 建立kube-node 相關目錄] 
************************************************************************************************************* ok: [192.168.30.10] => (item=/var/lib/kubelet) ok: [192.168.30.10] => (item=/var/lib/kube-proxy) ok: [192.168.30.10] => (item=/etc/cni/net.d) ok: [192.168.30.10] => (item=/root/.kube) TASK [kube-node : 下載 kubelet,kube-proxy 二進制和基礎 cni plugins] ************************************************************************************* ok: [192.168.30.10] => (item=kubectl) ok: [192.168.30.10] => (item=kubelet) ok: [192.168.30.10] => (item=kube-proxy) ok: [192.168.30.10] => (item=bridge) ok: [192.168.30.10] => (item=host-local) ok: [192.168.30.10] => (item=loopback) TASK [kube-node : 分發 kubeconfig配置文件] ************************************************************************************************************ ok: [192.168.30.10 -> 192.168.30.10] TASK [kube-node : 添加 kubectl 命令自動補全] ************************************************************************************************************ ok: [192.168.30.10] TASK [kube-node : 分發證書相關] *********************************************************************************************************************** ok: [192.168.30.10 -> 192.168.30.10] => (item=ca.pem) ok: [192.168.30.10 -> 192.168.30.10] => (item=ca-key.pem) ok: [192.168.30.10 -> 192.168.30.10] => (item=ca.csr) ok: [192.168.30.10 -> 192.168.30.10] => (item=ca-config.json) TASK [kube-node : 準備kubelet 證書籤名請求] ************************************************************************************************************* ok: [192.168.30.10] TASK [kube-node : 建立 kubelet 證書與私鑰] ************************************************************************************************************* changed: [192.168.30.10] TASK [kube-node : 設置集羣參數] *********************************************************************************************************************** changed: [192.168.30.10] TASK [kube-node : 設置客戶端認證參數] 
******************************************************************************************************************** changed: [192.168.30.10] TASK [kube-node : 設置上下文參數] ********************************************************************************************************************** changed: [192.168.30.10] TASK [kube-node : 選擇默認上下文] ********************************************************************************************************************** changed: [192.168.30.10] TASK [kube-node : 移動 kubelet.kubeconfig] ******************************************************************************************************** changed: [192.168.30.10] TASK [kube-node : 準備 cni配置文件] ******************************************************************************************************************* ok: [192.168.30.10] TASK [kube-node : 建立kubelet的systemd unit文件] ***************************************************************************************************** changed: [192.168.30.10] TASK [kube-node : 開機啓用kubelet 服務] *************************************************************************************************************** changed: [192.168.30.10] TASK [kube-node : 開啓kubelet 服務] ***************************************************************************************************************** changed: [192.168.30.10] TASK [kube-node : 安裝kube-proxy.kubeconfig配置文件] ************************************************************************************************** ok: [192.168.30.10 -> 192.168.30.10] TASK [kube-node : 建立kube-proxy 服務文件] ************************************************************************************************************ ok: [192.168.30.10] TASK [kube-node : 開機啓用kube-proxy 服務] ************************************************************************************************************ changed: [192.168.30.10] TASK [kube-node : 開啓kube-proxy 服務] ************************************************************************************************************** 
changed: [192.168.30.10] TASK [kube-node : 輪詢等待kubelet啓動] **************************************************************************************************************** changed: [192.168.30.10] TASK [kube-node : 輪詢等待node達到Ready狀態] ************************************************************************************************************ changed: [192.168.30.10 -> 192.168.30.10] TASK [kube-node : 設置node節點role] ***************************************************************************************************************** changed: [192.168.30.10 -> 192.168.30.10] TASK [Making master nodes SchedulingDisabled] *************************************************************************************************** changed: [192.168.30.10 -> 192.168.30.10] TASK [Setting master role name] ***************************************************************************************************************** changed: [192.168.30.10 -> 192.168.30.10] PLAY RECAP ************************************************************************************************************************************** 192.168.30.10 : ok=39 changed=21 unreachable=0 failed=0 skipped=1 rescued=0 ignored=0
[root@k8s-master1 ansible]# kubectl get nodes NAME STATUS ROLES AGE VERSION 192.168.30.10 Ready,SchedulingDisabled master 86m v1.13.5
[root@k8s-master1 ~]# docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:3.1 #先在阿里雲官網將鏡像拉取下來 [root@k8s-master1 ~]# docker login www.harbor.com #登陸harbor服務 Username (admin): admin Password: Login Succeeded [root@k8s-master1 ~]# docker images #查看拉取的鏡像 REPOSITORY TAG IMAGE ID CREATED SIZE docker.io/alpine latest b7b28af77ffe 5 weeks ago 5.58 MB www.harbor.com/base-images/alpine v1 b7b28af77ffe 5 weeks ago 5.58 MB docker.io/mirrorgooglecontainers/pause-amd64 3.1 da86e6ba6ca1 20 months ago 742 kB [root@k8s-master1 ~]# [root@k8s-master1 ~]# docker tag docker.io/mirrorgooglecontainers/pause-amd64:3.1 www.harbor.com/base-images/pause-amd64:3.1 #打上tag號 [root@k8s-master1 ~]# docker push www.harbor.com/base-images/pause-amd64:3.1 #推送至harbor倉庫
[root@k8s-master1 defaults]# pwd /etc/ansible/roles/kube-node/defaults [root@k8s-master1 defaults]# vim main.yml # 默認使用kube-proxy的 'iptables' 模式,可選 'ipvs' 模式(experimental) PROXY_MODE: "iptables" # 基礎容器鏡像 SANDBOX_IMAGE: "www.harbor.com/base-images/pause-amd64:3.1" #本地的harbor鏡像地址 #SANDBOX_IMAGE: "registry.access.redhat.com/rhel7/pod-infrastructure:latest" # Kubelet 根目錄 KUBELET_ROOT_DIR: "/var/lib/kubelet" # node節點最大pod 數 MAX_PODS: 110
[root@k8s-master1 ansible]# ansible-playbook 05.kube-node.yml
TASK [kube-node : 下載 kubelet,kube-proxy 二進制和基礎 cni plugins] *****
ok: [192.168.30.61] => (item=kubectl)
ok: [192.168.30.60] => (item=kubectl)
ok: [192.168.30.61] => (item=kubelet)
ok: [192.168.30.60] => (item=kubelet)
ok: [192.168.30.61] => (item=kube-proxy)
ok: [192.168.30.60] => (item=kube-proxy)
ok: [192.168.30.61] => (item=bridge)
ok: [192.168.30.60] => (item=bridge)
ok: [192.168.30.61] => (item=host-local)
ok: [192.168.30.60] => (item=host-local)
ok: [192.168.30.61] => (item=loopback)
ok: [192.168.30.60] => (item=loopback)
TASK [kube-node : 分發 kubeconfig配置文件] ************************************************************************************************************ ok: [192.168.30.60 -> 192.168.30.10] ok: [192.168.30.61 -> 192.168.30.10] TASK [kube-node : 添加 kubectl 命令自動補全] ************************************************************************************************************ ok: [192.168.30.60] ok: [192.168.30.61] TASK [kube-node : 分發證書相關] *********************************************************************************************************************** ok: [192.168.30.60 -> 192.168.30.10] => (item=ca.pem) ok: [192.168.30.61 -> 192.168.30.10] => (item=ca.pem) ok: [192.168.30.60 -> 192.168.30.10] => (item=ca-key.pem) ok: [192.168.30.61 -> 192.168.30.10] => (item=ca-key.pem) ok: [192.168.30.60 -> 192.168.30.10] => (item=ca.csr) ok: [192.168.30.61 -> 192.168.30.10] => (item=ca.csr) ok: [192.168.30.60 -> 192.168.30.10] => (item=ca-config.json) ok: [192.168.30.61 -> 192.168.30.10] => (item=ca-config.json) TASK [kube-node : 準備kubelet 證書籤名請求] ************************************************************************************************************* ok: [192.168.30.60] ok: [192.168.30.61] TASK [kube-node : 建立 kubelet 證書與私鑰] ************************************************************************************************************* changed: [192.168.30.60] changed: [192.168.30.61] TASK [kube-node : 設置集羣參數] *********************************************************************************************************************** changed: [192.168.30.60] changed: [192.168.30.61] TASK [kube-node : 設置客戶端認證參數] ******************************************************************************************************************** changed: [192.168.30.60] changed: [192.168.30.61] TASK [kube-node : 設置上下文參數] ********************************************************************************************************************** changed: [192.168.30.60] changed: [192.168.30.61] TASK [kube-node : 
選擇默認上下文] ********************************************************************************************************************** changed: [192.168.30.60] changed: [192.168.30.61] TASK [kube-node : 移動 kubelet.kubeconfig] ******************************************************************************************************** changed: [192.168.30.60] changed: [192.168.30.61] TASK [kube-node : 準備 cni配置文件] ******************************************************************************************************************* ok: [192.168.30.60] ok: [192.168.30.61] TASK [kube-node : 建立kubelet的systemd unit文件] ***************************************************************************************************** ok: [192.168.30.60] ok: [192.168.30.61] TASK [kube-node : 開機啓用kubelet 服務] *************************************************************************************************************** changed: [192.168.30.60] changed: [192.168.30.61] TASK [kube-node : 開啓kubelet 服務] ***************************************************************************************************************** changed: [192.168.30.61] changed: [192.168.30.60] TASK [kube-node : 安裝kube-proxy.kubeconfig配置文件] ************************************************************************************************** ok: [192.168.30.60 -> 192.168.30.10] ok: [192.168.30.61 -> 192.168.30.10] TASK [kube-node : 建立kube-proxy 服務文件] ************************************************************************************************************ ok: [192.168.30.60] ok: [192.168.30.61] TASK [kube-node : 開機啓用kube-proxy 服務] ************************************************************************************************************ changed: [192.168.30.60] changed: [192.168.30.61] TASK [kube-node : 開啓kube-proxy 服務] ************************************************************************************************************** changed: [192.168.30.60] changed: [192.168.30.61] TASK [kube-node : 輪詢等待kubelet啓動] 
**************************************************************************************************************** changed: [192.168.30.60] changed: [192.168.30.61] TASK [kube-node : 輪詢等待node達到Ready狀態] ************************************************************************************************************ changed: [192.168.30.60 -> 192.168.30.10] changed: [192.168.30.61 -> 192.168.30.10] TASK [kube-node : 設置node節點role] ***************************************************************************************************************** changed: [192.168.30.61 -> 192.168.30.10] changed: [192.168.30.60 -> 192.168.30.10] PLAY RECAP ************************************************************************************************************************************** 192.168.30.60 : ok=24 changed=13 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 192.168.30.61 : ok=24 changed=13 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
[root@k8s-master1 ansible]# kubectl get nodes NAME STATUS ROLES AGE VERSION 192.168.30.10 Ready,SchedulingDisabled master 4h29m v1.13.5 192.168.30.60 Ready node 4h28m v1.13.5 192.168.30.61 Ready node 4h28m v1.13.5
[root@k8s-master1 calico]# pwd /data/calico [root@k8s-master1 calico]# ll total 95948 -rw-r--r-- 1 root root 98247376 Jul 10 18:44 release-v3.3.2.tgz
[root@k8s-master1 release-v3.3.2]# ll
[root@k8s-master1 release-v3.3.2]# ll images/
[root@k8s-master1 release-v3.3.2]# ll bin
docker
[root@k8s-master1 release-v3.3.2]# cp bin/calicoctl /etc/ansible/bin cp: overwrite ‘/etc/ansible/bin/calicoctl’? y
[root@k8s-master1 release-v3.3.2]# /etc/ansible/bin/calicoctl version
[root@k8s-master1 release-v3.3.2]# docker load -i images/calico-node.tar #calico-node加載到本地 [root@k8s-master1 release-v3.3.2]# docker tag calico/node:v3.3.2 www.harbor.com/base-images/calico-node:v3.3.2 #打上tag號 [root@k8s-master1 release-v3.3.2]# docker push www.harbor.com/base-images/calico-node:v3.3.2 #上傳calico-node鏡像 [root@k8s-master1 release-v3.3.2]# docker load -i images/calico-cni.tar #calico-cni加載到本地 [root@k8s-master1 release-v3.3.2]# docker tag calico/cni:v3.3.2 www.harbor.com/base-images/calico-cni:v3.3.2 #打上tag號 [root@k8s-master1 release-v3.3.2]# docker push www.harbor.com/base-images/calico-cni:v3.3.2 #上傳calico-cni鏡像 [root@k8s-master1 release-v3.3.2]# docker load -i images/calico-kube-controllers.tar #calico-kube-controllers加載到本地 [root@k8s-master1 release-v3.3.2]# docker tag calico/kube-controllers:v3.3.2 www.harbor.com/base-images/calico-kube-controllers:v3.3.2 #打上tag號 [root@k8s-master1 release-v3.3.2]# docker push www.harbor.com/base-images/calico-kube-controllers:v3.3.2 #上傳calico-kube-controllers鏡像
[root@k8s-master1 defaults]# pwd /etc/ansible/roles/calico/defaults [root@k8s-master1 defaults]# vim main.yml
[root@k8s-master1 templates]# pwd /etc/ansible/roles/calico/templates [root@k8s-master1 templates]# vim calico-v3.3.yaml.j2
[root@k8s-master1 templates]# vim calico-v3.3.yaml.j2
[root@k8s-master1 templates]# vim calico-v3.3.yaml.j2
[root@k8s-master1 ansible]# ansible-playbook 06.network.yml
TASK [calico : 建立calico 證書請求] **
ok: [192.168.30.10 -> 192.168.30.10]
TASK [calico : 建立 calico證書和私鑰] ***************************************************************************** changed: [192.168.30.10 -> 192.168.30.10] TASK [calico : get calico-etcd-secrets info] *************************************************************** changed: [192.168.30.10 -> 192.168.30.10] TASK [calico : 配置 calico DaemonSet yaml文件] ***************************************************************** ok: [192.168.30.10 -> 192.168.30.10] TASK [calico : 檢查是否已下載離線calico鏡像] ************************************************************************** changed: [192.168.30.10] TASK [calico : node 節點建立calico 相關目錄] *********************************************************************** ok: [192.168.30.60] => (item=/etc/calico/ssl) ok: [192.168.30.10] => (item=/etc/calico/ssl) ok: [192.168.30.61] => (item=/etc/calico/ssl) ok: [192.168.30.10] => (item=/etc/cni/net.d) ok: [192.168.30.60] => (item=/etc/cni/net.d) ok: [192.168.30.60] => (item=/opt/kube/images) ok: [192.168.30.61] => (item=/etc/cni/net.d) ok: [192.168.30.10] => (item=/opt/kube/images) ok: [192.168.30.61] => (item=/opt/kube/images) TASK [calico : 獲取calico離線鏡像推送狀況] *************************************************************************** changed: [192.168.30.60] changed: [192.168.30.10] changed: [192.168.30.61] TASK [calico : 運行 calico網絡] ******************************************************************************** changed: [192.168.30.10 -> 192.168.30.10] TASK [calico : 刪除默認cni配置] ********************************************************************************** ok: [192.168.30.60] ok: [192.168.30.61] ok: [192.168.30.10] TASK [calico : 下載calicoctl 客戶端] **************************************************************************** ok: [192.168.30.61] => (item=calicoctl) ok: [192.168.30.60] => (item=calicoctl) ok: [192.168.30.10] => (item=calicoctl) TASK [calico : 分發 calico 證書] ******************************************************************************* ok: [192.168.30.10 -> 192.168.30.10] => 
(item=calico.pem) changed: [192.168.30.60 -> 192.168.30.10] => (item=calico.pem) changed: [192.168.30.61 -> 192.168.30.10] => (item=calico.pem) ok: [192.168.30.10 -> 192.168.30.10] => (item=calico-key.pem) changed: [192.168.30.61 -> 192.168.30.10] => (item=calico-key.pem) changed: [192.168.30.60 -> 192.168.30.10] => (item=calico-key.pem) TASK [calico : 準備 calicoctl配置文件] *************************************************************************** ok: [192.168.30.61] ok: [192.168.30.60] ok: [192.168.30.10] TASK [calico : 輪詢等待calico-node 運行,視下載鏡像速度而定] *************************************************************** changed: [192.168.30.10 -> 192.168.30.10] changed: [192.168.30.61 -> 192.168.30.10] changed: [192.168.30.60 -> 192.168.30.10] PLAY RECAP ************************************************************************************************* 192.168.30.10 : ok=15 changed=6 unreachable=0 failed=0 skipped=43 rescued=0 ignored=0 192.168.30.60 : ok=8 changed=3 unreachable=0 failed=0 skipped=25 rescued=0 ignored=0 192.168.30.61 : ok=8 changed=3 unreachable=0 failed=0 skipped=25 rescued=0 ignored=0
[root@k8s-node-1 ~]# calicoctl version Client Version: v3.3.2 Build date: 2018-12-03T15:10:51+0000 Git commit: 594fd84e Cluster Version: v3.3.2 Cluster Type: k8s,bgp
[root@k8s-node-1 ~]# docker images REPOSITORY TAG IMAGE ID CREATED SIZE www.harbor.com/base-images/calico-node v3.3.2 4e9be81e3a59 8 months ago 75.3MB www.harbor.com/base-images/calico-cni v3.3.2 490d921fa49c 8 months ago 75.4MB www.harbor.com/base-images/pause-amd64 3.1 da86e6ba6ca1 20 months ago 742kB
[root@k8s-node-1 ~]# calicoctl node status Calico process is running. IPv4 BGP status +---------------+-------------------+-------+----------+-------------+ | PEER ADDRESS | PEER TYPE | STATE | SINCE | INFO | +---------------+-------------------+-------+----------+-------------+ | 192.168.30.10 | node-to-node mesh | up | 22:25:47 | Established | | 192.168.30.61 | node-to-node mesh | up | 22:33:37 | Established | +---------------+-------------------+-------+----------+-------------+
[root@k8s-master1 ansible]# pwd /etc/ansible [root@k8s-master1 ansible]# vim hosts …… [kube-node] 192.168.30.60 192.168.30.61 [new-node] # 預留組,後續添加node節點使用 192.168.30.62 #添加的新node節點 ……
root@k8s-master1:/etc/ansible# vim 20.addnode.yml
[root@k8s-master1 ansible]# ansible-playbook 20.addnode.yml
……
PLAY [deploy] **
TASK [Gathering Facts] ************************************************************************************* ok: [192.168.30.10] TASK [rm new-node in ansible hosts] ************************************************************************ changed: [192.168.30.10] PLAY RECAP ************************************************************************************************* 192.168.30.10 : ok=2 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 192.168.30.62 : ok=70 changed=60 unreachable=0 failed=0 skipped=58 rescued=0 ignored=0
[root@k8s-master1 ansible]# vim hosts ….. [kube-master] 192.168.30.10 [new-master] # 預留組,後續添加master節點使用 192.168.30.11 #添加的新master節點 ……
root@k8s-master1:/etc/ansible# vim 21.addmaster.yml
[root@k8s-master1 ansible]# ansible-playbook 21.addmaster.yml
……
PLAY [deploy] ****
TASK [Gathering Facts] *************************************************************************** ok: [192.168.30.10] TASK [rm new-master in ansible hosts] ************************************************************ changed: [192.168.30.10] PLAY RECAP *************************************************************************************** 192.168.30.10 : ok=2 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 192.168.30.11 : ok=89 changed=46 unreachable=0 failed=0 skipped=55 rescued=0 ignored=0
[root@k8s-master1 ~]# kubectl get nodes NAME STATUS ROLES AGE VERSION 192.168.30.10 Ready,SchedulingDisabled master 56m v1.13.5 192.168.30.11 Ready,SchedulingDisabled master 19m v1.13.5 #新添加的master節點 192.168.30.60 Ready node 52m v1.13.5 192.168.30.61 Ready node 52m v1.13.5 192.168.30.62 Ready node 30m v1.13.5 #新添加的node節點
[root@k8s-master1 ~]# kubectl run test-pod --image=alpine --replicas=4 sleep 360000 kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.
[root@k8s-master1 ~]# kubectl get pod -o wide NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES test-pod-9569f557-kq54m 1/1 Running 0 2m 172.20.76.158 192.168.30.62 <none> <none> test-pod-9569f557-sh4pp 1/1 Running 0 2m 172.20.140.82 192.168.30.61 <none> <none> test-pod-9569f557-sqr77 1/1 Running 0 2m 172.20.109.81 192.168.30.60 <none> <none> test-pod-9569f557-wlmgf 1/1 Running 0 2m 172.20.76.159 192.168.30.62 <none> <none>
[root@k8s-master1 ~]# kubectl exec -it test-pod-9569f557-kq54m sh / #
/ # ifconfig eth0 Link encap:Ethernet HWaddr 36:25:BB:AD:A1:A3 inet addr:172.20.76.158 Bcast:0.0.0.0 Mask:255.255.255.255 #當前pod的ip …… 5)在此容器ping另外兩個node節點中的pod進行測試,能夠ping通,實現跨節點間的pod容器通訊 / # ping 172.20.140.82 PING 172.20.140.82 (172.20.140.82): 56 data bytes 64 bytes from 172.20.140.82: seq=0 ttl=62 time=0.509 ms 64 bytes from 172.20.140.82: seq=1 ttl=62 time=0.614 ms 64 bytes from 172.20.140.82: seq=2 ttl=62 time=0.386 ms …… / # ping 172.20.109.81 PING 172.20.109.81 (172.20.109.81): 56 data bytes 64 bytes from 172.20.109.81: seq=0 ttl=62 time=0.584 ms 64 bytes from 172.20.109.81: seq=1 ttl=62 time=0.678 ms 64 bytes from 172.20.109.81: seq=2 ttl=62 time=0.704 ms
[root@k8s-master1 dashboard]# mkdir 1.10.1 [root@k8s-master1 dashboard]# cd 1.10.1 [root@k8s-master1 1.10.1]# pwd /etc/ansible/manifests/dashboard/1.10.1
[root@k8s-master1 1.10.1]# cp ../*.yaml ./ [root@k8s-master1 1.10.1]# ll total 24 -rw-r--r-- 1 root root 357 Aug 21 11:13 admin-user-sa-rbac.yaml -rw-r--r-- 1 root root 4761 Aug 21 11:13 kubernetes-dashboard.yaml -rw-r--r-- 1 root root 2223 Aug 21 11:13 read-user-sa-rbac.yaml -rw-r--r-- 1 root root 458 Aug 21 11:13 ui-admin-rbac.yaml -rw-r--r-- 1 root root 477 Aug 21 11:13 ui-read-rbac.yaml
https://github.com/kubernetes/dashboard/releases/tag/v1.10.1
[root@k8s-master1 1.10.1]# docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kubernetes-dashboard-amd64:v1.10.1
[root@k8s-master1 1.10.1]# docker images
[root@k8s-master1 1.10.1]# docker tag f9aed6605b81 www.harbor.com/base-images/kubernetes-dashboard-amd64:v1.10.1
docker push www.harbor.com/base-images/kubernetes-dashboard-amd64:v1.10.1 #推送至本地harbor倉庫
[root@k8s-master1 1.10.1]# vim kubernetes-dashboard.yaml …… containers: - name: kubernetes-dashboard #image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.8.3 image: www.harbor.com/base-images/kubernetes-dashboard-amd64:v1.10.1 ports: - containerPort: 8443 protocol: TCP ……
[root@k8s-master1 1.10.1]# kubectl create -f ./ serviceaccount/admin-user created clusterrolebinding.rbac.authorization.k8s.io/admin-user created secret/kubernetes-dashboard-certs created serviceaccount/kubernetes-dashboard created role.rbac.authorization.k8s.io/kubernetes-dashboard-minimal created rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard-minimal created deployment.apps/kubernetes-dashboard created service/kubernetes-dashboard created serviceaccount/dashboard-read-user created clusterrolebinding.rbac.authorization.k8s.io/dashboard-read-binding created clusterrole.rbac.authorization.k8s.io/dashboard-read-clusterrole created clusterrole.rbac.authorization.k8s.io/ui-admin created rolebinding.rbac.authorization.k8s.io/ui-admin-binding created clusterrole.rbac.authorization.k8s.io/ui-read created rolebinding.rbac.authorization.k8s.io/ui-read-binding created
[root@k8s-master1 1.10.1]# kubectl get pod -n kube-system NAME READY STATUS RESTARTS AGE calico-kube-controllers-854875cc8-bvvsv 1/1 Running 0 10m calico-node-2cb59 2/2 Running 0 10m calico-node-2ptr5 2/2 Running 0 10m calico-node-7fh5s 2/2 Running 0 10m calico-node-cxzxp 2/2 Running 0 10m calico-node-fc8z8 2/2 Running 0 10m kubernetes-dashboard-fbfff599f-gfrgd 1/1 Running 0 26s #kubernetes-dashboard容器已經運行
root@k8s-master1:/etc/ansible# kubectl cluster-info Kubernetes master is running at https://192.168.30.24:6443 kubernetes-dashboard is running at https://192.168.30.24:6443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy root@k8s-master1:/etc/ansible# kubectl cluster-info
root@k8s-master1:/etc/ansible# kubectl get secret -n kube-system | grep admin-user admin-user-token-6qcq8 kubernetes.io/service-account-token 3 47m
root@k8s-master1:/etc/ansible# kubectl describe secret admin-user-token-6qcq8 -n kube-system …… ca.crt: 1346 bytes namespace: 11 bytes token: eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLTZxY3E4Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJkNzkzODViZS1jNjI2LTExZTktOTNkZC0wMDBjMjkyMDFkYzciLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.F7-fD5vNoQtXA22R6dIPFww9LPQpfBBkMqJMys6mnCcjDyqCoy3kcO9gIwm94aNkhOX7ZSABzbfTmk7RKo67I08NtbzpGNUPKWDqAz1uBkOk7grbjfqwl7SUpbu7PhuNvQbsE7MTnX1tsjSjWxwb_6lALwiiPIRArIY_kMVVStD2lGknK5je1mxw2A-GzgtXTB6BMIuKZ3EbjGcpeIIEOmItrsyfG0pOBpyo8qkJmzVPM9IfEKP8ZoEajANgYmzfgMu9fD5nidjz3MKg9tlhdWeIC_YgLlN9jVJRuA7RdDu6qanRlpUC-XGIMGyWdQxjBh0xDi4jtdgSHplziYJYUA
root@k8s-master1:~# vim /etc/kubernetes/kubelet.kubeconfig
root@k8s-master1:/etc/ansible/manifests# mkdir DNS/{kube-dns,core-dns} -p root@k8s-master1:/etc/ansible/manifests/DNS# ls core-dns kube-dns
root@k8s-master1:/etc/ansible/manifests/DNS/kube-dns# pwd /etc/ansible/manifests/DNS/kube-dns
root@k8s-master1:/data/src# tar xvf kubernetes.tar.gz root@k8s-master1:/data/src# ls kubernetes kubernetes.tar.gz
root@k8s-master1:/data/src/kubernetes/cluster/addons/dns/kube-dns# pwd /data/src/kubernetes/cluster/addons/dns/kube-dns root@k8s-master1:/data/src/kubernetes/cluster/addons/dns/kube-dns# ll total 40 drwxr-xr-x 2 root root 167 Mar 25 16:06 ./ drwxr-xr-x 5 root root 71 Mar 25 16:06 ../ -rw-r--r-- 1 root root 6284 Mar 25 16:06 kube-dns.yaml.base -rw-r--r-- 1 root root 6362 Mar 25 16:06 kube-dns.yaml.in -rw-r--r-- 1 root root 6290 Mar 25 16:06 kube-dns.yaml.sed -rw-r--r-- 1 root root 1077 Mar 25 16:06 Makefile -rw-r--r-- 1 root root 1954 Mar 25 16:06 README.md -rw-r--r-- 1 root root 308 Mar 25 16:06 transforms2salt.sed -rw-r--r-- 1 root root 266 Mar 25 16:06 transforms2sed.sed
root@k8s-master1:/data/src/kubernetes/cluster/addons/dns/kube-dns# cp kube-dns.yaml.base /etc/ansible/manifests/DNS/kube-dns/
root@k8s-master1:/etc/ansible/manifests/DNS/kube-dns# ls kube-dns.yaml.base
root@k8s-master1:/etc/ansible/manifests/DNS/kube-dns# docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/k8s-dns-kube-dns-amd64:1.14.13 #拉取k8s-dns-kube-dns鏡像 root@k8s-master1:/etc/ansible/manifests/DNS/kube-dns# docker tag 55a3c5209c5e www.harbor.com/base-images/k8s-dns-kube-dns-amd64:v1.14.13 #打上tag號 root@k8s-master1:/etc/ansible/manifests/DNS/kube-dns# docker push www.harbor.com/base-images/k8s-dns-kube-dns-amd64:v1.14.13 #上傳至harbor倉庫
root@k8s-master1:/etc/ansible/manifests/DNS/kube-dns# docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.13 #拉取k8s-dns-dnsmasq-nanny鏡像 root@k8s-master1:/etc/ansible/manifests/DNS/kube-dns# docker tag 6dc8ef8287d3 www.harbor.com/base-images/k8s-dns-dnsmasq-nanny-amd64:v1.14.13 #打上tag號 root@k8s-master1:/etc/ansible/manifests/DNS/kube-dns# docker push www.harbor.com/base-images/k8s-dns-dnsmasq-nanny-amd64:v1.14.13 #上傳至harbor倉庫
root@k8s-master1:/etc/ansible/manifests/DNS/kube-dns# docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/k8s-dns-sidecar-amd64:1.14.13 #拉取k8s-dns-sidecar鏡像 root@k8s-master1:/etc/ansible/manifests/DNS/kube-dns# docker tag 4b2e93f0133d www.harbor.com/base-images/k8s-dns-sidecar-amd64:v1.14.13 #打上tag號 root@k8s-master1:/etc/ansible/manifests/DNS/kube-dns# docker push www.harbor.com/base-images/k8s-dns-sidecar-amd64:v1.14.13 #上傳至harbor倉庫
root@k8s-master1:/etc/ansible/manifests/DNS/kube-dns# vim kube-dns.yaml …… containers: - name: kubedns image: www.harbor.com/base-images/k8s-dns-kube-dns-amd64:v1.14.13 resources: …… # TODO: Set memory limits when we've profiled the container for large # clusters, then set request = limit to keep this container in # guaranteed class. Currently, this container falls into the # "burstable" category so the kubelet doesn't backoff from restarting it. limits: memory: 4Gi requests: cpu: 2 memory: 2Gi …… args: - --domain=jie.local. #服務域名後綴 - --dns-port=10053 - --config-dir=/kube-dns-config - --v=2 env: - name: PROMETHEUS_PORT value: "10055" …… - name: dnsmasq image: www.harbor.com/base-images/k8s-dns-dnsmasq-nanny-amd64:v1.14.13 livenessProbe: …… args: - -v=2 - -logtostderr - -configDir=/etc/k8s/dns/dnsmasq-nanny - -restartDnsmasq=true - -- - -k - --cache-size=1000 - --no-negcache - --dns-loop-detect - --log-facility=- - --server=/jie.local/127.0.0.1#10053 - --server=/in-addr.arpa/127.0.0.1#10053 - --server=/ip6.arpa/127.0.0.1#10053 …… - name: sidecar image: www.harbor.com/base-images/k8s-dns-sidecar-amd64:v1.14.13 livenessProbe: …… args: - --v=2 - --logtostderr - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.jie.local,5,SRV - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.jie.local,5,SRV ……
root@k8s-master1:/etc/ansible/manifests/DNS/kube-dns# kubectl create -f kube-dns.yaml service/kube-dns created serviceaccount/kube-dns created configmap/kube-dns created deployment.extensions/kube-dns created
root@k8s-master1:/etc/ansible/manifests/DNS/kube-dns# kubectl get pod -n kube-system NAME READY STATUS RESTARTS AGE calico-kube-controllers-854875cc8-bvvsv 1/1 Running 1 10h calico-node-2cb59 1/2 Running 2 10h calico-node-2ptr5 1/2 Running 2 10h calico-node-7fh5s 1/2 Running 2 10h calico-node-cxzxp 2/2 Running 0 10h calico-node-fc8z8 1/2 Running 2 10h kube-dns-69dcdbc668-xbfws 3/3 Running 0 69s #kube-dns服務一個pod中包含三個服務 kubernetes-dashboard-fbfff599f-gfrgd 1/1 Running 1 10h
root@k8s-master1:/etc/ansible/manifests/DNS/kube-dns# ls busybox.yaml kube-dns.yaml
root@k8s-master1:/etc/ansible/manifests/DNS/kube-dns# docker pull busybox root@k8s-master1:/etc/ansible/manifests/DNS/kube-dns# docker tag db8ee88ad75f www.harbor.com/base-images/busybox:latest
root@k8s-master1:/etc/ansible/manifests/DNS/kube-dns# kubectl create -f busybox.yaml pod/busybox created
root@k8s-master1:/etc/ansible/manifests/DNS/kube-dns# kubectl get pod NAME READY STATUS RESTARTS AGE busybox 1/1 Running 0 24m
root@k8s-master1:/etc/ansible/manifests/DNS/kube-dns# kubectl exec busybox nslookup kubernetes
root@k8s-master1:/etc/ansible/manifests/heapster# ls grafana.yaml heapster.yaml influxdb-v1.1.1 influxdb-with-pv influxdb.yaml
root@k8s-master1:/etc/ansible/manifests/heapster# docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/heapster-grafana-amd64:v4.4.3 #拉取heapster-grafana鏡像 root@k8s-master1:/etc/ansible/manifests/heapster# docker tag 8cb3de219af7 www.harbor.com/base-images/heapster-grafana-amd64:v4.4.3 #將此鏡像打上tag號,便於區分 root@k8s-master1:/etc/ansible/manifests/heapster# docker push www.harbor.com/base-images/heapster-grafana-amd64:v4.4.3 #將此鏡像推送至本地倉庫
root@k8s-master1:/etc/ansible/manifests/heapster# docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/heapster-amd64:v1.5.4 #拉取heapster鏡像 root@k8s-master1:/etc/ansible/manifests/heapster# docker tag 72d68eecf40c www.harbor.com/base-images/heapster-amd64:v1.5.4 #將此鏡像打上tag號,便於區分 root@k8s-master1:/etc/ansible/manifests/heapster# docker push www.harbor.com/base-images/heapster-amd64:v1.5.4 #將此鏡像推送至本地倉庫
root@k8s-master1:/etc/ansible/manifests/heapster# docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/heapster-influxdb-amd64:v1.3.3 #拉取heapster-influxdb鏡像 root@k8s-master1:/etc/ansible/manifests/heapster# docker tag 577260d221db www.harbor.com/base-images/heapster-influxdb-amd64:v1.3.3 #將此鏡像打上tag號,便於區分 root@k8s-master1:/etc/ansible/manifests/heapster# docker push www.harbor.com/base-images/heapster-influxdb-amd64:v1.3.3 #將此鏡像推送至本地倉庫
root@k8s-master1:/etc/ansible/manifests/heapster# cat grafana.yaml heapster.yaml influxdb.yaml …… spec: containers: - name: grafana #image: gcr.io/google_containers/heapster-grafana-amd64:v4.2.0 image: www.harbor.com/base-images/heapster-grafana-amd64:v4.4.3 ports: - containerPort: 3000 protocol: TCP …… …… containers: - name: heapster #image: gcr.io/google_containers/heapster-amd64:v1.5.4 image: www.harbor.com/base-images/heapster-amd64:v1.5.4 imagePullPolicy: IfNotPresent command: …… containers: - name: influxdb #image: gcr.io/google_containers/heapster-influxdb-amd64:v1.3.3 image: www.harbor.com/base-images/heapster-influxdb-amd64:v1.3.3 volumeMounts: - mountPath: /data name: influxdb-storage ……
root@k8s-master1:/etc/ansible/manifests/heapster# kubectl apply -f .
root@k8s-master1:/etc/ansible/manifests/heapster# kubectl get pod -n kube-system
root@k8s-master1:/etc/ansible/manifests/heapster# kubectl cluster-info