Kubernetes (k8s): Deploying a Highly Available Cluster


In production, a Kubernetes cluster must be highly available:

The master nodes and their core components must be made highly available; if the master fails, the whole cluster loses control.

How it works:

  • etcd cluster: three master nodes are deployed, and the etcd instance on each master joins the others to form the etcd cluster
  • apiserver: a load balancer sits in front of the apiservers on the three masters; worker nodes and clients talk to the apiserver through this load balancer
  • pod-master (leader election) guarantees that only the active master does the work: scheduler and controller-manager run as multiple instances across the cluster, but only one instance of each is working at any time while the others stand by (see the check sketched right after this list)
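Once the cluster is built later in this post, you can see this leader election in action by reading the leader annotation that controller-manager and scheduler keep on their lock objects. A minimal check, assuming the default endpoints-based leader-election lock of this Kubernetes release:

# holderIdentity shows which master currently holds the leader lock
kubectl -n kube-system get endpoints kube-controller-manager -o yaml | grep holderIdentity
kubectl -n kube-system get endpoints kube-scheduler -o yaml | grep holderIdentity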

Background: two HA cluster architectures

[Architecture diagram 1 omitted]
2. Distributed etcd storage: three separate host nodes serve as the etcd cluster
[Architecture diagram 2 omitted]

1. Lab environment

  • Prepare three fresh virtual machines, server2, server3 and server4, and install haproxy and keepalived on each of them

Install haproxy and keepalived: keepalived monitors the state of each service node in the cluster, while haproxy is an open-source TCP/HTTP reverse proxy and load balancer well suited to high-availability environments.

1.1 Load balancer deployment: configure haproxy and keepalived on all three hosts

[root@server2 ~]# yum install -y haproxy keepalived -y
[root@server3 ~]# yum install -y haproxy keepalived -y
[root@server4 ~]# yum install -y haproxy keepalived -y

[root@server2 haproxy]# pwd
/etc/haproxy
[root@server2 haproxy]# vim haproxy.cfg 
#---------------------------------------------------------------------
# main frontend which proxys to the backends
#---------------------------------------------------------------------
frontend  main *:8443    // the apiserver listens on the TLS port 6443, so the port haproxy listens on must not clash with the apiserver port
    mode tcp  // forward in TCP mode
    default_backend             apiserver  // the backend is the apiserver, the entry point of the k8s cluster
#---------------------------------------------------------------------
# round robin balancing between the various backends
#---------------------------------------------------------------------
backend apiserver
    mode tcp        // TCP mode
    balance     roundrobin
    server  app1 172.25.60.2:6443 check     // backend servers: the k8s master nodes
    server  app2 172.25.60.3:6443 check
    server  app3 172.25.60.4:6443 check
[root@server2 ~]# systemctl start haproxy
[root@server2 ~]# systemctl enable haproxy    // enable at boot

[root@server2 ~]# netstat -antlpe|grep haproxy
tcp        0      0 0.0.0.0:8443            0.0.0.0:*               LISTEN      0          31732      3668/haproxy 


[root@server2 keepalived]# pwd
/etc/keepalived
[root@server2 keepalived]# cat check_haproxy.sh   // the check_haproxy.sh script checks haproxy's status and ties haproxy to keepalived
#!/bin/bash
systemctl status haproxy &> /dev/null  
if [ $? != 0 ];then
	systemctl stop keepalived
fi
[root@server2 keepalived]# chmod +x check_haproxy.sh     // make it executable
[root@server2 keepalived]# ./check_haproxy.sh        // make sure it runs cleanly

[root@server2 keepalived]# cat keepalived.conf 
! Configuration File for keepalived

global_defs {
   notification_email {
     root@localhost
   }
   notification_email_from keepalived@localhost
   smtp_server 127.0.0.1
   smtp_connect_timeout 30
   router_id LVS_DEVEL
}
vrrp_script check_haproxy {
    script "/etc/keepalived/check_haproxy.sh"  // 放入檢測腳本
    interval 5      //時間間隔5s
}

vrrp_instance VI_1 {
    state MASTER
    interface eth0
    virtual_router_id 51
    priority 100  // priority (the higher the value, the higher the priority); this node has the highest
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    track_script {
        check_haproxy
    }
    virtual_ipaddress {
        172.25.60.100      //VIP
   }
} 

[root@server2 keepalived]# systemctl stop haproxy  // stop haproxy to simulate a failure
[root@server2 keepalived]# systemctl status keepalived  // the check script has stopped keepalived as well
● keepalived.service - LVS and VRRP High Availability Monitor
   Loaded: loaded (/usr/lib/systemd/system/keepalived.service; enabled; vendor preset: disabled)
   Active: inactive (dead) since Thu 2020-05-14 17:06:47 CST; 8s ago

[root@server2 keepalived]# systemctl start haproxy
[root@server2 keepalived]# systemctl start keepalived
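With both services running again, the VIP should be bound on server2. A quick check (a sketch, run on the node that should be active):

ip addr show eth0 | grep 172.25.60.100      # the VIP should appear on eth0 of the active node
systemctl is-active haproxy keepalived      # both should report "active"
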
[root@server2 keepalived]# ssh-keygen  // set up passwordless ssh among the three master hosts
[root@server2 keepalived]# ssh-copy-id server3
[root@server2 keepalived]# ssh-copy-id server4

[root@server2 keepalived]# scp /etc/haproxy/haproxy.cfg server3:/etc/haproxy/
haproxy.cfg                                                                                                     100% 2658     3.0MB/s   00:00    
[root@server2 keepalived]# scp /etc/haproxy/haproxy.cfg server4:/etc/haproxy/
haproxy.cfg                                                                                                     100% 2658     3.1MB/s   00:00    
[root@server2 keepalived]# scp /etc/keepalived/keepalived.conf server3:/etc/keepalived/
keepalived.conf                                                                                                 100%  607   913.9KB/s   00:00    
[root@server2 keepalived]# scp /etc/keepalived/keepalived.conf server4:/etc/keepalived/
keepalived.conf
[root@server3 ~]# cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived

global_defs {
   notification_email {
     root@localhost
   }
   notification_email_from keepalived@localhost
   smtp_server 127.0.0.1
   smtp_connect_timeout 30
   router_id LVS_DEVEL
}
vrrp_script check_haproxy {
    script "/etc/keepalived/check_haproxy.sh"
    interval 5
}

vrrp_instance VI_1 {
    state BACKUP      // this node is a backup
    interface eth0
    virtual_router_id 51
    priority 90       // lower priority than the master
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    track_script {
        check_haproxy
    }
    virtual_ipaddress {
        172.25.60.100
   }
} 

[root@server3 ~]# systemctl start haproxy
[root@server3 ~]# systemctl enable haproxy
[root@server3 ~]# systemctl start keepalived
[root@server3 ~]# systemctl enable keepalived
[root@server4 ~]# cat /etc/keepalived/keepalived.conf 
! Configuration File for keepalived

global_defs {
   notification_email {
     root@localhost
   }
   notification_email_from keepalived@localhost
   smtp_server 127.0.0.1
   smtp_connect_timeout 30
   router_id LVS_DEVEL
}
vrrp_script check_haproxy {
    script "/etc/keepalived/check_haproxy.sh"
    interval 5
}

vrrp_instance VI_1 {
    state BACKUP        // backup as well
    interface eth0
    virtual_router_id 51
    priority 80         // priority lowered further
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    track_script {
        check_haproxy
    }
    virtual_ipaddress {
        172.25.60.100
   }
} 

[root@server4 ~]# systemctl start haproxy
[root@server4 ~]# systemctl enable haproxy
Created symlink from /etc/systemd/system/multi-user.target.wants/haproxy.service to /usr/lib/systemd/system/haproxy.service.
[root@server4 ~]# systemctl start keepalived
[root@server4 ~]# systemctl enable keepalived
Created symlink from /etc/systemd/system/multi-user.target.wants/keepalived.service to /usr/lib/systemd/system/keepalived.service.

1.2 Deploy Docker and Kubernetes on each node

(Do this on every node.)
Time synchronization (omitted here).

[root@server2 ~]# cat /etc/yum.repos.d/docker-ce.repo // yum repository for installing docker
[docker-ce-stable]
name=Docker CE Stable - $basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/$basearch/stable
enabled=1
gpgcheck=0
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[root@server2 ~]# yum install docker-ce container-selinux-2.77-1.el7.noarch.rpm -y # the container-selinux-2.77-1.el7.noarch.rpm package is needed to satisfy dependencies; download it yourself

[root@server2 ~]# scp /etc/yum.repos.d/docker-ce.repo root@172.25.60.3:/etc/yum.repos.d/
docker-ce.repo                                                                                                  100%  213   145.9KB/s   00:00    
[root@server2 ~]# scp /etc/yum.repos.d/docker-ce.repo root@172.25.60.4:/etc/yum.repos.d/
docker-ce.repo
[root@server2 ~]# scp container-selinux-2.77-1.el7.noarch.rpm server3:/root/
container-selinux-2.77-1.el7.noarch.rpm                                                                         100%   37KB  12.4MB/s   00:00    
[root@server2 ~]# scp container-selinux-2.77-1.el7.noarch.rpm server4:/root/
container-selinux-2.77-1.el7.noarch.rpm
[root@server3 ~]# yum install docker-ce container-selinux-2.77-1.el7.noarch.rpm -y
[root@server4 ~]# yum install docker-ce container-selinux-2.77-1.el7.noarch.rpm -y

[root@server2 ~]# systemctl start docker
[root@server2 ~]# systemctl enable docker
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.
[root@server2 ~]# docker info
// if docker info reports errors here, run through the troubleshooting steps below

Troubleshooting:
See my earlier k8s deployment post: https://blog.csdn.net/weixin_43936969/article/details/105773756#3_Kubernetes_57

[root@server2 sysctl.d]# pwd
/etc/sysctl.d
[root@server2 sysctl.d]# vim k8s.conf
[root@server2 sysctl.d]# cat k8s.conf 
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
[root@server2 sysctl.d]# sysctl --system  // apply the settings
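To confirm that the bridge settings took effect (assuming the br_netfilter module is loaded), a quick check:

sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables   # both should print "= 1"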

Change docker's cgroup driver to systemd:

[root@server2 docker]# pwd
/etc/docker
[root@server2 docker]# cat daemon.json 
{
  "exec-opts": ["native.cgroupdriver=systemd"],  // 修改成system的方式
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ]
}
[root@server2 docker]# systemctl restart docker
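After the restart it is worth verifying that docker really switched to the systemd cgroup driver; a minimal check:

docker info 2>/dev/null | grep -i 'cgroup driver'    # should report: Cgroup Driver: systemd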

[root@server2 docker]# scp /etc/sysctl.d/k8s.conf server3:/etc/sysctl.d/
k8s.conf                                                                                                        100%   79    77.0KB/s   00:00    
[root@server2 docker]# scp /etc/sysctl.d/k8s.conf server4:/etc/sysctl.d/
k8s.conf

[root@server3 ~]# systemctl start docker
[root@server3 ~]# systemctl enable docker
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.
[root@server4 ~]# systemctl start docker
[root@server4 ~]# systemctl enable docker
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.

[root@server2 docker]# scp /etc/docker/daemon.json server3:/etc/docker/
daemon.json                                                                                                     100%  224    18.0KB/s   00:00    
[root@server2 docker]# scp /etc/docker/daemon.json server4:/etc/docker/
daemon.json 
[root@server3 ~]# systemctl restart docker
[root@server4 ~]# systemctl restart docker

Disable swap:

[root@server2 ~]# swapoff -a  // turn off swap
[root@server2 ~]# cat /etc/fstab 
# /dev/mapper/rhel-swap   swap                    swap    defaults        0 0
[root@server3 ~]# swapoff -a
[root@server3 ~]# vim /etc/fstab 
# /dev/mapper/rhel-swap   swap                    swap    defaults        0 0
[root@server4 ~]# swapoff -a
[root@server4 ~]# vim /etc/fstab 
# /dev/mapper/rhel-swap   swap                    swap    defaults        0 0
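A quick way to confirm swap is really off on each node:

free -m | grep -i swap      # the Swap row should show 0 total and 0 used
cat /proc/swaps             # should list no active swap devices
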
[root@server2 ~]# cat /etc/yum.repos.d/k8s.repo // yum repository for kubernetes
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
[root@server2 ~]# scp /etc/yum.repos.d/k8s.repo server3:/etc/yum.repos.d/
k8s.repo                                                                                                        100%  129    18.6KB/s   00:00    
[root@server2 ~]# scp /etc/yum.repos.d/k8s.repo server4:/etc/yum.repos.d/
k8s.repo 

[root@server2 ~]# yum install -y kubelet kubeadm kubectl
[root@server3 ~]# yum install -y kubelet kubeadm kubectl
[root@server4 ~]# yum install -y kubelet kubeadm kubectl
[root@server2 ~]# systemctl enable kubelet
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.
[root@server3 ~]# systemctl enable kubelet
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.
[root@server4 ~]# systemctl enable kubelet
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.
[root@server2 ~]# kubeadm config images list  // list the required images; by default they are pulled from the official k8s registry
W0514 22:34:13.033279   16384 configset.go:202] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io]
k8s.gcr.io/kube-apiserver:v1.18.2
k8s.gcr.io/kube-controller-manager:v1.18.2
k8s.gcr.io/kube-scheduler:v1.18.2
k8s.gcr.io/kube-proxy:v1.18.2
k8s.gcr.io/pause:3.2
k8s.gcr.io/etcd:3.4.3-0
k8s.gcr.io/coredns:1.6.7
[root@server2 ~]# kubeadm config print init-defaults  > kubeadm.yaml   // dump the default init configuration into kubeadm.yaml so it can be edited
[root@server2 ~]# vim kubeadm.yaml      // edit kubeadm.yaml; the relevant parts are shown below
localAPIEndpoint:
  advertiseAddress: 172.25.60.2       
  bindPort: 6443

clusterName: kubernetes
controlPlaneEndpoint: "172.25.60.100:8443"
controllerManager: {}

imageRepository: registry.aliyuncs.com/google_containers // image registry, switched to the Aliyun mirror so the images are easy to pull

networking:        // network settings
  dnsDomain: cluster.local
  podSubnet: 10.244.0.0/16
  serviceSubnet: 10.96.0.0/12

scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
[root@server2 ~]# modprobe ip_vs     // load the ipvs kernel modules
[root@server2 ~]# modprobe ip_vs_rr
[root@server2 ~]# modprobe ip_vs_wrr
[root@server2 ~]# modprobe ip_vs_sh
[root@server2 ~]# modprobe nf_conntrack_ipv4
[root@server2 ~]# lsmod |grep ip_vs     // verify the modules are loaded
ip_vs_sh               12688  0 
ip_vs_wrr              12697  0 
ip_vs_rr               12600  0 
ip_vs                 145497  6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack          133095  7 ip_vs,nf_nat,nf_nat_ipv4,xt_conntrack,nf_nat_masquerade_ipv4,nf_conntrack_netlink,nf_conntrack_ipv4
libcrc32c              12644  4 xfs,ip_vs,nf_nat,nf_conntrack
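These modprobe commands do not survive a reboot. If you want the ipvs modules loaded automatically at boot, one option (a sketch for systemd-based hosts) is a modules-load.d file:

cat > /etc/modules-load.d/ipvs.conf << 'EOF'
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack_ipv4
EOF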

[root@server3 ~]# modprobe ip_vs
[root@server3 ~]# modprobe ip_vs_rr
[root@server3 ~]# modprobe ip_vs_wrr
[root@server3 ~]# modprobe ip_vs_sh
[root@server3 ~]# modprobe nf_conntrack_ipv4
[root@server3 ~]# lsmod |grep ip_vs
ip_vs_sh               12688  0 
ip_vs_wrr              12697  0 
ip_vs_rr               12600  0 
ip_vs                 145497  6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack          133095  7 ip_vs,nf_nat,nf_nat_ipv4,xt_conntrack,nf_nat_masquerade_ipv4,nf_conntrack_netlink,nf_conntrack_ipv4
libcrc32c              12644  4 xfs,ip_vs,nf_nat,nf_conntrack

[root@server4 ~]# modprobe ip_vs
[root@server4 ~]# modprobe ip_vs_rr
[root@server4 ~]# modprobe ip_vs_wrr
[root@server4 ~]# modprobe ip_vs_sh
[root@server4 ~]# modprobe nf_conntrack_ipv4
[root@server4 ~]# lsmod |grep ip_vs
ip_vs_sh               12688  0 
ip_vs_wrr              12697  0 
ip_vs_rr               12600  0 
ip_vs                 145497  6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack          133095  7 ip_vs,nf_nat,nf_nat_ipv4,xt_conntrack,nf_nat_masquerade_ipv4,nf_conntrack_netlink,nf_conntrack_ipv4
libcrc32c              12644  4 xfs,ip_vs,nf_nat,nf_conntrack
[root@server2 ~]# kubeadm config images pull --config kubeadm.yaml  // pre-pull the images
[root@server2 ~]# docker images     // list the pulled images
REPOSITORY                                                        TAG                 IMAGE ID            CREATED             SIZE
registry.aliyuncs.com/google_containers/kube-proxy                v1.18.2             0d40868643c6        4 weeks ago         117MB
registry.aliyuncs.com/google_containers/kube-scheduler            v1.18.2             a3099161e137        4 weeks ago         95.3MB
registry.aliyuncs.com/google_containers/kube-apiserver            v1.18.2             6ed75ad404bd        4 weeks ago         173MB
registry.aliyuncs.com/google_containers/kube-controller-manager   v1.18.2             ace0a8c17ba9        4 weeks ago         162MB
registry.aliyuncs.com/google_containers/pause                     3.2                 80d28bedfe5d        2 months ago        683kB
registry.aliyuncs.com/google_containers/coredns                   1.6.7               67da37a9a360        3 months ago        43.8MB
registry.aliyuncs.com/google_containers/etcd                      3.4.3-0             303ce5db0e90        6 months ago        288MB
[root@server2 ~]# docker save -o k8s.tar `docker images|grep -v TAG|awk '{print $1":"$2}'`  // grep -v inverts the match to skip the header line containing TAG; this packs all the pulled k8s images into one archive

1.3 Make all three hosts master nodes

[root@server2 ~]# scp k8s.tar server3:/root/
k8s.tar                                                                                                         100%  693MB  12.3MB/s   00:56    
[root@server2 ~]# scp k8s.tar server4:/root/
k8s.tar           
[root@server3 ~]# docker load -i k8s.tar   // load the image archive
[root@server4 ~]# docker load -i k8s.tar

[root@server2 ~]# kubeadm init   --config kubeadm.yaml --upload-certs  // initialize with kubeadm; --upload-certs uploads the certificates so they can be shared with the other masters
To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of the control-plane node running the following command on each as root:    // command for joining additional master (control-plane) nodes:

  kubeadm join 172.25.60.100:8443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:8f7d2ab1a5e98aadbee8caba2426c79a48b17ce3a725d1bb56bc48624725e5d2 \
    --control-plane --certificate-key 245fba0c8185c4b06578dce61ab6c693bb058fbd551c9be2dc214333e8f06bfc

Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.

Then you can join any number of worker nodes by running the following on each as root:      // command for joining worker nodes:

kubeadm join 172.25.60.100:8443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:8f7d2ab1a5e98aadbee8caba2426c79a48b17ce3a725d1bb56bc48624725e5d2
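Both the join token and the uploaded certificate key expire (the certificate key after two hours, as the output above notes; the token after 24 hours by default). If you need to join a node later, they can be regenerated on an existing master, for example:

kubeadm token create --print-join-command          # prints a fresh worker join command
kubeadm init phase upload-certs --upload-certs     # prints a new certificate key for joining masters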

[root@server2 ~]# mkdir -p $HOME/.kube  // I did not create a regular user here and simply worked as root
[root@server2 ~]# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@server2 ~]# kubectl get node
NAME      STATUS     ROLES    AGE     VERSION
server2   NotReady   master   2m30s   v1.18.2
[root@server3 ~]# kubeadm join 172.25.60.100:8443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:8f7d2ab1a5e98aadbee8caba2426c79a48b17ce3a725d1bb56bc48624725e5d2 \
--control-plane --certificate-key 245fba0c8185c4b06578dce61ab6c693bb058fbd551c9be2dc214333e8f06bfc 
// join server3 as a master node
[root@server3 ~]# mkdir -p $HOME/.kube
[root@server3 ~]# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config 
// with the admin config in place, this node can manage the cluster too
[root@server3 ~]# kubectl get node
NAME      STATUS     ROLES    AGE     VERSION
server2   NotReady   master   7m30s   v1.18.2
server3   NotReady   master   119s    v1.18.2

[root@server4 ~]# kubeadm join 172.25.60.100:8443 --token abcdef.0123456789abcdef \
>     --discovery-token-ca-cert-hash sha256:8f7d2ab1a5e98aadbee8caba2426c79a48b17ce3a725d1bb56bc48624725e5d2 \
>     --control-plane --certificate-key 245fba0c8185c4b06578dce61ab6c693bb058fbd551c9be2dc214333e8f06bfc
// join server4 as a master node
[root@server4 ~]# kubectl get node
NAME      STATUS     ROLES    AGE     VERSION
server2   NotReady   master   13m     v1.18.2
server3   NotReady   master   7m53s   v1.18.2
server4   NotReady   master   3m57s   v1.18.2

1.4 Install the flannel network add-on

Import the flannel image archive flanneld-v0.12.0-amd64.docker:

// load the flannel image archive flanneld-v0.12.0-amd64.docker
[root@server2 ~]# docker load -i flanneld-v0.12.0-amd64.docker
[root@server2 ~]# scp flanneld-v0.12.0-amd64.docker server3:/root/
flanneld-v0.12.0-amd64.docker                                                                                   100%   51MB   8.1MB/s   00:06    
[root@server2 ~]# scp flanneld-v0.12.0-amd64.docker server4:/root/
flanneld-v0.12.0-amd64.docker
[root@server3 ~]# docker load -i flanneld-v0.12.0-amd64.docker
[root@server4 ~]# docker load -i flanneld-v0.12.0-amd64.docker
[root@server2 ~]# vim kube-flannel.yml
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: psp.flannel.unprivileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
  privileged: false
  volumes:
    - configMap
    - secret
    - emptyDir
    - hostPath
  allowedHostPaths:
    - pathPrefix: "/etc/cni/net.d"
    - pathPrefix: "/etc/kube-flannel"
    - pathPrefix: "/run/flannel"
  readOnlyRootFilesystem: false
  # Users and groups
  runAsUser:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  # Privilege Escalation
  allowPrivilegeEscalation: false
  defaultAllowPrivilegeEscalation: false
  # Capabilities
  allowedCapabilities: ['NET_ADMIN']
  defaultAddCapabilities: []
  requiredDropCapabilities: []
  # Host namespaces
  hostPID: false
  hostIPC: false
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  # SELinux
  seLinux:
    # SELinux is unused in CaaSP
    rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: flannel
rules:
  - apiGroups: ['extensions']
    resources: ['podsecuritypolicies']
    verbs: ['use']
    resourceNames: ['psp.flannel.unprivileged']
  - apiGroups:
      - ""
    resources:
      - pods
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - nodes/status
    verbs:
      - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-amd64
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: kubernetes.io/os
                    operator: In
                    values:
                      - linux
                  - key: kubernetes.io/arch
                    operator: In
                    values:
                      - amd64
      hostNetwork: true
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.12.0-amd64
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.12.0-amd64
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run/flannel
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-arm64
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: kubernetes.io/os
                    operator: In
                    values:
                      - linux
                  - key: kubernetes.io/arch
                    operator: In
                    values:
                      - arm64
      hostNetwork: true
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.12.0-arm64
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.12.0-arm64
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
             add: ["NET_ADMIN"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run/flannel
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-arm
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: kubernetes.io/os
                    operator: In
                    values:
                      - linux
                  - key: kubernetes.io/arch
                    operator: In
                    values:
                      - arm
      hostNetwork: true
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.12.0-arm
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.12.0-arm
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
             add: ["NET_ADMIN"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run/flannel
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-ppc64le
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: kubernetes.io/os
                    operator: In
                    values:
                      - linux
                  - key: kubernetes.io/arch
                    operator: In
                    values:
                      - ppc64le
      hostNetwork: true
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.12.0-ppc64le
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.12.0-ppc64le
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
             add: ["NET_ADMIN"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run/flannel
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-s390x
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: kubernetes.io/os
                    operator: In
                    values:
                      - linux
                  - key: kubernetes.io/arch
                    operator: In
                    values:
                      - s390x
      hostNetwork: true
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.12.0-s390x
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.12.0-s390x
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
             add: ["NET_ADMIN"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run/flannel
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
[root@server2 ~]# kubectl apply -f kube-flannel.yml 
podsecuritypolicy.policy/psp.flannel.unprivileged created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
serviceaccount/flannel created
configmap/kube-flannel-cfg created
daemonset.apps/kube-flannel-ds-amd64 created
daemonset.apps/kube-flannel-ds-arm64 created
daemonset.apps/kube-flannel-ds-arm created
daemonset.apps/kube-flannel-ds-ppc64le created
daemonset.apps/kube-flannel-ds-s390x created
[root@server2 ~]# kubectl get pod -n kube-system    // check the pods in kube-system
NAME                              READY   STATUS    RESTARTS   AGE
coredns-7ff77c879f-2nndl          0/1     Running   0          29m
coredns-7ff77c879f-6cwlc          1/1     Running   0          29m
etcd-server2                      1/1     Running   0          29m
etcd-server3                      1/1     Running   0          24m
etcd-server4                      1/1     Running   0          20m
kube-apiserver-server2            1/1     Running   0          29m
kube-apiserver-server3            1/1     Running   0          24m
kube-apiserver-server4            1/1     Running   0          20m
kube-controller-manager-server2   1/1     Running   2          29m
kube-controller-manager-server3   1/1     Running   1          24m
kube-controller-manager-server4   1/1     Running   0          20m
kube-flannel-ds-amd64-6jjvw       1/1     Running   0          112s
kube-flannel-ds-amd64-h9sgg       1/1     Running   1          111s
kube-flannel-ds-amd64-jg7rk       1/1     Running   0          111s
kube-proxy-24vrj                  1/1     Running   0          29m
kube-proxy-9mthn                  1/1     Running   0          20m
kube-proxy-fjk78                  1/1     Running   0          24m
kube-scheduler-server2            1/1     Running   2          29m
kube-scheduler-server3            1/1     Running   0          24m
kube-scheduler-server4            1/1     Running   1          20m
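Since kubeadm.yaml put kube-proxy into ipvs mode, it is worth confirming the proxier actually came up that way; a small check (ipvsadm has to be installed separately):

yum install -y ipvsadm
ipvsadm -Ln | head                                                             # should list virtual servers for the cluster service IPs
kubectl -n kube-system logs -l k8s-app=kube-proxy --tail=20 | grep -i ipvs     # the logs should mention the ipvs proxier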

Enable kubectl command completion:

[root@server2 ~]# yum install bash-* -y
[root@server2 ~]# echo "source <(kubectl completion bash)" >> ~/.bashrc
[root@server2 ~]# source .bashrc

server1 is used as a separate admin client: install kubectl there and copy over the admin kubeconfig.

[root@server1 yum.repos.d]# cat k8s.repo 
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
[root@server1 yum.repos.d]# yum install kubectl -y
[root@server2 ~]# scp -r .kube/ server1:/root/
[root@server1 yum.repos.d]# kubectl get node
NAME      STATUS   ROLES    AGE   VERSION
server2   Ready    master   38m   v1.18.2
server3   Ready    master   33m   v1.18.2
server4   Ready    master   29m   v1.18.2

1.5 Bring up one more host, server5, as a worker

[root@server2 ~]# ssh-copy-id server5     // passwordless ssh
[root@server2 ~]# scp /etc/yum.repos.d/docker-ce.repo server5:/etc/yum.repos.d/
docker-ce.repo                                                                                                  100%  213   237.3KB/s   00:00    
[root@server2 ~]# scp /etc/yum.repos.d/k8s.repo server5:/etc/yum.repos.d/
k8s.repo 
[root@server2 ~]# scp container-selinux-2.77-1.el7.noarch.rpm server5:~
container-selinux-2.77-1.el7.noarch.rpm                                                                         100%   37KB   7.9MB/s   00:00 
[root@server5 ~]# yum install docker-ce container-selinux-2.77-1.el7.noarch.rpm -y      // install docker
[root@server5 ~]# yum install -y kubelet kubeadm     

[root@server5 ~]# systemctl start docker
[root@server5 ~]# systemctl enable docker
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.
[root@server2 ~]# scp /etc/docker/daemon.json server5:/etc/docker/
daemon.json
[root@server2 ~]# scp /etc/sysctl.d/k8s.conf server5:/etc/sysctl.d/
k8s.conf 

[root@server5 ~]# sysctl --system
[root@server5 ~]# systemctl restart docker
[root@server5 ~]# systemctl start kubelet
[root@server5 ~]# systemctl enable kubelet
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.

[root@server2 ~]# scp flanneld-v0.12.0-amd64.docker server5:~
flanneld-v0.12.0-amd64.docker                                                                                   100%   51MB  12.2MB/s   00:04
[root@server2 ~]# scp k8s.tar server5:~
k8s.tar                                                                                                         100%  693MB   6.3MB/s   01:50
[root@server5 ~]# docker load -i flanneld-v0.12.0-amd64.docker    // load the flannel network plugin image
[root@server5 ~]# docker load -i k8s.tar     // load the k8s images

[root@server5 ~]# modprobe ip_vs      // load the ipvs modules
[root@server5 ~]# modprobe ip_vs_rr
[root@server5 ~]# modprobe ip_vs_wrr
[root@server5 ~]# modprobe ip_vs_sh
[root@server5 ~]# modprobe nf_conntrack_ipv4
[root@server5 ~]# lsmod |grep ip_vs
ip_vs_sh               12688  0 
ip_vs_wrr              12697  0 
ip_vs_rr               12600  0 
ip_vs                 145497  6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack          133095  7 ip_vs,nf_nat,nf_nat_ipv4,xt_conntrack,nf_nat_masquerade_ipv4,nf_conntrack_netlink,nf_conntrack_ipv4
libcrc32c              12644  4 xfs,ip_vs,nf_nat,nf_conntrack

[root@server5 ~]# swapoff -a  // turn off swap
[root@server5 ~]# vim /etc/fstab 
# /dev/mapper/rhel-swap   swap                    swap    defaults        0 0

[root@server5 ~]# kubeadm join 172.25.60.100:8443 --token abcdef.0123456789abcdef     --discovery-token-ca-cert-hash sha256:8f7d2ab1a5e98aadbee8caba2426c79a48b17ce3a725d1bb56bc48624725e5d2 
// join the kubernetes cluster as a worker node

server5 has now joined the kubernetes cluster:

[root@server1 yum.repos.d]# kubectl get node
NAME      STATUS   ROLES    AGE   VERSION
server2   Ready    master   77m   v1.18.2
server3   Ready    master   72m   v1.18.2
server4   Ready    master   68m   v1.18.2
server5   Ready    <none>   76s   v1.18.2
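server5 shows ROLES <none> because worker nodes do not get a role label by default. This is purely cosmetic, but if you want it displayed, one option is:

kubectl label node server5 node-role.kubernetes.io/worker=     # kubectl get node will then show "worker" under ROLES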

[root@server1 yum.repos.d]# kubectl get pod -n kube-system -o wide
NAME                              READY   STATUS    RESTARTS   AGE     IP            NODE      NOMINATED NODE   READINESS GATES
coredns-7ff77c879f-2nndl          1/1     Running   0          78m     10.244.2.2    server4   <none>           <none>
coredns-7ff77c879f-6cwlc          1/1     Running   0          78m     10.244.1.2    server3   <none>           <none>
etcd-server2                      1/1     Running   2          78m     172.25.60.2   server2   <none>           <none>
etcd-server3                      1/1     Running   1          73m     172.25.60.3   server3   <none>           <none>
etcd-server4                      1/1     Running   2          69m     172.25.60.4   server4   <none>           <none>
kube-apiserver-server2            1/1     Running   2          78m     172.25.60.2   server2   <none>           <none>
kube-apiserver-server3            1/1     Running   1          73m     172.25.60.3   server3   <none>           <none>
kube-apiserver-server4            1/1     Running   1          69m     172.25.60.4   server4   <none>           <none>
kube-controller-manager-server2   1/1     Running   7          78m     172.25.60.2   server2   <none>           <none>
kube-controller-manager-server3   1/1     Running   2          73m     172.25.60.3   server3   <none>           <none>
kube-controller-manager-server4   1/1     Running   3          69m     172.25.60.4   server4   <none>           <none>
kube-flannel-ds-amd64-6jjvw       1/1     Running   0          50m     172.25.60.3   server3   <none>           <none>
kube-flannel-ds-amd64-9vmcj       1/1     Running   0          2m29s   172.25.60.5   server5   <none>           <none>
kube-flannel-ds-amd64-h9sgg       1/1     Running   1          50m     172.25.60.2   server2   <none>           <none>
kube-flannel-ds-amd64-jg7rk       1/1     Running   0          50m     172.25.60.4   server4   <none>           <none>
kube-proxy-24vrj                  1/1     Running   0          78m     172.25.60.2   server2   <none>           <none>
kube-proxy-9mthn                  1/1     Running   0          69m     172.25.60.4   server4   <none>           <none>
kube-proxy-fjk78                  1/1     Running   0          73m     172.25.60.3   server3   <none>           <none>
kube-proxy-lw4z8                  1/1     Running   0          2m30s   172.25.60.5   server5   <none>           <none>
kube-scheduler-server2            1/1     Running   8          78m     172.25.60.2   server2   <none>           <none>
kube-scheduler-server3            1/1     Running   2          73m     172.25.60.3   server3   <none>           <none>
kube-scheduler-server4            1/1     Running   6          69m     172.25.60.4   server4   <none>           <none>

[root@server1 ~]# echo "source <(kubectl completion bash)" >> ~/.bashrc
[root@server1 ~]# source .bashrc

1.6 Testing

Verify that a pod can run on server5:

[root@server1 ~]# kubectl run test --image=busybox -it --restart=Never
If you don't see a command prompt, try pressing enter.
/ # [root@server1 ~]# 
[root@server1 ~]# kubectl get pod -o wide
NAME   READY   STATUS      RESTARTS   AGE   IP           NODE      NOMINATED NODE   READINESS GATES
test   0/1     Completed   0          97s   10.244.3.2   server5   <none>           <none>

Test: shut down server2 and check the failover:

[root@server3 ~]# ip addr show  // server3 has taken over the cluster entry (the number of masters allowed to be down is (n-1)/2; with 3 masters, (3-1)/2 = 1, so only one master can be down at a time)
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 52:54:00:59:12:9d brd ff:ff:ff:ff:ff:ff
    inet 172.25.60.3/24 brd 172.25.60.255 scope global eth0
       valid_lft forever preferred_lft forever
    inet 172.25.60.100/32 scope global eth0

You can see that the VIP has floated to server3, which now serves as the entry point to the k8s masters. When server2 comes back up, its higher keepalived priority means it will reclaim the VIP and become the VRRP master again.
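That automatic failback is driven purely by the keepalived priorities. If you would rather keep the VIP where it is until the current holder fails again (to avoid an extra VIP move), one option (a sketch, not used in this setup) is to start every node with state BACKUP and add nopreempt to vrrp_instance VI_1 while keeping the different priorities:

vrrp_instance VI_1 {
    state BACKUP        # all nodes start as BACKUP when nopreempt is used
    nopreempt           # a higher-priority node will not take the VIP back automatically
    ...                 # rest of the instance unchanged
}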