1. How to remove a Node from a Kubernetes cluster
For example, to remove the node k8s-node03 from the cluster, proceed as follows:
1) First, check the node status on the master node:

[root@k8s-master01 ~]# kubectl get nodes
NAME         STATUS   ROLES    AGE   VERSION
k8s-node01   Ready    <none>   47d   v1.14.2
k8s-node02   Ready    <none>   47d   v1.14.2
k8s-node03   Ready    <none>   47d   v1.14.2

2) Next, check the pod status:

[root@k8s-master01 ~]# kubectl get pods -o wide
NAME                        READY   STATUS    RESTARTS   AGE   IP             NODE         NOMINATED NODE   READINESS GATES
dnsutils-ds-5sc4z           1/1     Running   963        40d   172.30.56.3    k8s-node02   <none>           <none>
dnsutils-ds-h546r           1/1     Running   963        40d   172.30.72.5    k8s-node03   <none>           <none>
dnsutils-ds-jx5kx           1/1     Running   963        40d   172.30.88.4    k8s-node01   <none>           <none>
kevin-nginx                 1/1     Running   0          27d   172.30.72.11   k8s-node03   <none>           <none>
my-nginx-5dd67b97fb-69gvm   1/1     Running   0          40d   172.30.72.4    k8s-node03   <none>           <none>
my-nginx-5dd67b97fb-8j4k6   1/1     Running   0          40d   172.30.88.3    k8s-node01   <none>           <none>
nginx-7db9fccd9b-dkdzf      1/1     Running   0          27d   172.30.88.8    k8s-node01   <none>           <none>
nginx-7db9fccd9b-t8njb      1/1     Running   0          27d   172.30.72.10   k8s-node03   <none>           <none>
nginx-7db9fccd9b-vrp9f      1/1     Running   0          27d   172.30.56.6    k8s-node02   <none>           <none>
nginx-ds-4lf8z              1/1     Running   0          41d   172.30.56.2    k8s-node02   <none>           <none>
nginx-ds-6kfsw              1/1     Running   0          41d   172.30.72.2    k8s-node03   <none>           <none>
nginx-ds-xqdgw              1/1     Running   0          41d   172.30.88.2    k8s-node01   <none>           <none>

3) Cordon the k8s-node03 node and drain the pods running on it:

[root@k8s-master01 ~]# kubectl drain k8s-node03 --delete-local-data --force --ignore-daemonsets
node/k8s-node03 cordoned
WARNING: deleting Pods not managed by ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet: default/kevin-nginx; ignoring DaemonSet-managed Pods: default/dnsutils-ds-h546r, default/nginx-ds-6kfsw, kube-system/node-exporter-zmb68
evicting pod "metrics-server-54997795d9-rczmc"
evicting pod "kevin-nginx"
evicting pod "nginx-7db9fccd9b-t8njb"
evicting pod "coredns-5b969f4c88-pd5js"
evicting pod "kubernetes-dashboard-7976c5cb9c-4jpzb"
evicting pod "my-nginx-5dd67b97fb-69gvm"
pod/my-nginx-5dd67b97fb-69gvm evicted
pod/coredns-5b969f4c88-pd5js evicted
pod/nginx-7db9fccd9b-t8njb evicted
pod/kubernetes-dashboard-7976c5cb9c-4jpzb evicted
pod/kevin-nginx evicted
pod/metrics-server-54997795d9-rczmc evicted
node/k8s-node03 evicted
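A note on the drain flags: --ignore-daemonsets is needed because DaemonSet-managed pods (dnsutils-ds, nginx-ds, node-exporter) would otherwise block the drain, and --force allows evicting the bare pod kevin-nginx, which has no controller to recreate it. If the goal were temporary maintenance rather than removal, you would stop here and later make the node schedulable again; a minimal sketch:

# Mark the node schedulable again after maintenance (reverses the cordon that drain applied)
[root@k8s-master01 ~]# kubectl uncordon k8s-node03
node/k8s-node03 uncordoned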
4) Delete the k8s-node03 node:

[root@k8s-master01 ~]# kubectl delete node k8s-node03
node "k8s-node03" deleted

5) Check the pods again: the pods that were on k8s-node03 have been rescheduled onto the remaining nodes.

[root@k8s-master01 ~]# kubectl get pods -o wide
NAME                        READY   STATUS    RESTARTS   AGE   IP            NODE         NOMINATED NODE   READINESS GATES
dnsutils-ds-5sc4z           1/1     Running   963        40d   172.30.56.3   k8s-node02   <none>           <none>
dnsutils-ds-jx5kx           1/1     Running   963        40d   172.30.88.4   k8s-node01   <none>           <none>
my-nginx-5dd67b97fb-8j4k6   1/1     Running   0          40d   172.30.88.3   k8s-node01   <none>           <none>
my-nginx-5dd67b97fb-kx2pc   1/1     Running   0          98s   172.30.56.7   k8s-node02   <none>           <none>
nginx-7db9fccd9b-7vbhq      1/1     Running   0          98s   172.30.88.7   k8s-node01   <none>           <none>
nginx-7db9fccd9b-dkdzf      1/1     Running   0          27d   172.30.88.8   k8s-node01   <none>           <none>
nginx-7db9fccd9b-vrp9f      1/1     Running   0          27d   172.30.56.6   k8s-node02   <none>           <none>
nginx-ds-4lf8z              1/1     Running   0          41d   172.30.56.2   k8s-node02   <none>           <none>
nginx-ds-xqdgw              1/1     Running   0          41d   172.30.88.2   k8s-node01   <none>           <none>

[root@k8s-master01 ~]# kubectl get nodes
NAME         STATUS   ROLES    AGE   VERSION
k8s-node01   Ready    <none>   47d   v1.14.2
k8s-node02   Ready    <none>   47d   v1.14.2

6) Finally, clean up on the k8s-node03 node itself:

[root@k8s-node03 ~]# systemctl stop kubelet kube-proxy flanneld docker
[root@k8s-node03 ~]# source /opt/k8s/bin/environment.sh
[root@k8s-node03 ~]# mount | grep "${K8S_DIR}" | awk '{print $3}' | xargs sudo umount
[root@k8s-node03 ~]# rm -rf ${K8S_DIR}/kubelet
[root@k8s-node03 ~]# rm -rf ${DOCKER_DIR}
[root@k8s-node03 ~]# rm -rf /var/run/flannel/
[root@k8s-node03 ~]# rm -rf /var/run/docker/
[root@k8s-node03 ~]# rm -rf /etc/systemd/system/{kubelet,docker,flanneld,kube-nginx}.service
[root@k8s-node03 ~]# rm -rf /opt/k8s/bin/*
[root@k8s-node03 ~]# rm -rf /etc/flanneld/cert /etc/kubernetes/cert
[root@k8s-node03 ~]# iptables -F && iptables -X && iptables -F -t nat && iptables -X -t nat
[root@k8s-node03 ~]# ip link del flannel.1
[root@k8s-node03 ~]# ip link del docker0
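For convenience, the cleanup in step 6 can be gathered into a single script. This is a minimal sketch, not part of the original procedure; it assumes the same /opt/k8s/bin/environment.sh layout used in this article (K8S_DIR, DOCKER_DIR) and must be run as root on the node being removed:

#!/bin/bash
# cleanup-node.sh -- hypothetical consolidation of step 6; run as root on the removed node.
# Deliberately no 'set -e', so later steps still run if an earlier one fails.
source /opt/k8s/bin/environment.sh
systemctl stop kubelet kube-proxy flanneld docker
# Unmount any kubelet volume mounts before removing the working directories
mount | grep "${K8S_DIR}" | awk '{print $3}' | xargs -r umount
rm -rf "${K8S_DIR}/kubelet" "${DOCKER_DIR}" /var/run/flannel/ /var/run/docker/
rm -rf /etc/systemd/system/{kubelet,docker,flanneld,kube-nginx}.service
rm -rf /opt/k8s/bin/* /etc/flanneld/cert /etc/kubernetes/cert
# Flush firewall rules and delete the network interfaces left behind
iptables -F && iptables -X && iptables -F -t nat && iptables -X -t nat
ip link del flannel.1
ip link del docker0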
2. How to add a Node to a Kubernetes cluster
For example, re-add the previously removed k8s-node03 node to the cluster (all of the following operations are performed on the k8s-master01 node).
1) Edit the variables script /opt/k8s/bin/environment.sh so that the node arrays contain only the k8s-node03 node, then use it for the distribution loops below.

[root@k8s-master01 ~]# cp /opt/k8s/bin/environment.sh /opt/k8s/bin/environment.sh.bak1
[root@k8s-master01 ~]# vim /opt/k8s/bin/environment.sh
........
# IP array of all node nodes in the cluster
export NODE_NODE_IPS=(172.16.60.246)
# Hostname array corresponding to the node IPs
export NODE_NODE_NAMES=(k8s-node03)

[root@k8s-master01 ~]# diff /opt/k8s/bin/environment.sh /opt/k8s/bin/environment.sh.bak1
17c17
< export NODE_NODE_IPS=(172.16.60.246)
---
> export NODE_NODE_IPS=(172.16.60.244 172.16.60.245 172.16.60.246)
19c19
< export NODE_NODE_NAMES=(k8s-node03)
---
> export NODE_NODE_NAMES=(k8s-node01 k8s-node02 k8s-node03)

2) Distribute the certificate files previously generated on k8s-master01 to the newly added node:

[root@k8s-master01 ~]# cd /opt/k8s/work/
[root@k8s-master01 work]# source /opt/k8s/bin/environment.sh
[root@k8s-master01 work]# for node_node_ip in ${NODE_NODE_IPS[@]}
  do
    echo ">>> ${node_node_ip}"
    ssh root@${node_node_ip} "mkdir -p /etc/kubernetes/cert"
    scp ca*.pem ca-config.json root@${node_node_ip}:/etc/kubernetes/cert
  done

3) Set up the Flannel container network:

[root@k8s-master01 work]# cd /opt/k8s/work
[root@k8s-master01 work]# source /opt/k8s/bin/environment.sh
[root@k8s-master01 work]# for node_node_ip in ${NODE_NODE_IPS[@]}
  do
    echo ">>> ${node_node_ip}"
    scp flannel/{flanneld,mk-docker-opts.sh} root@${node_node_ip}:/opt/k8s/bin/
    ssh root@${node_node_ip} "chmod +x /opt/k8s/bin/*"
  done

[root@k8s-master01 work]# for node_node_ip in ${NODE_NODE_IPS[@]}
  do
    echo ">>> ${node_node_ip}"
    ssh root@${node_node_ip} "mkdir -p /etc/flanneld/cert"
    scp flanneld*.pem root@${node_node_ip}:/etc/flanneld/cert
  done

[root@k8s-master01 work]# for node_node_ip in ${NODE_NODE_IPS[@]}
  do
    echo ">>> ${node_node_ip}"
    scp flanneld.service root@${node_node_ip}:/etc/systemd/system/
  done

[root@k8s-master01 work]# for node_node_ip in ${NODE_NODE_IPS[@]}
  do
    echo ">>> ${node_node_ip}"
    ssh root@${node_node_ip} "systemctl daemon-reload && systemctl enable flanneld && systemctl restart flanneld"
  done

[root@k8s-master01 work]# for node_node_ip in ${NODE_NODE_IPS[@]}
  do
    echo ">>> ${node_node_ip}"
    ssh root@${node_node_ip} "systemctl status flanneld|grep Active"
  done
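Before moving on, it is worth confirming that flanneld actually obtained a subnet on the new node; the article performs a similar interface check after docker is deployed. A small sketch, assuming environment.sh defines NODE_NODE_IPS as above:

[root@k8s-master01 work]# source /opt/k8s/bin/environment.sh
[root@k8s-master01 work]# for node_node_ip in ${NODE_NODE_IPS[@]}
  do
    echo ">>> ${node_node_ip}"
    # flannel.1 should carry an address from the cluster pod network (172.30.0.0/16 here)
    ssh root@${node_node_ip} "ip addr show flannel.1 | grep 'inet '" || echo "WARN: no flannel.1 address on ${node_node_ip}"
  done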
4) Deploy the node runtime components.

-> Install dependency packages:

[root@k8s-master01 ~]# source /opt/k8s/bin/environment.sh
[root@k8s-master01 ~]# for node_node_ip in ${NODE_NODE_IPS[@]}
  do
    echo ">>> ${node_node_ip}"
    ssh root@${node_node_ip} "yum install -y epel-release"
    ssh root@${node_node_ip} "yum install -y conntrack ipvsadm ntp ntpdate ipset jq iptables curl sysstat libseccomp && modprobe ip_vs"
  done

-> Deploy the docker component:

[root@k8s-master01 work]# cd /opt/k8s/work
[root@k8s-master01 work]# source /opt/k8s/bin/environment.sh
[root@k8s-master01 work]# for node_node_ip in ${NODE_NODE_IPS[@]}
  do
    echo ">>> ${node_node_ip}"
    scp docker/* root@${node_node_ip}:/opt/k8s/bin/
    ssh root@${node_node_ip} "chmod +x /opt/k8s/bin/*"
  done

[root@k8s-master01 work]# for node_node_ip in ${NODE_NODE_IPS[@]}
  do
    echo ">>> ${node_node_ip}"
    scp docker.service root@${node_node_ip}:/etc/systemd/system/
  done

[root@k8s-master01 work]# for node_node_ip in ${NODE_NODE_IPS[@]}
  do
    echo ">>> ${node_node_ip}"
    ssh root@${node_node_ip} "mkdir -p /etc/docker/ ${DOCKER_DIR}/{data,exec}"
    scp docker-daemon.json root@${node_node_ip}:/etc/docker/daemon.json
  done

[root@k8s-master01 work]# for node_node_ip in ${NODE_NODE_IPS[@]}
  do
    echo ">>> ${node_node_ip}"
    ssh root@${node_node_ip} "systemctl daemon-reload && systemctl enable docker && systemctl restart docker"
  done

[root@k8s-master01 work]# for node_node_ip in ${NODE_NODE_IPS[@]}
  do
    echo ">>> ${node_node_ip}"
    ssh root@${node_node_ip} "systemctl status docker|grep Active"
  done

[root@k8s-master01 work]# for node_node_ip in ${NODE_NODE_IPS[@]}
  do
    echo ">>> ${node_node_ip}"
    ssh root@${node_node_ip} "/usr/sbin/ip addr show flannel.1 && /usr/sbin/ip addr show docker0"
  done

-> Deploy the kubelet component:

[root@k8s-master01 ~]# cd /opt/k8s/work
[root@k8s-master01 work]# source /opt/k8s/bin/environment.sh
[root@k8s-master01 work]# for node_node_ip in ${NODE_NODE_IPS[@]}
  do
    echo ">>> ${node_node_ip}"
    scp kubernetes/server/bin/kubelet root@${node_node_ip}:/opt/k8s/bin/
    ssh root@${node_node_ip} "chmod +x /opt/k8s/bin/*"
  done

-> Create a token (the one created earlier has expired -- a bootstrap token is only valid for 24 hours, i.e. one day!):

[root@k8s-master01 work]# cd /opt/k8s/work
[root@k8s-master01 work]# source /opt/k8s/bin/environment.sh
[root@k8s-master01 work]# for node_node_name in ${NODE_NODE_NAMES[@]}
  do
    echo ">>> ${node_node_name}"
    # Create the token
    export BOOTSTRAP_TOKEN=$(kubeadm token create \
      --description kubelet-bootstrap-token \
      --groups system:bootstrappers:${node_node_name} \
      --kubeconfig ~/.kube/config)
    # Set cluster parameters
    kubectl config set-cluster kubernetes \
      --certificate-authority=/etc/kubernetes/cert/ca.pem \
      --embed-certs=true \
      --server=${KUBE_APISERVER} \
      --kubeconfig=kubelet-bootstrap-${node_node_name}.kubeconfig
    # Set client authentication parameters
    kubectl config set-credentials kubelet-bootstrap \
      --token=${BOOTSTRAP_TOKEN} \
      --kubeconfig=kubelet-bootstrap-${node_node_name}.kubeconfig
    # Set context parameters
    kubectl config set-context default \
      --cluster=kubernetes \
      --user=kubelet-bootstrap \
      --kubeconfig=kubelet-bootstrap-${node_node_name}.kubeconfig
    # Set the default context
    kubectl config use-context default --kubeconfig=kubelet-bootstrap-${node_node_name}.kubeconfig
  done

Check the token kubeadm created for the new node:

[root@k8s-master01 work]# kubeadm token list --kubeconfig ~/.kube/config
TOKEN                     TTL   EXPIRES                     USAGES                   DESCRIPTION               EXTRA GROUPS
sdwq5g.llzr9ytm32h1mnh1   23h   2019-08-06T11:47:47+08:00   authentication,signing   kubelet-bootstrap-token   system:bootstrappers:k8s-node03

[root@k8s-master01 work]# kubectl get secrets -n kube-system|grep bootstrap-token
bootstrap-token-sdwq5g   bootstrap.kubernetes.io/token   7   77s

-> Distribute the bootstrap kubeconfig file to the new node:

[root@k8s-master01 work]# cd /opt/k8s/work
[root@k8s-master01 work]# source /opt/k8s/bin/environment.sh
[root@k8s-master01 work]# for node_node_name in ${NODE_NODE_NAMES[@]}
  do
    echo ">>> ${node_node_name}"
    scp kubelet-bootstrap-${node_node_name}.kubeconfig root@${node_node_name}:/etc/kubernetes/kubelet-bootstrap.kubeconfig
  done

-> Distribute the kubelet parameter config file:

[root@k8s-master01 work]# cd /opt/k8s/work
[root@k8s-master01 work]# source /opt/k8s/bin/environment.sh
[root@k8s-master01 work]# for node_node_ip in ${NODE_NODE_IPS[@]}
  do
    echo ">>> ${node_node_ip}"
    sed -e "s/##NODE_NODE_IP##/${node_node_ip}/" kubelet-config.yaml.template > kubelet-config-${node_node_ip}.yaml.template
    scp kubelet-config-${node_node_ip}.yaml.template root@${node_node_ip}:/etc/kubernetes/kubelet-config.yaml
  done

-> Distribute the kubelet systemd unit file:

[root@k8s-master01 work]# cd /opt/k8s/work
[root@k8s-master01 work]# source /opt/k8s/bin/environment.sh
[root@k8s-master01 work]# for node_node_name in ${NODE_NODE_NAMES[@]}
  do
    echo ">>> ${node_node_name}"
    sed -e "s/##NODE_NODE_NAME##/${node_node_name}/" kubelet.service.template > kubelet-${node_node_name}.service
    scp kubelet-${node_node_name}.service root@${node_node_name}:/etc/systemd/system/kubelet.service
  done
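Because bootstrap tokens default to a 24-hour TTL (which is exactly why the earlier token had to be recreated), you may want a token that survives longer while staging several nodes. kubeadm lets you set the TTL explicitly; a sketch, with the 48-hour value chosen arbitrarily here:

# Create a bootstrap token valid for 48 hours instead of the default 24h.
# The TTL value is an arbitrary example; "0" would make it never expire -- avoid that in production.
[root@k8s-master01 work]# kubeadm token create \
  --description kubelet-bootstrap-token \
  --groups system:bootstrappers:k8s-node03 \
  --ttl 48h0m0s \
  --kubeconfig ~/.kube/config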
-> Start the kubelet service:

[root@k8s-master01 work]# source /opt/k8s/bin/environment.sh
[root@k8s-master01 work]# for node_node_ip in ${NODE_NODE_IPS[@]}
  do
    echo ">>> ${node_node_ip}"
    ssh root@${node_node_ip} "mkdir -p ${K8S_DIR}/kubelet/kubelet-plugins/volume/exec/"
    ssh root@${node_node_ip} "/usr/sbin/swapoff -a"
    ssh root@${node_node_ip} "systemctl daemon-reload && systemctl enable kubelet && systemctl restart kubelet"
  done

-> Deploy the kube-proxy component:

[root@k8s-master01 ~]# cd /opt/k8s/work
[root@k8s-master01 work]# source /opt/k8s/bin/environment.sh
[root@k8s-master01 work]# for node_node_ip in ${NODE_NODE_IPS[@]}
  do
    echo ">>> ${node_node_ip}"
    scp kubernetes/server/bin/kube-proxy root@${node_node_ip}:/opt/k8s/bin/
    ssh root@${node_node_ip} "chmod +x /opt/k8s/bin/*"
  done

[root@k8s-master01 work]# for node_node_name in ${NODE_NODE_NAMES[@]}
  do
    echo ">>> ${node_node_name}"
    scp kube-proxy.kubeconfig root@${node_node_name}:/etc/kubernetes/
  done

=================================================================================================================================================
Special note (only required when adding a completely new node):
Because we are restoring the previously removed k8s-node03 node here, there is no need to regenerate its config file from the kube-proxy config template -- it was already generated earlier:

[root@k8s-master01 work]# ll kube-proxy-config-k8s-node*
-rw-r--r-- 1 root root 500 Jun 24 20:27 kube-proxy-config-k8s-node01.yaml.template
-rw-r--r-- 1 root root 500 Jun 24 20:27 kube-proxy-config-k8s-node02.yaml.template
-rw-r--r-- 1 root root 500 Jun 24 20:27 kube-proxy-config-k8s-node03.yaml.template

For a completely new node, e.g. 172.16.60.240 (hostname: k8s-node04), this step additionally requires copying an existing node's config file as the new node's config, adjusting it, and distributing it:

[root@k8s-master01 work]# cp kube-proxy-config-k8s-node03.yaml.template kube-proxy-config-k8s-node04.yaml.template
[root@k8s-master01 work]# sed -i 's/172.16.60.246/172.16.60.240/g' kube-proxy-config-k8s-node04.yaml.template
[root@k8s-master01 work]# sed -i 's/k8s-node03/k8s-node04/g' kube-proxy-config-k8s-node04.yaml.template
[root@k8s-master01 work]# scp kube-proxy-config-k8s-node04.yaml.template root@k8s-node04:/etc/kubernetes/kube-proxy-config.yaml

When adding multiple new nodes, likewise copy an existing node's config file for each new node, then distribute them.
=================================================================================================================================================

[root@k8s-master01 work]# for node_node_name in ${NODE_NODE_NAMES[@]}
  do
    echo ">>> ${node_node_name}"
    scp kube-proxy.service root@${node_node_name}:/etc/systemd/system/
  done

[root@k8s-master01 work]# for node_node_ip in ${NODE_NODE_IPS[@]}
  do
    echo ">>> ${node_node_ip}"
    ssh root@${node_node_ip} "mkdir -p ${K8S_DIR}/kube-proxy"
    ssh root@${node_node_ip} "modprobe ip_vs_rr"
    ssh root@${node_node_ip} "systemctl daemon-reload && systemctl enable kube-proxy && systemctl restart kube-proxy"
  done

[root@k8s-master01 work]# for node_node_ip in ${NODE_NODE_IPS[@]}
  do
    echo ">>> ${node_node_ip}"
    ssh root@${node_node_ip} "systemctl status kube-proxy|grep Active"
  done

-> Manually approve the server cert CSR:

[root@k8s-master01 work]# kubectl get csr
NAME        AGE     REQUESTOR                 CONDITION
csr-5fwlh   3m34s   system:bootstrap:sdwq5g   Approved,Issued
csr-t547p   3m21s   system:node:k8s-node03    Pending

[root@k8s-master01 work]# kubectl certificate approve csr-t547p
certificatesigningrequest.certificates.k8s.io/csr-t547p approved

[root@k8s-master01 work]# kubectl get csr
NAME        AGE     REQUESTOR                 CONDITION
csr-5fwlh   3m53s   system:bootstrap:sdwq5g   Approved,Issued
csr-t547p   3m40s   system:node:k8s-node03    Approved,Issued
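If several nodes join at once, approving each CSR by name gets tedious. A sketch that approves every CSR still in the Pending state (it assumes you trust all pending requests -- review them first in a real cluster; the grep filter relies on the default kubectl get csr column layout):

# Approve all CertificateSigningRequests whose CONDITION column is still Pending
[root@k8s-master01 work]# kubectl get csr | grep Pending | awk '{print $1}' | xargs -r kubectl certificate approve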
-> Check the cluster status: k8s-node03 has been re-joined to the cluster and has already been assigned pod resources.

[root@k8s-master01 work]# kubectl get nodes
NAME         STATUS   ROLES    AGE   VERSION
k8s-node01   Ready    <none>   47d   v1.14.2
k8s-node02   Ready    <none>   47d   v1.14.2
k8s-node03   Ready    <none>   1s    v1.14.2

[root@k8s-master01 work]# kubectl get pods -o wide
NAME                        READY   STATUS    RESTARTS   AGE    IP            NODE         NOMINATED NODE   READINESS GATES
dnsutils-ds-5sc4z           1/1     Running   965        40d    172.30.56.3   k8s-node02   <none>           <none>
dnsutils-ds-gc8sb           1/1     Running   1          94m    172.30.72.2   k8s-node03   <none>           <none>
dnsutils-ds-jx5kx           1/1     Running   966        40d    172.30.88.4   k8s-node01   <none>           <none>
my-nginx-5dd67b97fb-8j4k6   1/1     Running   0          40d    172.30.88.3   k8s-node01   <none>           <none>
my-nginx-5dd67b97fb-kx2pc   1/1     Running   0          174m   172.30.56.7   k8s-node02   <none>           <none>
nginx-7db9fccd9b-7vbhq      1/1     Running   0          174m   172.30.88.7   k8s-node01   <none>           <none>
nginx-7db9fccd9b-dkdzf      1/1     Running   0          27d    172.30.88.8   k8s-node01   <none>           <none>
nginx-7db9fccd9b-vrp9f      1/1     Running   0          27d    172.30.56.6   k8s-node02   <none>           <none>
nginx-ds-4lf8z              1/1     Running   0          41d    172.30.56.2   k8s-node02   <none>           <none>
nginx-ds-jn759              1/1     Running   0          94m    172.30.72.3   k8s-node03   <none>           <none>
nginx-ds-xqdgw              1/1     Running   0          41d    172.30.88.2   k8s-node01   <none>           <none>

[root@k8s-master01 work]# kubectl top node
NAME         CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%
k8s-node01   96m          2%     2123Mi          55%
k8s-node02   133m         3%     1772Mi          46%
k8s-node03   46m          1%     4859Mi          61%

=======================================================================================================================================
Note 1
To add a completely new node to the cluster above:
1) Prepare the new node's environment: set up passwordless ssh trust from k8s-master01 to the new node, add the hostname binding in /etc/hosts, disable the firewall, and so on.
2) In the variables script /opt/k8s/bin/environment.sh, set the NODE_NODE_IPS and NODE_NODE_NAMES variables to the new node's information.
3) Run the complete series of steps used above to add k8s-node03.
=======================================================================================================================================
Note 2
The cluster above was installed from binaries. For a cluster created with the kubeadm tool, re-joining a node works as follows (a convenient shortcut is sketched after this note).

Command format for joining a node to the cluster (run on the node, as root):
# kubeadm join --token <token> <master-ip>:<master-port> --discovery-token-ca-cert-hash sha256:<hash>

If you have forgotten the master's token, look it up with (run on the master):
# kubeadm token list

By default a token is valid for 24 hours; if it has expired, generate a new one with (run on the master):
# kubeadm token create

If you cannot find the value for --discovery-token-ca-cert-hash, generate it with (run on the master):
# openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'

Shortly after joining, the node will show up in the cluster (check with kubectl get nodes on the master).
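On recent kubeadm versions, the token creation and CA-hash computation in Note 2 can usually be combined into a single step; a sketch (run on the master):

# Create a fresh token and print the complete 'kubeadm join' command,
# including the --discovery-token-ca-cert-hash value
# kubeadm token create --print-join-command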