After the master finishes initializing, the following two components still report an Unhealthy status:
root@master1:~$ sudo kubectl get cs
NAME                 STATUS      MESSAGE                                                                                       ERROR
controller-manager   Unhealthy   Get http://127.0.0.1:10252/healthz: dial tcp 127.0.0.1:10252: connect: connection refused
scheduler            Unhealthy   Get http://127.0.0.1:10251/healthz: dial tcp 127.0.0.1:10251: connect: connection refused
etcd-0               Healthy     {"health":"true"}
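Before editing anything, the failure is easy to reproduce by hand. A minimal check, assuming it is run on the master itself:

root@master1:~$ curl http://127.0.0.1:10252/healthz   # kube-controller-manager health endpoint: connection refused
root@master1:~$ curl http://127.0.0.1:10251/healthz   # kube-scheduler health endpoint: connection refused
root@master1:~$ ss -tanlp | grep '10251\|10252'       # prints nothing: no process is listening on either port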
The fix commonly suggested online is to edit the static Pod manifests: recent kubeadm versions start both components with --port=0, which disables the insecure HTTP ports (10251 and 10252) that kubectl get cs probes.
root@master1:~$ ls /etc/kubernetes/manifests/
etcd.yaml kube-apiserver.yaml kube-controller-manager.yaml kube-scheduler.yaml
Edit the manifest files and comment out (or delete) the --port=0 line. Before modifying a manifest, make a backup of it first.
Note:
When backing up the manifest files, do not place the backup copies as sibling files in the same directory, i.e. /etc/kubernetes/manifests. Back them up to a different directory, or create a backup subdirectory such as /etc/kubernetes/manifests/bak. Otherwise, even after the steps below, the master node still will not listen on ports 10251 and 10252, and the component health status will not return to Healthy.
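A minimal backup sketch following that note (the bak subdirectory name is just an example; kubelet reads manifests only from /etc/kubernetes/manifests itself and does not descend into subdirectories):

root@master1:~$ mkdir -p /etc/kubernetes/manifests/bak
root@master1:~$ cp /etc/kubernetes/manifests/kube-controller-manager.yaml \
                   /etc/kubernetes/manifests/kube-scheduler.yaml \
                   /etc/kubernetes/manifests/bak/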
root@master1:~$ vim /etc/kubernetes/manifests/kube-controller-manager.yaml
  - command:
    - kube-controller-manager
    - --allocate-node-cidrs=true
    - --authentication-kubeconfig=/etc/kubernetes/controller-manager.conf
    - --authorization-kubeconfig=/etc/kubernetes/controller-manager.conf
    - --bind-address=127.0.0.1
    - --client-ca-file=/etc/kubernetes/pki/ca.crt
    - --cluster-cidr=10.244.0.0/16
    - --cluster-name=kubernetes
    - --cluster-signing-cert-file=/etc/kubernetes/pki/ca.crt
    - --cluster-signing-key-file=/etc/kubernetes/pki/ca.key
    - --controllers=*,bootstrapsigner,tokencleaner
    - --kubeconfig=/etc/kubernetes/controller-manager.conf
    - --leader-elect=true
    - --node-cidr-mask-size=24
    - --port=0    ########## delete this line ##########
    - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt
    - --root-ca-file=/etc/kubernetes/pki/ca.crt
    - --service-account-private-key-file=/etc/kubernetes/pki/sa.key
    - --service-cluster-ip-range=10.96.0.0/12
    - --use-service-account-credentials=true
root@master1:~$ vim /etc/kubernetes/manifests/kube-scheduler.yaml
  - command:
    - kube-scheduler
    - --authentication-kubeconfig=/etc/kubernetes/scheduler.conf
    - --authorization-kubeconfig=/etc/kubernetes/scheduler.conf
    - --bind-address=127.0.0.1
    - --kubeconfig=/etc/kubernetes/scheduler.conf
    - --leader-elect=true
    - --port=0    ########## delete this line ##########
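kubelet watches /etc/kubernetes/manifests and recreates a static Pod whenever its manifest changes, so the edits above usually take effect on their own; restarting kubelet in the next step simply forces the reload. To watch the two Pods come back, something like this works (Pod names assume a default kubeadm setup, where each static Pod is suffixed with the node name):

root@master1:~$ kubectl -n kube-system get pods | grep -E 'kube-(controller-manager|scheduler)'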
Restart the kubelet service:
root@master1:~$ systemctl restart kubelet
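If the ports still do not come up after the restart, kubelet's own state and recent log lines are the first things to check; both are standard systemd commands:

root@master1:~$ systemctl status kubelet
root@master1:~$ journalctl -u kubelet -n 50   # last 50 log lines from kubelet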
Check the listening ports and the component status:
root@master1:~$ ss -tanlp | grep '10251\|10252'
LISTEN   0   128   *:10251   *:*   users:(("kube-scheduler",pid=51054,fd=5))
LISTEN   0   128   *:10252   *:*   users:(("kube-controller",pid=51100,fd=5))
root@master1:~$ kubectl get cs
NAME                 STATUS    MESSAGE   ERROR
scheduler            Healthy   ok
controller-manager   Healthy   ok
etcd-0               Healthy   {"health":"true"}
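As a final sanity check, the same health endpoints that were refusing connections at the start should now answer with the literal body ok:

root@master1:~$ curl http://127.0.0.1:10251/healthz   # kube-scheduler, expect: ok
root@master1:~$ curl http://127.0.0.1:10252/healthz   # kube-controller-manager, expect: ok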