Deploying a Kubernetes cluster with kubeadm

[root@work1 ~]# ls /etc/yum.repos.d/
CentOS-Base.repo  CentOS-Debuginfo.repo  CentOS-Media.repo    CentOS-Vault.repo
CentOS-CR.repo    CentOS-fasttrack.repo  CentOS-Sources.repo
[root@work1 ~]# curl -o /etc/yum.repos.d/private.repo 172.0.5.75/app-mirror-conf/centos/7/private.repo
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
100   356  100   356    0     0   9034      0 --:--:-- --:--:-- --:--:--  9368
[root@work1 ~]# ls /etc/yum.repos.d/
CentOS-Base.repo  CentOS-Debuginfo.repo  CentOS-Media.repo    CentOS-Vault.repo
CentOS-CR.repo    CentOS-fasttrack.repo  CentOS-Sources.repo  private.repo
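
The private.repo file points yum at an internal mirror; the repository IDs it defines (CentOS-Local-Base, CentOS-Local-Extras, CentOS-Local-Updates) show up in the yum update output below. A sketch of one stanza, with a hypothetical baseurl:

[CentOS-Local-Base]
name=CentOS-Local-Base
baseurl=http://172.0.5.75/app-mirror/centos/7/os/x86_64/
enabled=1
gpgcheck=0
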
[root@work1 ~]# yum update
Loaded plugins: fastestmirror
Loading mirror speeds from cached hostfile
 * base: mirrors.njupt.edu.cn
 * extras: mirror.lzu.edu.cn
 * updates: mirrors.njupt.edu.cn
CentOS-Local-Base                                                      | 2.9 kB  00:00:00
CentOS-Local-Extras                                                    | 2.9 kB  00:00:00
CentOS-Local-Updates                                                   | 2.9 kB  00:00:00
base                                                                   | 3.6 kB  00:00:00
extras                                                                 | 2.9 kB  00:00:00
updates                                                                | 2.9 kB  00:00:00
.....
[root@work1 ~]# vi /etc/hosts
[root@work1 ~]# setenforce 0
[root@work1 ~]# vi /etc/selinux/config
[root@work1 ~]# vi /etc/sysctl.d/k8s.conf
[root@work1 ~]# modprobe br_netfilter
[root@work1 ~]# sysctl -p /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
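
The three files edited above are not shown in the transcript. A sketch of the intended contents (work1's address is a placeholder; the sysctl values are confirmed by the sysctl -p output above):

/etc/hosts — add an entry for every node:
172.0.10.190  master
172.0.10.191  work1          # placeholder; use work1's actual address

/etc/selinux/config — keep SELinux permissive across reboots (setenforce 0 only lasts until reboot):
SELINUX=permissive

/etc/sysctl.d/k8s.conf — let bridged traffic traverse iptables and enable IP forwarding:
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
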
[root@work1 ~]# vi /etc/sysconfig/modules/ipvs.modules
[root@work1 ~]# chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
nf_conntrack_ipv4      15053  0
nf_defrag_ipv4         12729  1 nf_conntrack_ipv4
ip_vs_sh               12688  0
ip_vs_wrr              12697  0
ip_vs_rr               12600  0
ip_vs                 145497  6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack          137239  2 ip_vs,nf_conntrack_ipv4
libcrc32c              12644  3 xfs,ip_vs,nf_conntrack
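
The ipvs.modules script loads the kernel modules kube-proxy needs for IPVS mode; a sketch consistent with the lsmod output above:

#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
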
[root@work1 ~]# yum install ipset
Loaded plugins: fastestmirror
Loading mirror speeds from cached hostfile
 * base: mirror.jdcloud.com
 * extras: mirror.bit.edu.cn
 * updates: mirror.bit.edu.cn
 ...
[root@work1 ~]# yum install ipvsadm
Loaded plugins: fastestmirror
Loading mirror speeds from cached hostfile
 * base: mirrors.cqu.edu.cn
 * extras: mirrors.cqu.edu.cn
 * updates: ftp.ksu.edu.tw
.....
[root@work1 ~]# yum install chrony -y
Loaded plugins: fastestmirror
Loading mirror speeds from cached hostfile
 * base: mirrors.zju.edu.cn
 * extras: mirrors.cn99.com
 * updates: ftp.twaren.net
Package chrony-3.4-1.el7.x86_64 already installed and latest version
Nothing to do
[root@work1 ~]# systemctl enable chronyd
[root@work1 ~]# systemctl start chronyd
[root@work1 ~]# chronyc sources
210 Number of sources = 4
MS Name/IP address         Stratum Poll Reach LastRx Last sample
===============================================================================
^* de-user.deepinid.deepin.>     3   7   177   385  -2687us[ +724us] +/-  114ms
^- tick.ntp.infomaniak.ch        1   7   117     1    -16ms[  -16ms] +/-  103ms
^? tunnel298741-pt.tunnel.t>     0   9     0     -     +0ns[   +0ns] +/-    0ns
^? ntp8.flashdance.cx            2   8     4   774    +42ms[  +49ms] +/-  190ms
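
In the chronyc output, ^* marks the source currently selected for synchronization, ^- an acceptable source that was not selected, and ^? a source that is unreachable or not yet validated.
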
[root@work1 ~]# date
Fri Nov 15 09:27:36 UTC 2019
[root@work1 ~]# swapoff -a
[root@work1 ~]# vi /etc/fstab
[root@work1 ~]# free -m
              total        used        free      shared  buff/cache   available
Mem:          64261         563       62268          24        1429       63189
Swap:             0           0           0
[root@work1 ~]# vi /etc/sysctl.d/k8s.conf
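
Neither edit is shown in the transcript. In /etc/fstab the swap line is commented out so swap stays off after a reboot, and a swappiness setting is commonly appended to k8s.conf (the device name below is a placeholder):

/etc/fstab:
#/dev/mapper/centos-swap swap swap defaults 0 0

/etc/sysctl.d/k8s.conf (appended):
vm.swappiness = 0
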
[root@work1 ~]# yum install -y yum-utils \
>   device-mapper-persistent-data \
>   lvm2
Loaded plugins: fastestmirror
Loading mirror speeds from cached hostfile
 * base: mirrors.nju.edu.cn
 * extras: mirrors.neusoft.edu.cn
 * updates: ftp.twaren.net
.....
[root@work1 ~]# yum-config-manager \
>     --add-repo \
>     https://download.docker.com/linux/centos/docker-ce.repo
Loaded plugins: fastestmirror
adding repo from: https://download.docker.com/linux/centos/docker-ce.repo
grabbing file https://download.docker.com/linux/centos/docker-ce.repo to /etc/yum.repos.d/docker-ce.repo
repo saved to /etc/yum.repos.d/docker-ce.repo
[root@work1 ~]# yum list docker-ce --showduplicates | sort -r
 * updates: mirrors.cqu.edu.cn
Loading mirror speeds from cached hostfile
Loaded plugins: fastestmirror
 * extras: mirrors.cqu.edu.cn
docker-ce.x86_64            3:19.03.5-3.el7                     docker-ce-stable
docker-ce.x86_64            3:19.03.4-3.el7                     docker-ce-stable
docker-ce.x86_64            3:19.03.3-3.el7                     docker-ce-stable
docker-ce.x86_64            3:19.03.2-3.el7                     docker-ce-stable
docker-ce.x86_64            3:19.03.1-3.el7                     docker-ce-stable
docker-ce.x86_64            3:19.03.0-3.el7                     docker-ce-stable
.....
[root@work1 ~]# yum install docker-ce-19.03.5-3.el7
Loaded plugins: fastestmirror
Loading mirror speeds from cached hostfile
 * base: mirrors.njupt.edu.cn
 * extras: mirrors.neusoft.edu.cn
 * updates: ftp.ksu.edu.tw
...
[root@work1 ~]# cd /etc/docker
[root@work1 docker]# vi daemon.json
[root@work1 docker]# cat daemon.json
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "registry-mirrors": ["https://ot2k4d59.mirror.aliyuncs.com/"],
  "insecure-registries": ["registry.gitlab.casa-systems.com", "10.46.201.144:5000"]
}
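
Setting native.cgroupdriver=systemd matches the cgroup driver kubeadm recommends for the kubelet, avoiding two cgroup managers on the same host; insecure-registries lets Docker pull from the listed internal registries without trusted TLS.
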
[root@work1 docker]# systemctl start docker
[root@work1 docker]# docker ps
CONTAINER ID        IMAGE               COMMAND             CREATED             STATUS              PORTS               NAMES
[root@work1 docker]# systemctl enable docker
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.
[root@work1 docker]# vi /etc/yum.repos.d/kubernetes.repo
[root@work1 docker]# cat /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
        http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
[root@work1 docker]# yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
Loaded plugins: fastestmirror
Loading mirror speeds from cached hostfile
 * base: mirrors.163.com
 * extras: mirrors.aliyun.com
 * updates: mirrors.cqu.edu.cn
kubernetes                                                                             | 1.4 kB  00:00:00
kubernetes/primary                                                                     |  59 kB  00:00:00
kubernetes                                                                                            430/430
...
[root@work1 docker]# kubeadm version
kubeadm version: &version.Info{Major:"1", Minor:"16", GitVersion:"v1.16.3", GitCommit:"b3cbbae08ec52a7fc73d334838e18d17e8512749", GitTreeState:"clean", BuildDate:"2019-11-13T11:20:25Z", GoVersion:"go1.12.12", Compiler:"gc", Platform:"linux/amd64"}
[root@work1 docker]# systemctl enable kubelet.service
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.
[root@master ~]# kubeadm config print init-defaults > kubeadm.yaml
[root@master ~]# ls
anaconda-ks.cfg  kubeadm.yaml  original-ks.cfg
[root@master ~]# vi kubeadm.yaml
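
The edits to kubeadm.yaml are not shown. A sketch of the fields typically changed in the generated defaults, with values consistent with the init output below (the podSubnet is an assumption that matches Calico's default pool):

apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 172.0.10.190      # this control-plane node's address
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: v1.16.0
networking:
  podSubnet: 192.168.0.0/16           # assumed; matches Calico's default IP pool
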
[root@master ~]# kubeadm init --config kubeadm.yaml
[init] Using Kubernetes version: v1.16.0
[preflight] Running pre-flight checks
        [WARNING SystemVerification]: this Docker version is not on the list of validated versions: 19.03.5. Latest validated version: 18.09
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Activating the kubelet service
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [master kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 172.0.10.190]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [master localhost] and IPs [172.0.10.190 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [master localhost] and IPs [172.0.10.190 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[kubelet-check] Initial timeout of 40s passed.
[apiclient] All control plane components are healthy after 41.027593 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.16" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node master as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node master as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: abcdef.0123456789abcdef
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 172.0.10.190:6443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:1edc4812cfbaf891f45e3baf56cc7ec952b857e46d702ea9e1d9dd290256df01
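
The token and CA cert hash in the join command come from this output. The bootstrap token expires after 24 hours; if it is lost or expired, a fresh join command can be generated on the control plane with:

kubeadm token create --print-join-command
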
[root@master ~]# kubectl get no
The connection to the server localhost:8080 was refused - did you specify the right host or port?
[root@master ~]#
[root@master ~]# echo $HOME
/root
[root@master ~]# mkdir -p $HOME/.kube
[root@master ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master ~]# chown $(id -u):$(id -g) $HOME/.kube/config
[root@master ~]# kubectl get ns
NAME              STATUS   AGE
default           Active   35m
kube-node-lease   Active   35m
kube-public       Active   35m
kube-system       Active   35m
[root@master ~]# kubectl get all
NAME                 TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
service/kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   35m
[root@master ~]# kubectl get no
NAME     STATUS     ROLES    AGE   VERSION
master   NotReady   master   35m   v1.16.3
[root@work1 ~]# kubeadm join 172.0.10.190:6443 --token abcdef.0123456789abcdef \
>     --discovery-token-ca-cert-hash sha256:1edc4812cfbaf891f45e3baf56cc7ec952b857e46d702ea9e1d9dd290256df01
[preflight] Running pre-flight checks
        [WARNING SystemVerification]: this Docker version is not on the list of validated versions: 19.03.5. Latest validated version: 18.09
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.16" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Activating the kubelet service
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
[root@master ~]# kubectl get no
NAME     STATUS     ROLES    AGE   VERSION
master   NotReady   master   52m   v1.16.3
work1    NotReady   <none>   28s   v1.16.3
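
Both nodes report NotReady because no pod network add-on is installed yet; deploying Calico below brings them to Ready.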

Deploying the Calico network plugin

[root@master ~]# wget https://docs.projectcalico.org/v3.10/manifests/calico.yaml
--2019-11-18 03:18:34--  https://docs.projectcalico.org/v3.10/manifests/calico.yaml
Resolving docs.projectcalico.org (docs.projectcalico.org)... 2400:6180:0:d1::575:a001, 134.209.106.40
Connecting to docs.projectcalico.org (docs.projectcalico.org)|2400:6180:0:d1::575:a001|:443... failed: Network is unreachable.
Connecting to docs.projectcalico.org (docs.projectcalico.org)|134.209.106.40|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 20653 (20K) [application/x-yaml]
Saving to: ‘calico.yaml’

100%[====================================================================>] 20,653       109KB/s   in 0.2s

2019-11-18 03:18:35 (109 KB/s) - ‘calico.yaml’ saved [20653/20653]

[root@master ~]# kubectl apply -f calico.yaml
configmap/calico-config created
customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamblocks.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamhandles.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamconfigs.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networksets.crd.projectcalico.org created
clusterrole.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrole.rbac.authorization.k8s.io/calico-node created
clusterrolebinding.rbac.authorization.k8s.io/calico-node created
daemonset.apps/calico-node created
serviceaccount/calico-node created
deployment.apps/calico-kube-controllers created
serviceaccount/calico-kube-controllers created

Problem: calico-node and coredns fail to start properly

[root@master ~]# kubectl get pods -n kube-system
NAME                                       READY   STATUS              RESTARTS   AGE
calico-kube-controllers-6b64bcd855-shsnb   0/1     ContainerCreating   0          127m
calico-node-hhc6w                          0/1     PodInitializing     0          15s
calico-node-jlpcd                          0/1     PodInitializing     0          15s
coredns-667f964f9b-flz94                   0/1     ContainerCreating   0          4h21m
coredns-667f964f9b-vpv2t                   0/1     ContainerCreating   0          4h21m
etcd-master                                1/1     Running             0          4h20m
kube-apiserver-master                      1/1     Running             0          4h20m
kube-controller-manager-master             1/1     Running             0          4h20m
kube-proxy-4cpmq                           1/1     Running             0          3h29m
kube-proxy-dchdk                           1/1     Running             0          4h21m
kube-scheduler-master                      1/1     Running             0          4h20m

Check the cause: kubectl describe pod/coredns-667f964f9b-flz94 -n kube-system

Events:
  Type     Reason            Age                   From               Message
  ----     ------            ----                  ----               -------
  Normal   Scheduled         <unknown>             default-scheduler  Successfully assigned kube-system/calico-node-ksl2c to master
  Normal   Pulling           23m (x3 over 24m)     kubelet, master    Pulling image "172.0.5.75:5000/calico/cni:v3.10.1"
  Warning  Failed            23m (x3 over 24m)     kubelet, master    Failed to pull image "172.0.5.75:5000/calico/cni:v3.10.1": rpc error: code = Unknown desc = Error response from daemon: Get https://172.0.5.75:5000/v2/: x509: certificate signed by unknown authority
  Warning  Failed            23m (x3 over 24m)     kubelet, master    Error: ErrImagePull
  Normal   BackOff           23m (x4 over 24m)     kubelet, master    Back-off pulling image "172.0.5.75:5000/calico/cni:v3.10.1"
  Warning  Failed            23m (x4 over 24m)     kubelet, master    Error: ImagePullBackOff
  Warning  DNSConfigForming  4m25s (x96 over 24m)  kubelet, master    Nameserver limits were exceeded, some nameservers have been omitted, the applied nameserver line is: 211.136.192.6 211.136.20.203 210.21.4.130
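
The x509 errors mean Docker on the nodes does not trust the internal registry's certificate. One fix, not shown in this transcript, is to add 172.0.5.75:5000 to insecure-registries in /etc/docker/daemon.json on every node and then restart Docker (systemctl restart docker):

  "insecure-registries": ["registry.gitlab.casa-systems.com", "10.46.201.144:5000", "172.0.5.75:5000"]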

The kubelet service is also reporting errors:

[root@master ~]# systemctl status kubelet.service
● kubelet.service - kubelet: The Kubernetes Node Agent
   Loaded: loaded (/usr/lib/systemd/system/kubelet.service; enabled; vendor preset: disabled)
  Drop-In: /usr/lib/systemd/system/kubelet.service.d
           └─10-kubeadm.conf
   Active: active (running) since Mon 2019-11-18 02:09:50 UTC; 4h 52min ago
     Docs: https://kubernetes.io/docs/
 Main PID: 532 (kubelet)
    Tasks: 38
   Memory: 49.3M
   CGroup: /system.slice/kubelet.service
           └─532 /usr/bin/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kuberne...

Nov 18 06:59:59 master kubelet[532]: E1118 06:59:59.763508     532 pod_workers.go:191] Error syncing pod bb63c358-0d18-4f...
Nov 18 07:00:13 master kubelet[532]: E1118 07:00:13.759222     532 pod_workers.go:191] Error syncing pod bb63c358-0d18-4f...
Nov 18 07:00:24 master kubelet[532]: E1118 07:00:24.759838     532 pod_workers.go:191] Error syncing pod bb63c358-0d18-4f...
Nov 18 07:00:36 master kubelet[532]: E1118 07:00:36.759813     532 pod_workers.go:191] Error syncing pod bb63c358-0d18-4f...
Nov 18 07:00:50 master kubelet[532]: E1118 07:00:50.758987     532 pod_workers.go:191] Error syncing pod bb63c358-0d18-4f...
Nov 18 07:01:05 master kubelet[532]: E1118 07:01:05.760938     532 pod_workers.go:191] Error syncing pod bb63c358-0d18-4f...
Nov 18 07:01:20 master kubelet[532]: E1118 07:01:20.760125     532 pod_workers.go:191] Error syncing pod bb63c358-0d18-4f...
Nov 18 07:01:33 master kubelet[532]: E1118 07:01:33.759450     532 pod_workers.go:191] Error syncing pod bb63c358-0d18-4f...
Nov 18 07:01:44 master kubelet[532]: E1118 07:01:44.759715     532 pod_workers.go:191] Error syncing pod bb63c358-0d18-4f...
Nov 18 07:01:56 master kubelet[532]: E1118 07:01:56.760764     532 pod_workers.go:191] Error syncing pod bb63c358-0d18-4f...
Hint: Some lines were ellipsized, use -l to show in full.

Fix:
On each node, reduce the number of nameserver entries in /etc/resolv.conf to at most three (the kubelet's limit), for example:

[root@master ~]# cat /etc/resolv.conf
; generated by /usr/sbin/dhclient-script
nameserver 211.136.192.6
nameserver 211.136.20.203
nameserver 210.21.4.130
#nameserver 221.5.88.88
#nameserver 202.96.128.166
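
After trimming resolv.conf, restart the kubelet (systemctl restart kubelet) so it picks up the change; note the new Main PID (9907 instead of 532) in the status output below.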

Check the kubelet status again:

[root@master ~]# systemctl status kubelet.service
● kubelet.service - kubelet: The Kubernetes Node Agent
   Loaded: loaded (/usr/lib/systemd/system/kubelet.service; enabled; vendor preset: disabled)
  Drop-In: /usr/lib/systemd/system/kubelet.service.d
           └─10-kubeadm.conf
   Active: active (running) since Mon 2019-11-18 07:12:43 UTC; 2min 33s ago
     Docs: https://kubernetes.io/docs/
 Main PID: 9907 (kubelet)
    Tasks: 33
   Memory: 42.0M
   CGroup: /system.slice/kubelet.service
           └─9907 /usr/bin/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubern...

Nov 18 07:13:05 master kubelet[9907]: I1118 07:13:05.291372    9907 reconciler.go:207] operationExecutor.VerifyCont...74bf")
Nov 18 07:13:05 master kubelet[9907]: I1118 07:13:05.291433    9907 reconciler.go:207] operationExecutor.VerifyCont...5704")
Nov 18 07:13:05 master kubelet[9907]: I1118 07:13:05.291472    9907 reconciler.go:207] operationExecutor.VerifyCont...74bf")
Nov 18 07:13:05 master kubelet[9907]: I1118 07:13:05.291512    9907 reconciler.go:207] operationExecutor.VerifyCont...74bf")
Nov 18 07:13:05 master kubelet[9907]: I1118 07:13:05.291566    9907 reconciler.go:207] operationExecutor.VerifyCont...5704")
Nov 18 07:13:05 master kubelet[9907]: I1118 07:13:05.291684    9907 reconciler.go:207] operationExecutor.VerifyCont...784b")
Nov 18 07:13:05 master kubelet[9907]: I1118 07:13:05.291731    9907 reconciler.go:207] operationExecutor.VerifyCont...784b")
Nov 18 07:13:05 master kubelet[9907]: I1118 07:13:05.291828    9907 reconciler.go:207] operationExecutor.VerifyCont...784b")
Nov 18 07:13:05 master kubelet[9907]: I1118 07:13:05.392422    9907 reconciler.go:207] operationExecutor.VerifyCont...c19b")
Nov 18 07:13:05 master kubelet[9907]: I1118 07:13:05.393318    9907 reconciler.go:154] Reconciler: start to sync state
Hint: Some lines were ellipsized, use -l to show in full.
[root@master ~]# kubectl get all --all-namespaces
NAMESPACE     NAME                                           READY   STATUS    RESTARTS   AGE
kube-system   pod/calico-kube-controllers-6b64bcd855-shsnb   1/1     Running   0          3h31m
kube-system   pod/calico-node-hhc6w                          1/1     Running   0          83m
kube-system   pod/calico-node-jlpcd                          1/1     Running   0          83m
kube-system   pod/coredns-667f964f9b-flz94                   1/1     Running   0          5h44m
kube-system   pod/coredns-667f964f9b-vpv2t                   1/1     Running   0          5h44m
kube-system   pod/etcd-master                                1/1     Running   0          5h44m
kube-system   pod/kube-apiserver-master                      1/1     Running   0          5h43m
kube-system   pod/kube-controller-manager-master             1/1     Running   0          5h44m
kube-system   pod/kube-proxy-4cpmq                           1/1     Running   0          4h53m
kube-system   pod/kube-proxy-dchdk                           1/1     Running   0          5h44m
kube-system   pod/kube-scheduler-master                      1/1     Running   0          5h43m

NAMESPACE     NAME                 TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)                  AGE
default       service/kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP                  5h45m
kube-system   service/kube-dns     ClusterIP   10.96.0.10   <none>        53/UDP,53/TCP,9153/TCP   5h45m

NAMESPACE     NAME                         DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR                 AGE
kube-system   daemonset.apps/calico-node   2         2         2       2            2           beta.kubernetes.io/os=linux   3h31m
kube-system   daemonset.apps/kube-proxy    2         2         2       2            2           beta.kubernetes.io/os=linux   5h45m

NAMESPACE     NAME                                      READY   UP-TO-DATE   AVAILABLE   AGE
kube-system   deployment.apps/calico-kube-controllers   1/1     1            1           3h31m
kube-system   deployment.apps/coredns                   2/2     2            2           5h45m

NAMESPACE     NAME                                                 DESIRED   CURRENT   READY   AGE
kube-system   replicaset.apps/calico-kube-controllers-6b64bcd855   1         1         1       3h31m
kube-system   replicaset.apps/coredns-667f964f9b                   2         2         2       5h44m