Hostname | IP address | Software deployed | Role |
---|---|---|---|
M-kube12 | 192.168.10.12 | master+etcd+docker+keepalived+haproxy | master |
M-kube13 | 192.168.10.13 | master+etcd+docker+keepalived+haproxy | master |
M-kube14 | 192.168.10.14 | master+etcd+docker+keepalived+haproxy | master |
N-kube15 | 192.168.10.15 | docker+node | node |
N-kube16 | 192.168.10.16 | docker+node | node |
VIP | 192.168.10.100 | keepalived virtual IP | VIP |
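For convenience, the hostname/IP mapping from the table can be appended to /etc/hosts on every machine (a minimal sketch; skip it if you already have working name resolution):

```bash
cat >> /etc/hosts << EOF
192.168.10.12  M-kube12
192.168.10.13  M-kube13
192.168.10.14  M-kube14
192.168.10.15  N-kube15
192.168.10.16  N-kube16
EOF
```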
```bash
# Adapted from a reference script: load all IPVS kernel modules
cat << EOF > /etc/sysconfig/modules/ipvs.modules
#!/bin/bash
ipvs_modules_dir="/usr/lib/modules/\`uname -r\`/kernel/net/netfilter/ipvs"
for i in \`ls \$ipvs_modules_dir | sed -r 's#(.*).ko.*#\1#'\`; do
  /sbin/modinfo -F filename \$i &> /dev/null
  if [ \$? -eq 0 ]; then
    /sbin/modprobe \$i
  fi
done
EOF

chmod +x /etc/sysconfig/modules/ipvs.modules
bash /etc/sysconfig/modules/ipvs.modules
```
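To confirm that the IPVS modules were actually loaded after running the script:

```bash
lsmod | grep -e ip_vs -e nf_conntrack
```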
```bash
yum install -y keepalived

# keepalived configuration on 192.168.10.12
cat <<EOF > /etc/keepalived/keepalived.conf
global_defs {
    router_id LVS_k8s
}
vrrp_script CheckK8sMaster {
    script "curl -k https://192.168.10.100:6444"
    interval 3
    timeout 9
    fall 2
    rise 2
}
vrrp_instance VI_1 {
    state MASTER
    interface ens33
    virtual_router_id 100
    priority 100
    advert_int 1
    mcast_src_ip 192.168.10.12
    nopreempt
    authentication {
        auth_type PASS
        auth_pass fana123
    }
    unicast_peer {
        192.168.10.13
        192.168.10.14
    }
    virtual_ipaddress {
        192.168.10.100/24
    }
    track_script {
        CheckK8sMaster
    }
}
EOF

# keepalived configuration on 192.168.10.13
cat <<EOF > /etc/keepalived/keepalived.conf
global_defs {
    router_id LVS_k8s
}
vrrp_script CheckK8sMaster {
    script "curl -k https://192.168.10.100:6444"
    interval 3
    timeout 9
    fall 2
    rise 2
}
vrrp_instance VI_1 {
    state BACKUP
    interface ens33
    virtual_router_id 100
    priority 90
    advert_int 1
    mcast_src_ip 192.168.10.13
    nopreempt
    authentication {
        auth_type PASS
        auth_pass fana123
    }
    unicast_peer {
        192.168.10.12
        192.168.10.14
    }
    virtual_ipaddress {
        192.168.10.100/24
    }
    track_script {
        CheckK8sMaster
    }
}
EOF

# keepalived configuration on 192.168.10.14
cat <<EOF > /etc/keepalived/keepalived.conf
global_defs {
    router_id LVS_k8s
}
vrrp_script CheckK8sMaster {
    script "curl -k https://192.168.10.100:6444"
    interval 3
    timeout 9
    fall 2
    rise 2
}
vrrp_instance VI_1 {
    state BACKUP
    interface ens33
    virtual_router_id 100
    priority 80
    advert_int 1
    mcast_src_ip 192.168.10.14
    nopreempt
    authentication {
        auth_type PASS
        auth_pass fana123
    }
    unicast_peer {
        192.168.10.12
        192.168.10.13
    }
    virtual_ipaddress {
        192.168.10.100/24
    }
    track_script {
        CheckK8sMaster
    }
}
EOF

# Start keepalived
systemctl restart keepalived && systemctl enable keepalived
```
```bash
yum install -y haproxy

# The haproxy configuration is identical on 12, 13 and 14
cat << EOF > /etc/haproxy/haproxy.cfg
global
    log         127.0.0.1 local2
    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon

defaults
    mode                tcp
    log                 global
    retries             3
    timeout connect     10s
    timeout client      1m
    timeout server      1m

frontend kubernetes
    bind *:6444
    mode tcp
    default_backend kubernetes-master

backend kubernetes-master
    balance roundrobin
    server M-kube12 192.168.10.12:6443 check maxconn 2000
    server M-kube13 192.168.10.13:6443 check maxconn 2000
    server M-kube14 192.168.10.14:6443 check maxconn 2000
EOF

# Start haproxy
systemctl enable haproxy && systemctl start haproxy
```

Alternatively, haproxy and keepalived can be deployed as containers:

```bash
# haproxy startup script
mkdir -p /data/lb
cat > /data/lb/start-haproxy.sh << "EOF"
#!/bin/bash
MasterIP1=192.168.10.12
MasterIP2=192.168.10.13
MasterIP3=192.168.10.14
MasterPort=6443

docker run -d --restart=always --name HAProxy-K8S -p 6444:6444 \
    -e MasterIP1=$MasterIP1 \
    -e MasterIP2=$MasterIP2 \
    -e MasterIP3=$MasterIP3 \
    -e MasterPort=$MasterPort \
    wise2c/haproxy-k8s
EOF

# keepalived startup script
cat > /data/lb/start-keepalived.sh << "EOF"
#!/bin/bash
VIRTUAL_IP=192.168.10.100
INTERFACE=ens33
NETMASK_BIT=24
CHECK_PORT=6444
RID=10
VRID=160
MCAST_GROUP=224.0.0.18

docker run -itd --restart=always --name=Keepalived-K8S \
    --net=host --cap-add=NET_ADMIN \
    -e VIRTUAL_IP=$VIRTUAL_IP \
    -e INTERFACE=$INTERFACE \
    -e CHECK_PORT=$CHECK_PORT \
    -e RID=$RID \
    -e VRID=$VRID \
    -e NETMASK_BIT=$NETMASK_BIT \
    -e MCAST_GROUP=$MCAST_GROUP \
    wise2c/keepalived-k8s
EOF

# Copy the scripts to machines 13 and 14, then start them
sh /data/lb/start-haproxy.sh && sh /data/lb/start-keepalived.sh
docker ps   # shows the container status; the generated config files can be inspected inside the containers
```
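Before moving on, it is worth checking that the load-balancer layer is alive; the commands below assume the interface name (ens33) and the VIP/port used in the configurations above:

```bash
ip addr show ens33 | grep 192.168.10.100   # the VIP should be bound on the current keepalived MASTER (10.12)
ss -lntp | grep 6444                       # haproxy (or the HAProxy-K8S container) should be listening on 6444
curl -k https://192.168.10.100:6444        # returns an error until the apiservers exist, but should not hang or time out
```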
```bash
# Download the cfssl packages
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64

# Set up the cfssl environment
chmod +x cfssl*
mv cfssl_linux-amd64 /usr/local/bin/cfssl
mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo
export PATH=/usr/local/bin:$PATH

# Configure the CA files (the IP addresses are those of the etcd nodes)
mkdir /root/ssl && cd /root/ssl
cat > ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "8760h"
    },
    "profiles": {
      "kubernetes-Soulmate": {
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ],
        "expiry": "8760h"
      }
    }
  }
}
EOF
#--------------------------------------------------------#
cat > ca-csr.json <<EOF
{
  "CN": "kubernetes-Soulmate",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "shanghai",
      "L": "shanghai",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
#--------------------------------------------------------#
cat > etcd-csr.json <<EOF
{
  "CN": "etcd",
  "hosts": [
    "127.0.0.1",
    "192.168.10.12",
    "192.168.10.13",
    "192.168.10.14"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "shanghai",
      "L": "shanghai",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
#--------------------------------------------------------#
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
cfssl gencert -ca=ca.pem \
  -ca-key=ca-key.pem \
  -config=ca-config.json \
  -profile=kubernetes-Soulmate etcd-csr.json | cfssljson -bare etcd

# Distribute the etcd certificates to the 10.13 and 10.14 machines
mkdir -p /etc/etcd/ssl && cp *.pem /etc/etcd/ssl/
ssh -n 192.168.10.13 "mkdir -p /etc/etcd/ssl && exit"
ssh -n 192.168.10.14 "mkdir -p /etc/etcd/ssl && exit"
scp -r /etc/etcd/ssl/*.pem 192.168.10.13:/etc/etcd/ssl/
scp -r /etc/etcd/ssl/*.pem 192.168.10.14:/etc/etcd/ssl/
```
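Optionally, inspect the generated certificate with cfssl-certinfo and check that all three etcd node IPs appear in its "sans" field:

```bash
cfssl-certinfo -cert /etc/etcd/ssl/etcd.pem | grep -A 5 '"sans"'
```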
```bash
yum install etcd -y
mkdir -p /var/lib/etcd
```
```bash
# On 192.168.10.12
cat <<EOF >/etc/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
ExecStart=/usr/bin/etcd \
  --name M-kube12 \
  --cert-file=/etc/etcd/ssl/etcd.pem \
  --key-file=/etc/etcd/ssl/etcd-key.pem \
  --trusted-ca-file=/etc/etcd/ssl/ca.pem \
  --peer-cert-file=/etc/etcd/ssl/etcd.pem \
  --peer-key-file=/etc/etcd/ssl/etcd-key.pem \
  --peer-trusted-ca-file=/etc/etcd/ssl/ca.pem \
  --initial-advertise-peer-urls https://192.168.10.12:2380 \
  --listen-peer-urls https://192.168.10.12:2380 \
  --listen-client-urls https://192.168.10.12:2379,http://127.0.0.1:2379 \
  --advertise-client-urls https://192.168.10.12:2379 \
  --initial-cluster-token etcd-cluster-0 \
  --initial-cluster M-kube12=https://192.168.10.12:2380,M-kube13=https://192.168.10.13:2380,M-kube14=https://192.168.10.14:2380 \
  --initial-cluster-state new \
  --data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
```
```bash
# On 192.168.10.13
cat <<EOF >/etc/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
ExecStart=/usr/bin/etcd \
  --name M-kube13 \
  --cert-file=/etc/etcd/ssl/etcd.pem \
  --key-file=/etc/etcd/ssl/etcd-key.pem \
  --peer-cert-file=/etc/etcd/ssl/etcd.pem \
  --peer-key-file=/etc/etcd/ssl/etcd-key.pem \
  --trusted-ca-file=/etc/etcd/ssl/ca.pem \
  --peer-trusted-ca-file=/etc/etcd/ssl/ca.pem \
  --initial-advertise-peer-urls https://192.168.10.13:2380 \
  --listen-peer-urls https://192.168.10.13:2380 \
  --listen-client-urls https://192.168.10.13:2379,http://127.0.0.1:2379 \
  --advertise-client-urls https://192.168.10.13:2379 \
  --initial-cluster-token etcd-cluster-0 \
  --initial-cluster M-kube12=https://192.168.10.12:2380,M-kube13=https://192.168.10.13:2380,M-kube14=https://192.168.10.14:2380 \
  --initial-cluster-state new \
  --data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
```
```bash
# On 192.168.10.14
cat <<EOF >/etc/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
ExecStart=/usr/bin/etcd \
  --name M-kube14 \
  --cert-file=/etc/etcd/ssl/etcd.pem \
  --key-file=/etc/etcd/ssl/etcd-key.pem \
  --peer-cert-file=/etc/etcd/ssl/etcd.pem \
  --peer-key-file=/etc/etcd/ssl/etcd-key.pem \
  --trusted-ca-file=/etc/etcd/ssl/ca.pem \
  --peer-trusted-ca-file=/etc/etcd/ssl/ca.pem \
  --initial-advertise-peer-urls https://192.168.10.14:2380 \
  --listen-peer-urls https://192.168.10.14:2380 \
  --listen-client-urls https://192.168.10.14:2379,http://127.0.0.1:2379 \
  --advertise-client-urls https://192.168.10.14:2379 \
  --initial-cluster-token etcd-cluster-0 \
  --initial-cluster M-kube12=https://192.168.10.12:2380,M-kube13=https://192.168.10.13:2380,M-kube14=https://192.168.10.14:2380 \
  --initial-cluster-state new \
  --data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
```
```bash
# Enable etcd to start on boot
cp /etc/systemd/system/etcd.service /usr/lib/systemd/system/
systemctl daemon-reload && systemctl start etcd && systemctl enable etcd && systemctl status etcd
```
```bash
# Check the cluster health on any etcd node
etcdctl --endpoints=https://192.168.10.12:2379,https://192.168.10.13:2379,https://192.168.10.14:2379 \
  --ca-file=/etc/etcd/ssl/ca.pem \
  --cert-file=/etc/etcd/ssl/etcd.pem \
  --key-file=/etc/etcd/ssl/etcd-key.pem cluster-health
```
```
# A healthy cluster prints output like the following
member 1af68d968c7e3f22 is healthy: got healthy result from https://192.168.10.12:2379
member 55204c19ed228077 is healthy: got healthy result from https://192.168.10.14:2379
member e8d9a97b17f26476 is healthy: got healthy result from https://192.168.10.13:2379
cluster is healthy
```
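Beyond cluster-health, a quick write/read round trip (using the same v2 certificate flags as above; the /test/hello key is only an example) confirms that quorum writes propagate between members:

```bash
# write a test key through one member
etcdctl --endpoints=https://192.168.10.12:2379 \
  --ca-file=/etc/etcd/ssl/ca.pem \
  --cert-file=/etc/etcd/ssl/etcd.pem \
  --key-file=/etc/etcd/ssl/etcd-key.pem set /test/hello world

# read it back through another member
etcdctl --endpoints=https://192.168.10.13:2379 \
  --ca-file=/etc/etcd/ssl/ca.pem \
  --cert-file=/etc/etcd/ssl/etcd.pem \
  --key-file=/etc/etcd/ssl/etcd-key.pem get /test/hello
```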
Docker now ships in two editions: Docker CE, the free Community Edition, and Docker EE, the commercial Enterprise Edition. We use the CE edition here.

Install Docker on all machines.

Installing Docker with yum:
```bash
# 1. Install the yum utility packages
yum install -y yum-utils device-mapper-persistent-data lvm2

# 2. Add the docker-ce yum repository (already done earlier, so only shown here for reference)
# yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
# yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

# 3. Disable the docker-ce-edge repository; edge is the development channel and is unstable, so install from stable
yum-config-manager --disable docker-ce-edge

# 4. Refresh the local yum cache
yum makecache fast

# 5. Install docker-ce
yum -y install docker-ce

# 6. Configure the daemon. The kubelet's cgroup driver must match Docker's; the settings below follow the
#    officially recommended approach. An Aliyun registry mirror is appended because pulling images from
#    Docker Hub is slow from inside China.
cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ],
  "registry-mirrors": ["https://uyah70su.mirror.aliyuncs.com"]
}
EOF

# 7. Start Docker and enable it at boot
systemctl restart docker && systemctl enable docker && systemctl status docker
```
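Because the kubelet's cgroup driver has to match Docker's, confirm that the daemon actually picked up the setting:

```bash
docker info 2>/dev/null | grep -i "cgroup driver"   # should print: Cgroup Driver: systemd
```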
Run hello-world to verify the installation:
```
[root@localhost ~]# docker run hello-world
Unable to find image 'hello-world:latest' locally
latest: Pulling from library/hello-world
9a0669468bf7: Pull complete
Digest: sha256:0e06ef5e1945a718b02a8c319e15bae44f47039005530bc617a5d071190ed3fc
Status: Downloaded newer image for hello-world:latest

Hello from Docker!
This message shows that your installation appears to be working correctly.

To generate this message, Docker took the following steps:
 1. The Docker client contacted the Docker daemon.
 2. The Docker daemon pulled the "hello-world" image from the Docker Hub.
 3. The Docker daemon created a new container from that image which runs the
    executable that produces the output you are currently reading.
 4. The Docker daemon streamed that output to the Docker client, which sent it
    to your terminal.

To try something more ambitious, you can run an Ubuntu container with:
 $ docker run -it ubuntu bash

Share images, automate workflows, and more with a free Docker ID:
 https://cloud.docker.com/

For more examples and ideas, visit:
 https://docs.docker.com/engine/userguide/
```
Use the DaoCloud registry accelerator (this step can be skipped):
```bash
curl -sSL https://get.daocloud.io/daotools/set_mirror.sh | sh -s http://0d236e3f.m.daocloud.io
# docker version >= 1.12
# {"registry-mirrors": ["http://0d236e3f.m.daocloud.io"]}
# Success.
# You need to restart docker to take effect: sudo systemctl restart docker
systemctl restart docker
```
Install kubectl, kubelet, kubeadm, and kubernetes-cni on all machines.
```bash
# List the installable packages
yum list kubectl kubelet kubeadm kubernetes-cni
Loaded plugins: fastestmirror
Loading mirror speeds from cached hostfile
 * base: mirrors.tuna.tsinghua.edu.cn
 * extras: mirrors.sohu.com
 * updates: mirrors.sohu.com
Available Packages
kubeadm.x86_64            1.14.3-0    kubernetes
kubectl.x86_64            1.14.3-0    kubernetes
kubelet.x86_64            1.14.3-0    kubernetes
kubernetes-cni.x86_64     0.7.5-0     kubernetes

# Install kubectl, kubelet, kubeadm and kubernetes-cni
yum install -y kubectl kubelet kubeadm kubernetes-cni

# kubelet  communicates with the other nodes in the cluster and manages the lifecycle of Pods and containers on this node.
# kubeadm  is the automated deployment tool for Kubernetes; it lowers the deployment barrier and speeds things up.
# kubectl  is the Kubernetes cluster management CLI.
```
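If you want to be sure the versions shown in the listing are what actually gets installed (rather than whatever is newest in the repo at install time), yum also accepts an explicit version suffix:

```bash
# pin to the versions reported by `yum list` above
yum install -y kubelet-1.14.3 kubeadm-1.14.3 kubectl-1.14.3 kubernetes-cni-0.7.5
```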
Modify the kubelet configuration file (optional):
```bash
vi /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
# The file may instead live at the path below; in that case this step can be skipped
# /usr/lib/systemd/system/kubelet.service.d/10-kubeadm.conf

# Change this line
Environment="KUBELET_CGROUP_ARGS=--cgroup-driver=cgroupfs"
# Add this line
Environment="KUBELET_EXTRA_ARGS=--v=2 --fail-swap-on=false --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/k8sth/pause-amd64:3.0"

# Reload the configuration
systemctl daemon-reload
```
```bash
# 1. kubectl command completion
yum install -y bash-completion
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
echo "source <(kubectl completion bash)" >> ~/.bashrc
```
```bash
# Start the kubelet service on every host
systemctl enable kubelet && systemctl start kubelet
```
kubeadm init mainly performs the following steps (a configuration sketch follows the list):

[init]: initializes the cluster with the specified version.

[preflight]: runs pre-flight checks and pulls the required Docker images.

[kubelet-start]: generates the kubelet configuration file /var/lib/kubelet/config.yaml; kubelet cannot start without it, which is why kubelet fails to start before initialization.

[certificates]: generates the certificates used by Kubernetes and stores them in /etc/kubernetes/pki.

[kubeconfig]: generates the kubeconfig files and stores them in /etc/kubernetes; components use them to communicate with each other.

[control-plane]: installs the master components from the YAML manifests in /etc/kubernetes/manifests.

[etcd]: installs the etcd service from /etc/kubernetes/manifests/etcd.yaml.

[wait-control-plane]: waits for the master components deployed by the control plane to start.

[apiclient]: checks the health of the master component services.

[uploadconfig]: uploads the configuration to the cluster.

[kubelet]: configures kubelet through a ConfigMap.

[patchnode]: records the CNI information on the node by way of annotations.

[mark-control-plane]: labels the current node with the master role and taints it as unschedulable, so by default pods are not scheduled onto master nodes.

[bootstrap-token]: generates a bootstrap token; write it down, as it is needed later when adding nodes to the cluster with kubeadm join.

[addons]: installs the CoreDNS and kube-proxy add-ons.
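For reference, an HA control plane in this layout is normally initialized with a kubeadm configuration whose controlPlaneEndpoint points at the keepalived/haproxy VIP, plus certificate upload so the other masters can later join as control-plane members. The sketch below is only an illustration under those assumptions, not the exact file used here; the port must match whatever your load balancer actually exposes (the haproxy frontend above listens on 6444, while the join commands later use 6443):

```bash
# Sketch only: kubeadm v1beta1 configuration for kubeadm 1.14; adjust version, endpoint and pod subnet to your setup
cat > /root/kubeadm-config.yaml << EOF
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
kubernetesVersion: v1.14.3
controlPlaneEndpoint: "192.168.10.100:6444"   # keepalived VIP + haproxy frontend port
networking:
  podSubnet: "10.244.0.0/16"                  # must match the flannel network applied below
EOF

# --experimental-upload-certs makes kubeadm print the --certificate-key used by the control-plane join command
kubeadm init --config /root/kubeadm-config.yaml --experimental-upload-certs
```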
```
[root@M-kube12 kubernetes]# kubectl get node
NAME       STATUS     ROLES    AGE     VERSION
m-kube12   NotReady   master   3m40s   v1.14.3
# the STATUS column is still NotReady because no CNI network add-on is installed yet

[root@M-kube12 kubernetes]# kubectl -n kube-system get pod
NAME                               READY   STATUS    RESTARTS   AGE
coredns-8686dcc4fd-fmlsh           0/1     Pending   0          3m40s
coredns-8686dcc4fd-m22j7           0/1     Pending   0          3m40s
etcd-m-kube12                      1/1     Running   0          2m59s
kube-apiserver-m-kube12            1/1     Running   0          2m53s
kube-controller-manager-m-kube12   1/1     Running   0          2m33s
kube-proxy-4kg8d                   1/1     Running   0          3m40s
kube-scheduler-m-kube12            1/1     Running   0          2m45s

[root@M-kube12 kubernetes]# kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
controller-manager   Healthy   ok
scheduler            Healthy   ok
etcd-0               Healthy   {"health":"true"}
```
```bash
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

# Image version: quay.io/coreos/flannel:v0.11.0-amd64
cat kube-flannel.yml | grep image
cat kube-flannel.yml | grep 10.244

# If your network can reach quay.io reliably, this substitution is not needed
sed -i 's#quay.io/coreos/flannel:v0.11.0-amd64#willdockerhub/flannel:v0.11.0-amd64#g' kube-flannel.yml

kubectl apply -f kube-flannel.yml
# Or apply it directly from GitHub
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/a70459be0084506e4ec919aa1c114638878db11b/Documentation/kube-flannel.yml

# Wait a moment; the node should become Ready and all pods should be Running
[root@M-fana3 kubernetes]# kubectl get node
NAME      STATUS   ROLES    AGE   VERSION
m-fana3   Ready    master   42m   v1.14.3     # the status is normal now

[root@M-fana3 kubernetes]# kubectl -n kube-system get pod
NAME                              READY   STATUS    RESTARTS   AGE
coredns-8686dcc4fd-2z6m2          1/1     Running   0          42m
coredns-8686dcc4fd-4k7mm          1/1     Running   0          42m
etcd-m-fana3                      1/1     Running   0          41m
kube-apiserver-m-fana3            1/1     Running   0          41m
kube-controller-manager-m-fana3   1/1     Running   0          41m
kube-flannel-ds-amd64-6zrzt       1/1     Running   0          109s
kube-proxy-lc8d5                  1/1     Running   0          42m
kube-scheduler-m-fana3            1/1     Running   0          41m

# If you see something like the following instead, the image pull has probably failed
kubectl -n kube-system get pod
NAME                               READY   STATUS                  RESTARTS   AGE
coredns-8686dcc4fd-c9mw7           0/1     Pending                 0          43m
coredns-8686dcc4fd-l8fpm           0/1     Pending                 0          43m
kube-apiserver-m-kube12            1/1     Running                 0          42m
kube-controller-manager-m-kube12   1/1     Running                 0          17m
kube-flannel-ds-amd64-gcmmp        0/1     Init:ImagePullBackOff   0          11m
kube-proxy-czzk7                   1/1     Running                 0          43m
kube-scheduler-m-kube12            1/1     Running                 0          42m

# Inspect the pod with: kubectl describe pod kube-flannel-ds-amd64-gcmmp --namespace=kube-system
# The events at the end show the pull errors below; you can pull the image manually or install flannel from a binary instead
Node-Selectors:  beta.kubernetes.io/arch=amd64
Tolerations:     :NoSchedule
                 node.kubernetes.io/disk-pressure:NoSchedule
                 node.kubernetes.io/memory-pressure:NoSchedule
                 node.kubernetes.io/network-unavailable:NoSchedule
                 node.kubernetes.io/not-ready:NoExecute
                 node.kubernetes.io/pid-pressure:NoSchedule
                 node.kubernetes.io/unreachable:NoExecute
                 node.kubernetes.io/unschedulable:NoSchedule
Events:
  Type     Reason          Age                     From                Message
  ----     ------          ----                    ----                -------
  Normal   Scheduled       11m                     default-scheduler   Successfully assigned kube-system/kube-flannel-ds-amd64-gcmmp to m-kube12
  Normal   Pulling         11m                     kubelet, m-kube12   Pulling image "willdockerhub/flannel:v0.11.0-amd64"
  Warning  FailedMount     7m27s                   kubelet, m-kube12   MountVolume.SetUp failed for volume "flannel-token-6g9n7" : couldn't propagate object cache: timed out waiting for the condition
  Warning  FailedMount     7m27s                   kubelet, m-kube12   MountVolume.SetUp failed for volume "flannel-cfg" : couldn't propagate object cache: timed out waiting for the condition
  Warning  Failed          4m21s                   kubelet, m-kube12   Failed to pull image "willdockerhub/flannel:v0.11.0-amd64": rpc error: code = Unknown desc = context canceled
  Warning  Failed          3m53s                   kubelet, m-kube12   Failed to pull image "willdockerhub/flannel:v0.11.0-amd64": rpc error: code = Unknown desc = Error response from daemon: Get https://registry-1.docker.io/v2/: net/http: request canceled (Client.Timeout exceeded while awaiting headers)
  Warning  Failed          3m16s                   kubelet, m-kube12   Failed to pull image "willdockerhub/flannel:v0.11.0-amd64": rpc error: code = Unknown desc = Error response from daemon: Get https://registry-1.docker.io/v2/: net/http: TLS handshake timeout
  Warning  Failed          3m16s (x3 over 4m21s)   kubelet, m-kube12   Error: ErrImagePull
  Normal   SandboxChanged  3m14s                   kubelet, m-kube12   Pod sandbox changed, it will be killed and re-created.
  Normal   BackOff         2m47s (x6 over 4m21s)   kubelet, m-kube12   Back-off pulling image "willdockerhub/flannel:v0.11.0-amd64"
  Warning  Failed          2m47s (x6 over 4m21s)   kubelet, m-kube12   Error: ImagePullBackOff
  Normal   Pulling         2m33s (x4 over 7m26s)   kubelet, m-kube12   Pulling image "willdockerhub/flannel:v0.11.0-amd64"
```
```bash
# 1. Run on the other master nodes: join the cluster as control-plane members
kubeadm join 192.168.10.100:6443 --token y6v90q.i6bl1bwcgg8clvh5 \
    --discovery-token-ca-cert-hash sha256:179c5689ef32be2123c9f02015ef25176d177c54322500665f1170f26368ae3d \
    --experimental-control-plane --certificate-key 3044cb04c999706795b28c1d3dcd2305dcf181787d7c6537284341a985395c20

# 2. Copy the kubeconfig to the user's home directory
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

# 3. Run on the worker nodes to join the cluster
# If you have lost the join command, regenerate it with: kubeadm token create --print-join-command
kubeadm join 192.168.10.100:6443 --token y6v90q.i6bl1bwcgg8clvh5 \
    --discovery-token-ca-cert-hash sha256:179c5689ef32be2123c9f02015ef25176d177c54322500665f1170f26368ae3d

# 4. Verify the cluster state
kubectl -n kube-system get pod -o wide   # pod status
kubectl get nodes -o wide                # node status
kubectl -n kube-system get svc           # services
NAME       TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)                  AGE
kube-dns   ClusterIP   10.96.0.10   <none>        53/UDP,53/TCP,9153/TCP   16m

ipvsadm -ln   # IPVS proxy rules
```
Deploy a simple web service to test the cluster.
```bash
cat > /opt/deployment-goweb.yaml << EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: goweb
spec:
  selector:
    matchLabels:
      app: goweb
  replicas: 4
  template:
    metadata:
      labels:
        app: goweb
    spec:
      containers:
      - image: lingtony/goweb
        name: goweb
        ports:
        - containerPort: 8000
EOF
#-------------------------------------
cat > /opt/svc-goweb.yaml << EOF
apiVersion: v1
kind: Service
metadata:
  name: gowebsvc
spec:
  selector:
    app: goweb
  ports:
  - name: default
    protocol: TCP
    port: 80
    targetPort: 8000
EOF

# Deploy the service
kubectl apply -f /opt/deployment-goweb.yaml
kubectl apply -f /opt/svc-goweb.yaml

# Check the pods
kubectl get pod -o wide
NAME                    READY   STATUS    RESTARTS   AGE   IP           NODE       NOMINATED NODE   READINESS GATES
goweb-6c569f884-4ln4s   1/1     Running   0          75s   10.244.1.2   n-kube15   <none>           <none>
goweb-6c569f884-jcnrs   1/1     Running   0          75s   10.244.1.3   n-kube15   <none>           <none>
goweb-6c569f884-njnzk   1/1     Running   0          75s   10.244.1.4   n-kube15   <none>           <none>
goweb-6c569f884-zxnrx   1/1     Running   0          75s   10.244.1.5   n-kube15   <none>           <none>

# Check the service
kubectl get svc
NAME         TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)   AGE
gowebsvc     ClusterIP   10.105.87.199   <none>        80/TCP    84s
kubernetes   ClusterIP   10.96.0.1       <none>        443/TCP   30m

# Access test: requests to the Service IP are load-balanced across the pods
curl http://10.105.87.199/info
# Hostname: goweb-6c569f884-jcnrs
curl http://10.105.87.199/info
# Hostname: goweb-6c569f884-4ln4s
curl http://10.105.87.199/info
# Hostname: goweb-6c569f884-zxnrx
curl http://10.105.87.199/info
# Hostname: goweb-6c569f884-njnzk
curl http://10.105.87.199/info
# Hostname: goweb-6c569f884-jcnrs
curl http://10.105.87.199/info
# Hostname: goweb-6c569f884-4ln4s
curl http://10.105.87.199/info
# Hostname: goweb-6c569f884-zxnrx
curl http://10.105.87.199/info
# Hostname: goweb-6c569f884-njnzk
curl http://10.105.87.199/info
# Hostname: goweb-6c569f884-jcnrs
```
By default Kubernetes has no web UI. You can install the dashboard add-on on a master machine to manage the cluster through a web interface.

GitHub releases page for the dashboard project: https://github.com/kubernetes/dashboard/releases

Image to prepare:

k8s.gcr.io/kubernetes-dashboard-amd64:v1.10.1

We can first pull the image from the Alibaba Cloud mirror registry.