Building a Kubernetes 1.14.3 Cluster on CentOS with kubeadm

Lab environment overview (Reference 1, Reference 2):

Hostname   IP address       Installed software                         Role
M-kube12   192.168.10.12    master+etcd+docker+keepalived+haproxy      master
M-kube13   192.168.10.13    master+etcd+docker+keepalived+haproxy      master
M-kube14   192.168.10.14    master+etcd+docker+keepalived+haproxy      master
N-kube15   192.168.10.15    docker+node                                node
N-kube16   192.168.10.16    docker+node                                node
VIP        192.168.10.100                                              VIP

1.1 Environment preparation

# 1. Disable the firewall and SELinux, install base packages
yum install -y net-tools conntrack-tools wget vim ntpdate libseccomp libtool-ltdl lrzsz        # run on every machine to install basic utilities

systemctl stop firewalld && systemctl disable firewalld     # stop and disable firewalld

sestatus    # check the SELinux status
setenforce 0        # disable SELinux temporarily
sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config

swapoff -a          # disable swap
sed -i 's/.*swap.*/#&/' /etc/fstab

# 2. Set up passwordless SSH
ssh-keygen -t rsa       # generate the key pair
ssh-copy-id <ip address>      # copy the public key to each host
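
# A minimal sketch (node IPs assumed from the table above) for pushing the key to every other host in one pass:
for ip in 192.168.10.13 192.168.10.14 192.168.10.15 192.168.10.16; do
    ssh-copy-id -i ~/.ssh/id_rsa.pub root@$ip    # prompts once for each host's root password
done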

# 3. Switch to domestic (China) yum mirrors
mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.$(date +%Y%m%d)
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.cloud.tencent.com/repo/centos7_base.repo
wget -O /etc/yum.repos.d/epel.repo http://mirrors.cloud.tencent.com/repo/epel-7.repo
# Docker repo
wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo

# Configure a domestic Kubernetes repo
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
yum clean all && yum makecache -y

#---------------------- Alternative: a simpler repo definition with gpgcheck disabled (use either this or the one above) ----------------------
[root@localhost ~]#  cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
EOF

# 4. Configure kernel parameters so bridged IPv4 traffic is passed to the iptables chains
cat <<EOF >  /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_nonlocal_bind = 1
net.ipv4.ip_forward = 1
vm.swappiness=0
EOF
sysctl --system

# 5. Raise the file descriptor and process limits
echo "* soft nofile 65536" >> /etc/security/limits.conf
echo "* hard nofile 65536" >> /etc/security/limits.conf
echo "* soft nproc 65536"  >> /etc/security/limits.conf
echo "* hard nproc 65536"  >> /etc/security/limits.conf
echo "* soft  memlock  unlimited"  >> /etc/security/limits.conf
echo "* hard memlock  unlimited"  >> /etc/security/limits.conf

# 6. Load the IPVS kernel modules
yum install ipset ipvsadm -y
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
# run the script
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
# Alternative version (from another guide): load every IPVS module shipped with the running kernel
cat << EOF > /etc/sysconfig/modules/ipvs.modules 
#!/bin/bash
ipvs_modules_dir="/usr/lib/modules/\`uname -r\`/kernel/net/netfilter/ipvs"
for i in \`ls \$ipvs_modules_dir | sed  -r 's#(.*).ko.*#\1#'\`; do
    /sbin/modinfo -F filename \$i  &> /dev/null
    if [ \$? -eq 0 ]; then
        /sbin/modprobe \$i
    fi
done
EOF

chmod +x /etc/sysconfig/modules/ipvs.modules 
bash /etc/sysconfig/modules/ipvs.modules
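
# Whichever script you used, confirm the modules actually loaded:
lsmod | grep -e ip_vs -e nf_conntrack_ipv4    # ip_vs, ip_vs_rr, ip_vs_wrr, ip_vs_sh and nf_conntrack_ipv4 should be listed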
 

1.2 Configure keepalived

yum install -y keepalived

# Configuration on the 192.168.10.12 machine

cat <<EOF > /etc/keepalived/keepalived.conf
global_defs {
   router_id LVS_k8s
}

vrrp_script CheckK8sMaster {
    script "curl -k https://192.168.10.100:6444"
    interval 3
    timeout 9
    fall 2
    rise 2
}

vrrp_instance VI_1 {
    state MASTER
    interface ens33
    virtual_router_id 100
    priority 100
    advert_int 1
    mcast_src_ip 192.168.10.12
    nopreempt
    authentication {
        auth_type PASS
        auth_pass fana123
    }
    unicast_peer {
        192.168.10.13
        192.168.10.14
    }
    virtual_ipaddress {
        192.168.10.100/24
    }
    track_script {
        CheckK8sMaster
    }

}
EOF

# keepalived configuration on the 192.168.10.13 machine
cat <<EOF > /etc/keepalived/keepalived.conf
global_defs {
   router_id LVS_k8s
}

vrrp_script CheckK8sMaster {
    script "curl -k https://192.168.10.100:6444"
    interval 3
    timeout 9
    fall 2
    rise 2
}

vrrp_instance VI_1 {
    state BACKUP
    interface ens33
    virtual_router_id 100
    priority 90
    advert_int 1
    mcast_src_ip 192.168.10.13
    nopreempt
    authentication {
        auth_type PASS
        auth_pass fana123
    }
    unicast_peer {
        192.168.10.12
        192.168.10.14
    }
    virtual_ipaddress {
        192.168.10.100/24
    }
    track_script {
        CheckK8sMaster
    }
}
EOF

# keepalived configuration on the 192.168.10.14 machine
cat <<EOF > /etc/keepalived/keepalived.conf
global_defs {
   router_id LVS_k8s
}

vrrp_script CheckK8sMaster {
    script "curl -k https://192.168.10.100:6444"
    interval 3
    timeout 9
    fall 2
    rise 2
}

vrrp_instance VI_1 {
    state BACKUP
    interface ens33
    virtual_router_id 100
    priority 80
    advert_int 1
    mcast_src_ip 192.168.10.14
    nopreempt
    authentication {
        auth_type PASS
        auth_pass fana123
    }
    unicast_peer {
        192.168.10.12
        192.168.10.13
    }
    virtual_ipaddress {
        192.168.10.100/24
    }
    track_script {
        CheckK8sMaster
    }

}
EOF

# Start keepalived
systemctl restart keepalived && systemctl enable keepalived
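
# Quick sanity check (interface name ens33 as configured above): the VIP should be held by one master,
# normally 10.12 since it has the highest priority.
ip addr show ens33 | grep 192.168.10.100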

1.3 Configure haproxy

yum install -y haproxy

# haproxy configuration (the same file is used on the 12, 13, and 14 machines)
cat << EOF > /etc/haproxy/haproxy.cfg
global
    log         127.0.0.1 local2
    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon

defaults
    mode                    tcp
    log                     global
    retries                 3
    timeout connect         10s
    timeout client          1m
    timeout server          1m

frontend kubernetes
    bind *:6444
    mode tcp
    default_backend kubernetes-master

backend kubernetes-master
    balance roundrobin
    server M-kube12 192.168.10.12:6443 check maxconn 2000
    server M-kube13 192.168.10.13:6443 check maxconn 2000
    server M-kube14 192.168.10.14:6443 check maxconn 2000
EOF

# The configuration is identical on the 12, 13, and 14 machines

# Start haproxy
systemctl enable haproxy && systemctl start haproxy
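
# Quick check that haproxy is listening on the front-end port configured above:
ss -lntp | grep 6444    # should show haproxy bound to *:6444 on each master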
haproxy and keepalived can also be deployed as containers:

# haproxy startup script
mkdir -p /data/lb
cat > /data/lb/start-haproxy.sh << "EOF"
#!/bin/bash
MasterIP1=192.168.10.12
MasterIP2=192.168.10.13
MasterIP3=192.168.10.14
MasterPort=6443

docker run -d --restart=always --name HAProxy-K8S -p 6444:6444 \
        -e MasterIP1=$MasterIP1 \
        -e MasterIP2=$MasterIP2 \
        -e MasterIP3=$MasterIP3 \
        -e MasterPort=$MasterPort \
        wise2c/haproxy-k8s
EOF

# keepalived startup script
cat > /data/lb/start-keepalived.sh << "EOF"
#!/bin/bash
VIRTUAL_IP=192.168.10.100
INTERFACE=ens33
NETMASK_BIT=24
CHECK_PORT=6444
RID=10
VRID=160
MCAST_GROUP=224.0.0.18

docker run -itd --restart=always --name=Keepalived-K8S \
        --net=host --cap-add=NET_ADMIN \
        -e VIRTUAL_IP=$VIRTUAL_IP \
        -e INTERFACE=$INTERFACE \
        -e CHECK_PORT=$CHECK_PORT \
        -e RID=$RID \
        -e VRID=$VRID \
        -e NETMASK_BIT=$NETMASK_BIT \
        -e MCAST_GROUP=$MCAST_GROUP \
        wise2c/keepalived-k8s
EOF

# Copy the scripts to the 13 and 14 machines, then run them
sh /data/lb/start-haproxy.sh && sh /data/lb/start-keepalived.sh

docker ps   # shows the status of the containers; their generated config files can be inspected inside the containers
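
# For example (container name taken from the start script above; the exact path of the rendered
# haproxy.cfg inside the wise2c image is not documented here, so open a shell and look around):
docker exec -it HAProxy-K8S sh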

 

1.4 Configure etcd

1.4.1 Generate the etcd certificates on the 10.12 machine

# Download the cfssl binaries
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
# Install cfssl
chmod +x cfssl*
mv cfssl_linux-amd64 /usr/local/bin/cfssl
mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo
export PATH=/usr/local/bin:$PATH

# Create the CA config (the IP addresses are those of the etcd nodes)
mkdir /root/ssl && cd /root/ssl

cat >  ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "8760h"
    },
    "profiles": {
      "kubernetes-Soulmate": {
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ],
        "expiry": "8760h"
      }
    }
  }
}
EOF

#--------------------------------------------------------#

cat >  ca-csr.json <<EOF
{
  "CN": "kubernetes-Soulmate",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "shanghai",
      "L": "shanghai",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

#--------------------------------------------------------#

cat > etcd-csr.json <<EOF
{
  "CN": "etcd",
  "hosts": [
    "127.0.0.1",
    "192.168.10.12",
    "192.168.10.13",
    "192.168.10.14"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "shanghai",
      "L": "shanghai",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

#--------------------------------------------------------#
cfssl gencert -initca ca-csr.json | cfssljson -bare ca

cfssl gencert -ca=ca.pem \
  -ca-key=ca-key.pem \
  -config=ca-config.json \
  -profile=kubernetes-Soulmate etcd-csr.json | cfssljson -bare etcd
  
# Distribute the etcd certificates from 10.12 to the 10.13 and 10.14 machines

mkdir -p /etc/etcd/ssl && cp *.pem /etc/etcd/ssl/

ssh -n 192.168.10.13 "mkdir -p /etc/etcd/ssl && exit"
ssh -n 192.168.10.14 "mkdir -p /etc/etcd/ssl && exit"

scp -r /etc/etcd/ssl/*.pem 192.168.10.13:/etc/etcd/ssl/
scp -r /etc/etcd/ssl/*.pem 192.168.10.14:/etc/etcd/ssl/

 

1.4.2 Install and configure etcd on the three master nodes

yum install etcd -y
mkdir -p /var/lib/etcd
#10.12機器上操做
cat <<EOF >/etc/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
ExecStart=/usr/bin/etcd \
  --name M-kube12 \
  --cert-file=/etc/etcd/ssl/etcd.pem \
  --key-file=/etc/etcd/ssl/etcd-key.pem \
  --trusted-ca-file=/etc/etcd/ssl/ca.pem \
  --peer-cert-file=/etc/etcd/ssl/etcd.pem \
  --peer-key-file=/etc/etcd/ssl/etcd-key.pem \
  --peer-trusted-ca-file=/etc/etcd/ssl/ca.pem \
  --initial-advertise-peer-urls https://192.168.10.12:2380 \
  --listen-peer-urls https://192.168.10.12:2380 \
  --listen-client-urls https://192.168.10.12:2379,http://127.0.0.1:2379 \
  --advertise-client-urls https://192.168.10.12:2379 \
  --initial-cluster-token etcd-cluster-0 \
  --initial-cluster M-kube12=https://192.168.10.12:2380,M-kube13=https://192.168.10.13:2380,M-kube14=https://192.168.10.14:2380 \
  --initial-cluster-state new \
  --data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
#10.13上機器操做
cat <<EOF >/etc/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
ExecStart=/usr/bin/etcd \
  --name M-kube13 \
  --cert-file=/etc/etcd/ssl/etcd.pem \
  --key-file=/etc/etcd/ssl/etcd-key.pem \
  --peer-cert-file=/etc/etcd/ssl/etcd.pem \
  --peer-key-file=/etc/etcd/ssl/etcd-key.pem \
  --trusted-ca-file=/etc/etcd/ssl/ca.pem \
  --peer-trusted-ca-file=/etc/etcd/ssl/ca.pem \
  --initial-advertise-peer-urls https://192.168.10.13:2380 \
  --listen-peer-urls https://192.168.10.13:2380 \
  --listen-client-urls https://192.168.10.13:2379,http://127.0.0.1:2379 \
  --advertise-client-urls https://192.168.10.13:2379 \
  --initial-cluster-token etcd-cluster-0 \
  --initial-cluster M-kube12=https://192.168.10.12:2380,M-kube13=https://192.168.10.13:2380,M-kube14=https://192.168.10.14:2380 \
  --initial-cluster-state new \
  --data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
#10.14機器上操做
cat <<EOF >/etc/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
ExecStart=/usr/bin/etcd \
  --name M-kube14 \
  --cert-file=/etc/etcd/ssl/etcd.pem \
  --key-file=/etc/etcd/ssl/etcd-key.pem \
  --peer-cert-file=/etc/etcd/ssl/etcd.pem \
  --peer-key-file=/etc/etcd/ssl/etcd-key.pem \
  --trusted-ca-file=/etc/etcd/ssl/ca.pem \
  --peer-trusted-ca-file=/etc/etcd/ssl/ca.pem \
  --initial-advertise-peer-urls https://192.168.10.14:2380 \
  --listen-peer-urls https://192.168.10.14:2380 \
  --listen-client-urls https://192.168.10.14:2379,http://127.0.0.1:2379 \
  --advertise-client-urls https://192.168.10.14:2379 \
  --initial-cluster-token etcd-cluster-0 \
  --initial-cluster M-kube12=https://192.168.10.12:2380,M-kube13=https://192.168.10.13:2380,M-kube14=https://192.168.10.14:2380 \
  --initial-cluster-state new \
  --data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
# Enable autostart
cp /etc/systemd/system/etcd.service /usr/lib/systemd/system/
systemctl daemon-reload && systemctl start etcd && systemctl enable etcd && systemctl status etcd
# Check the cluster from an etcd node
etcdctl --endpoints=https://192.168.10.12:2379,https://192.168.10.13:2379,https://192.168.10.14:2379 \
 --ca-file=/etc/etcd/ssl/ca.pem \
 --cert-file=/etc/etcd/ssl/etcd.pem \
 --key-file=/etc/etcd/ssl/etcd-key.pem  cluster-health
# A healthy cluster reports something like:
member 1af68d968c7e3f22 is healthy: got healthy result from https://192.168.10.12:2379
member 55204c19ed228077 is healthy: got healthy result from https://192.168.10.14:2379
member e8d9a97b17f26476 is healthy: got healthy result from https://192.168.10.13:2379
cluster is healthy

 

1.5 Install Docker

Docker now comes in two editions: Docker CE (the free Community Edition) and Docker EE (the commercial Enterprise Edition). We use the CE edition.

Install Docker on all machines.

Install Docker with yum:

#1. Install the yum repo management utilities
yum install -y yum-utils device-mapper-persistent-data lvm2

#2. Download the official docker-ce yum repo file (already done above, so skipped here)
# yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
# yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

#3. Disable the docker-ce-edge repo: edge is the development channel and unstable; install from the stable channel instead
yum-config-manager --disable docker-ce-edge
#4. Refresh the local yum metadata cache
yum makecache fast
#5. Install Docker CE
yum -y install docker-ce
#6. Configure the daemon: the kubelet's cgroup driver must match Docker's, and the following is the officially recommended setup.
# Because image pulls are slow from inside China, an Aliyun registry mirror is appended at the end of the config.
cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ],
  "registry-mirrors": ["https://uyah70su.mirror.aliyuncs.com"]
}
EOF
#7. Enable Docker on boot
systemctl restart docker && systemctl enable docker && systemctl status docker
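
# Optional check that the cgroup driver matches the daemon.json above:
docker info 2>/dev/null | grep -i 'cgroup driver'    # expect: Cgroup Driver: systemd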

 

Run hello-world to verify:

[root@localhost ~]# docker run hello-world
Unable to find image 'hello-world:latest' locally
latest: Pulling from library/hello-world
9a0669468bf7: Pull complete
Digest: sha256:0e06ef5e1945a718b02a8c319e15bae44f47039005530bc617a5d071190ed3fc
Status: Downloaded newer image for hello-world:latest

Hello from Docker!
This message shows that your installation appears to be working correctly.

To generate this message, Docker took the following steps:
1. The Docker client contacted the Docker daemon.
2. The Docker daemon pulled the "hello-world" image from the Docker Hub.
3. The Docker daemon created a new container from that image which runs the
   executable that produces the output you are currently reading.
4. The Docker daemon streamed that output to the Docker client, which sent it
   to your terminal.

To try something more ambitious, you can run an Ubuntu container with:
$ docker run -it ubuntu bash

Share images, automate workflows, and more with a free Docker ID:
https://cloud.docker.com/

For more examples and ideas, visit:
https://docs.docker.com/engine/userguide/

 

1.6 Install the kubelet and kubeadm packages

Use the DaoCloud registry accelerator (this step can be skipped).

 
curl -sSL https://get.daocloud.io/daotools/set_mirror.sh | sh -s http://0d236e3f.m.daocloud.io
# docker version >= 1.12
# {"registry-mirrors": ["http://0d236e3f.m.daocloud.io"]}
# Success.
# You need to restart docker to take effect: sudo systemctl restart docker
systemctl restart docker

 

Install kubectl, kubelet, kubeadm, and kubernetes-cni on all machines.

yum list kubectl kubelet kubeadm kubernetes-cni     # list the installable packages
Loaded plugins: fastestmirror
Loading mirror speeds from cached hostfile
* base: mirrors.tuna.tsinghua.edu.cn
* extras: mirrors.sohu.com
* updates: mirrors.sohu.com
# available packages
kubeadm.x86_64                                    1.14.3-0                                              kubernetes
kubectl.x86_64                                    1.14.3-0                                             kubernetes
kubelet.x86_64                                    1.14.3-0                                              kubernetes
kubernetes-cni.x86_64                             0.7.5-0                                              kubernetes
[root@localhost ~]#

# Then install kubectl, kubelet, kubeadm, and kubernetes-cni
yum install -y kubectl kubelet kubeadm kubernetes-cni

# kubelet communicates with the rest of the cluster and manages the lifecycle of the Pods and containers on this node.
# kubeadm is Kubernetes' automated deployment tool; it lowers the barrier to deployment and improves efficiency.
# kubectl is the Kubernetes cluster management CLI.
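
# Since this guide targets 1.14.3, you may prefer to pin the versions explicitly in case the repo
# later ships a newer release (a sketch using the versions reported by yum list above):
yum install -y kubelet-1.14.3 kubeadm-1.14.3 kubectl-1.14.3 kubernetes-cni-0.7.5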

 

Modify the kubelet configuration file (optional).

vi /etc/systemd/system/kubelet.service.d/10-kubeadm.conf    # the file may also live at the path below; this step is optional
/usr/lib/systemd/system/kubelet.service.d/10-kubeadm.conf
# change this line
Environment="KUBELET_CGROUP_ARGS=--cgroup-driver=cgroupfs"
# add this line
Environment="KUBELET_EXTRA_ARGS=--v=2 --fail-swap-on=false --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/k8sth/pause-amd64:3.0"
# reload the configuration
systemctl daemon-reload
#1. Command completion for kubectl
yum install -y bash-completion
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
echo "source <(kubectl completion bash)" >> ~/.bashrc
# Enable and start the kubelet service on all hosts
systemctl enable kubelet && systemctl start kubelet 
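
# Note: kubelet will keep restarting until kubeadm init/join has generated /var/lib/kubelet/config.yaml;
# that is expected at this stage and can be watched with:
systemctl status kubelet    # shows activating (auto-restart) until the node is initialized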

 

1.7 Initialize the cluster

kubeadm init performs the following main steps:

[init]: initialize using the specified version
[preflight]: run pre-flight checks and pull the required Docker images
[kubelet-start]: generate the kubelet config file /var/lib/kubelet/config.yaml; without it kubelet cannot start, which is why kubelet was failing before the init
[certificates]: generate the certificates Kubernetes uses and store them in /etc/kubernetes/pki
[kubeconfig]: generate the kubeconfig files in /etc/kubernetes; the components use them to talk to each other
[control-plane]: install the master components from the YAML files under /etc/kubernetes/manifests
[etcd]: install the etcd service from /etc/kubernetes/manifests/etcd.yaml
[wait-control-plane]: wait for the master components deployed as the control plane to start
[apiclient]: check the health of the master components
[uploadconfig]: upload the configuration
[kubelet]: configure kubelet through a ConfigMap
[patchnode]: record CNI information on the Node via annotations
[mark-control-plane]: label the current node with the master role and an unschedulable taint so ordinary Pods are not scheduled on it by default
[bootstrap-token]: generate the token; note it down, it is used later with kubeadm join to add nodes to the cluster
[addons]: install the CoreDNS and kube-proxy add-ons

1.7.1 Create the cluster initialization config file on the 10.12 machine

References: the Kubernetes and kubeadm documentation.

kubeadm config print init-defaults > kubeadm-config.yaml    # this generates a default init config you can edit, or use the one below directly

# 1. Create the cluster initialization config file
cat <<EOF > /etc/kubernetes/kubeadm-master.config
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
kubernetesVersion: v1.14.3
controlPlaneEndpoint: "192.168.10.100:6443"
imageRepository: registry.aliyuncs.com/google_containers  
apiServer:
  certSANs:
  - 192.168.10.12
  - 192.168.10.13
  - 192.168.10.14
  - 192.168.10.100
etcd:
  external:
    endpoints:
    - https://192.168.10.12:2379
    - https://192.168.10.13:2379
    - https://192.168.10.14:2379
    caFile: /etc/etcd/ssl/ca.pem
    certFile: /etc/etcd/ssl/etcd.pem
    keyFile: /etc/etcd/ssl/etcd-key.pem
networking:
  podSubnet: 10.244.0.0/16
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
EOF

#2. Then run
kubeadm config images pull --config /etc/kubernetes/kubeadm-master.config   # optionally run this first to pre-pull the images
kubeadm init --config /etc/kubernetes/kubeadm-master.config --experimental-upload-certs | tee kubeadm-init.log
# Piping through tee writes the init log to kubeadm-init.log; the --experimental-upload-certs flag lets kubeadm distribute the certificates automatically when additional control-plane nodes join later.

#3. Recovering from a failed init
kubeadm reset       # whether init failed or succeeded, kubeadm reset can be run directly to clean up the cluster or node
# or
rm -rf /etc/kubernetes/*.conf
rm -rf /etc/kubernetes/manifests/*.yaml
docker ps -a |awk '{print $1}' |xargs docker rm -f
systemctl  stop kubelet

# A successful init ends like this
Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of the control-plane node running the following command on each as root:

  kubeadm join 192.168.10.100:6443 --token y6v90q.i6bl1bwcgg8clvh5 \
    --discovery-token-ca-cert-hash sha256:179c5689ef32be2123c9f02015ef25176d177c54322500665f1170f26368ae3d \
    --experimental-control-plane --certificate-key 3044cb04c999706795b28c1d3dcd2305dcf181787d7c6537284341a985395c20

Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use 
"kubeadm init phase upload-certs --experimental-upload-certs" to reload certs afterward.

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.10.100:6443 --token y6v90q.i6bl1bwcgg8clvh5 \
    --discovery-token-ca-cert-hash sha256:179c5689ef32be2123c9f02015ef25176d177c54322500665f1170f26368ae3d 
    
#5. Then copy the kubeconfig files
mkdir -p /root/.kube
cp -i /etc/kubernetes/admin.conf /root/.kube/config
chown $(id -u):$(id -g) /root/.kube/config      # if another user needs kubectl, copy the file into that user's $HOME and adjust ownership the same way
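
# Alternatively, the root user can simply point KUBECONFIG at the admin config instead of copying it:
export KUBECONFIG=/etc/kubernetes/admin.conf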

1.7.2 Check the current status

[root@M-kube12 kubernetes]# kubectl get node
NAME       STATUS     ROLES    AGE     VERSION
m-kube12   NotReady   master   3m40s   v1.14.3      # STATUS is still NotReady because no pod network has been deployed yet

[root@M-kube12 kubernetes]# kubectl -n kube-system get pod
NAME                               READY   STATUS    RESTARTS   AGE
coredns-8686dcc4fd-fmlsh           0/1     Pending   0          3m40s
coredns-8686dcc4fd-m22j7           0/1     Pending   0          3m40s
etcd-m-kube12                      1/1     Running   0          2m59s
kube-apiserver-m-kube12            1/1     Running   0          2m53s
kube-controller-manager-m-kube12   1/1     Running   0          2m33s
kube-proxy-4kg8d                   1/1     Running   0          3m40s
kube-scheduler-m-kube12            1/1     Running   0          2m45s

[root@M-kube12 kubernetes]# kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
controller-manager   Healthy   ok                  
scheduler            Healthy   ok                  
etcd-0               Healthy   {"health":"true"} 

 

1.7.3 Deploy the flannel network (the manifest is applied once; its DaemonSet then runs on every node)

wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
# image version: quay.io/coreos/flannel:v0.11.0-amd64

cat kube-flannel.yml | grep image
cat kube-flannel.yml | grep 10.244

sed -i 's#quay.io/coreos/flannel:v0.11.0-amd64#willdockerhub/flannel:v0.11.0-amd64#g' kube-flannel.yml  # if your network can reach quay.io, this substitution is unnecessary

kubectl apply -f kube-flannel.yml

# or apply it straight from GitHub
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/a70459be0084506e4ec919aa1c114638878db11b/Documentation/kube-flannel.yml

# Wait a bit; the node becomes Ready and all pods turn Running
[root@M-fana3 kubernetes]# kubectl get node              
NAME      STATUS   ROLES    AGE   VERSION
m-fana3   Ready    master   42m   v1.14.3       # the status is now Ready
[root@M-fana3 kubernetes]# kubectl -n kube-system get pod
NAME                              READY   STATUS    RESTARTS   AGE
coredns-8686dcc4fd-2z6m2          1/1     Running   0          42m
coredns-8686dcc4fd-4k7mm          1/1     Running   0          42m
etcd-m-fana3                      1/1     Running   0          41m
kube-apiserver-m-fana3            1/1     Running   0          41m
kube-controller-manager-m-fana3   1/1     Running   0          41m
kube-flannel-ds-amd64-6zrzt       1/1     Running   0          109s
kube-proxy-lc8d5                  1/1     Running   0          42m
kube-scheduler-m-fana3            1/1     Running   0          41m

# If you run into something like the following, the image pull has probably failed:
kubectl -n kube-system get pod                                          
NAME                               READY   STATUS                  RESTARTS   AGE
coredns-8686dcc4fd-c9mw7           0/1     Pending                 0          43m
coredns-8686dcc4fd-l8fpm           0/1     Pending                 0          43m
kube-apiserver-m-kube12            1/1     Running                 0          42m
kube-controller-manager-m-kube12   1/1     Running                 0          17m
kube-flannel-ds-amd64-gcmmp        0/1     Init:ImagePullBackOff   0          11m
kube-proxy-czzk7                   1/1     Running                 0          43m
kube-scheduler-m-kube12            1/1     Running                 0          42m

# You can run kubectl describe pod kube-flannel-ds-amd64-gcmmp --namespace=kube-system to inspect the pod; the events at the end show errors like the ones below. In that case pull the image manually or install flannel from a binary.
Node-Selectors:  beta.kubernetes.io/arch=amd64
Tolerations:     :NoSchedule
                 node.kubernetes.io/disk-pressure:NoSchedule
                 node.kubernetes.io/memory-pressure:NoSchedule
                 node.kubernetes.io/network-unavailable:NoSchedule
                 node.kubernetes.io/not-ready:NoExecute
                 node.kubernetes.io/pid-pressure:NoSchedule
                 node.kubernetes.io/unreachable:NoExecute
                 node.kubernetes.io/unschedulable:NoSchedule
Events:
  Type     Reason          Age                    From               Message
  ----     ------          ----                   ----               -------
  Normal   Scheduled       11m                    default-scheduler  Successfully assigned kube-system/kube-flannel-ds-amd64-gcmmp to m-kube12
  Normal   Pulling         11m                    kubelet, m-kube12  Pulling image "willdockerhub/flannel:v0.11.0-amd64"
  Warning  FailedMount     7m27s                  kubelet, m-kube12  MountVolume.SetUp failed for volume "flannel-token-6g9n7" : couldn't propagate object cache: timed out waiting for the condition
  Warning  FailedMount     7m27s                  kubelet, m-kube12  MountVolume.SetUp failed for volume "flannel-cfg" : couldn't propagate object cache: timed out waiting for the condition
  Warning  Failed          4m21s                  kubelet, m-kube12  Failed to pull image "willdockerhub/flannel:v0.11.0-amd64": rpc error: code = Unknown desc = context canceled
  Warning  Failed          3m53s                  kubelet, m-kube12  Failed to pull image "willdockerhub/flannel:v0.11.0-amd64": rpc error: code = Unknown desc = Error response from daemon: Get https://registry-1.docker.io/v2/: net/http: request canceled (Client.Timeout exceeded while awaiting headers)
  Warning  Failed          3m16s                  kubelet, m-kube12  Failed to pull image "willdockerhub/flannel:v0.11.0-amd64": rpc error: code = Unknown desc = Error response from daemon: Get https://registry-1.docker.io/v2/: net/http: TLS handshake timeout
  Warning  Failed          3m16s (x3 over 4m21s)  kubelet, m-kube12  Error: ErrImagePull
  Normal   SandboxChanged  3m14s                  kubelet, m-kube12  Pod sandbox changed, it will be killed and re-created.
  Normal   BackOff         2m47s (x6 over 4m21s)  kubelet, m-kube12  Back-off pulling image "willdockerhub/flannel:v0.11.0-amd64"
  Warning  Failed          2m47s (x6 over 4m21s)  kubelet, m-kube12  Error: ImagePullBackOff
  Normal   Pulling         2m33s (x4 over 7m26s)  kubelet, m-kube12  Pulling image "willdockerhub/flannel:v0.11.0-amd64"

 

1.7.4 Join the cluster and verify

#1. Run the control-plane join command on the other master nodes
kubeadm join 192.168.10.100:6443 --token y6v90q.i6bl1bwcgg8clvh5 \
    --discovery-token-ca-cert-hash sha256:179c5689ef32be2123c9f02015ef25176d177c54322500665f1170f26368ae3d \
    --experimental-control-plane --certificate-key 3044cb04c999706795b28c1d3dcd2305dcf181787d7c6537284341a985395c20
#2. Copy the kubeconfig into the user's home directory
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

#3. Run the join command on the worker nodes
# If you have lost the worker join command, regenerate it with: kubeadm token create --print-join-command

kubeadm join 192.168.10.100:6443 --token y6v90q.i6bl1bwcgg8clvh5 \
    --discovery-token-ca-cert-hash sha256:179c5689ef32be2123c9f02015ef25176d177c54322500665f1170f26368ae3d

#4. Verify the cluster state
kubectl -n kube-system get pod -o wide  # check how the pods are running

kubectl get nodes -o wide # check the nodes

kubectl -n kube-system get svc  # list the services
NAME       TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)                  AGE
kube-dns   ClusterIP   10.96.0.10   <none>        53/UDP,53/TCP,9153/TCP   16m

ipvsadm -ln     # inspect the IPVS proxy rules

 

1.7.5 Test the cluster

Deploy a simple web service to exercise the cluster.

cat > /opt/deployment-goweb.yaml << EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: goweb
spec:
  selector:
    matchLabels:
      app: goweb
  replicas: 4
  template:
    metadata:
      labels:
        app: goweb
    spec: 
      containers: 
      - image: lingtony/goweb
        name: goweb
        ports: 
        - containerPort: 8000
EOF

#-------------------------------------

cat > /opt/svc-goweb.yaml << EOF
apiVersion: v1
kind: Service
metadata:
  name: gowebsvc
spec:
  selector:
    app: goweb
  ports:
  - name: default
    protocol: TCP
    port: 80
    targetPort: 8000
EOF

# ----------------------------------- Deploy the service
kubectl apply -f deployment-goweb.yaml
kubectl  apply -f svc-goweb.yaml
#-------------- Check the pods
kubectl get pod -o wide
NAME                    READY   STATUS    RESTARTS   AGE   IP           NODE       NOMINATED NODE   READINESS GATES
goweb-6c569f884-4ln4s   1/1     Running   0          75s   10.244.1.2   n-kube15   <none>           <none>
goweb-6c569f884-jcnrs   1/1     Running   0          75s   10.244.1.3   n-kube15   <none>           <none>
goweb-6c569f884-njnzk   1/1     Running   0          75s   10.244.1.4   n-kube15   <none>           <none>
goweb-6c569f884-zxnrx   1/1     Running   0          75s   10.244.1.5   n-kube15   <none>           <none>

#-------- Check the service
kubectl  get svc
NAME         TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)   AGE
gowebsvc     ClusterIP   10.105.87.199   <none>        80/TCP    84s
kubernetes   ClusterIP   10.96.0.1       <none>        443/TCP   30m

#----- Access test: requests to the Service are load-balanced across the pods
curl http://10.105.87.199/info  #  Hostname: goweb-6c569f884-jcnrs
curl http://10.105.87.199/info  #  Hostname: goweb-6c569f884-4ln4s
curl http://10.105.87.199/info  #  Hostname: goweb-6c569f884-zxnrx
curl http://10.105.87.199/info  #  Hostname: goweb-6c569f884-njnzk
curl http://10.105.87.199/info  #  Hostname: goweb-6c569f884-jcnrs
curl http://10.105.87.199/info  #  Hostname: goweb-6c569f884-4ln4s
curl http://10.105.87.199/info  #  Hostname: goweb-6c569f884-zxnrx
curl http://10.105.87.199/info  #  Hostname: goweb-6c569f884-njnzk
curl http://10.105.87.199/info  #  Hostname: goweb-6c569f884-jcnrs           

 

1.8 Configure the dashboard

There is no web UI by default; installing the dashboard add-on on the master provides web-based management.

Dashboard project on GitHub: https://github.com/kubernetes/dashboard/releases

Image required:

k8s.gcr.io/kubernetes-dashboard-amd64:v1.10.1

We can pull the images from the Aliyun registry first.

#1. Pull the images
vim /etc/kubernetes/dashboard.sh

#!/bin/bash
DASHBOARD_VERSION=v1.10.1
HEAPSTER_VERSION=v1.5.4
GRAFANA_VERSION=v5.0.4
INFLUXDB_VERSION=v1.5.2
username=registry.cn-hangzhou.aliyuncs.com/google_containers
images=(
        kubernetes-dashboard-amd64:${DASHBOARD_VERSION}
        heapster-grafana-amd64:${GRAFANA_VERSION}
        heapster-amd64:${HEAPSTER_VERSION}
        heapster-influxdb-amd64:${INFLUXDB_VERSION}
        )
for image in ${images[@]}
do
docker pull ${username}/${image}
docker tag ${username}/${image} k8s.gcr.io/${image}
docker rmi ${username}/${image}
done
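
# Run the script (path assumed from the vim command above) on the node(s) that will host the dashboard:
bash /etc/kubernetes/dashboard.sh    # pulls each image from the Aliyun registry and retags it as k8s.gcr.io/...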

#2. Prepare the YAML file; download it from GitHub
wget https://raw.githubusercontent.com/kubernetes/dashboard/v1.10.1/src/deploy/recommended/kubernetes-dashboard.yaml

#3. Edit the config file; the Service definition is near the bottom
vim /etc/kubernetes/kubernetes-dashboard.yaml
# ------------------- Dashboard Service ------------------- #
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  type: NodePort        ## added: expose the Dashboard port so it can be reached from outside
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30001    ## NodePort range: 30000-32767
  selector:
    k8s-app: kubernetes-dashboard
#--------- the image referenced in the yaml; adjust the version to match what you pulled ---------#
spec:
      containers:
      - name: kubernetes-dashboard
        image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.10.1
        imagePullPolicy: IfNotPresent   # a pull policy can be added here

#4. Create the service account and ClusterRoleBinding config
cat << EOF > /etc/kubernetes/kubernetes-dashboard-admin-rbac.yaml
---
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-admin
  namespace: kube-system
  
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard-admin
  labels:
    k8s-app: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard-admin
  namespace: kube-system
EOF

#5. Apply the manifests
kubectl apply -f kubernetes-dashboard.yaml
kubectl apply -f kubernetes-dashboard-admin-rbac.yaml

#6. After applying, check that the pod is running
kubectl get pod -n kube-system |grep kubernetes-dashboard
# output:
kubernetes-dashboard-6cfdc589c7-c6qmq   1/1     Running   0          99m

#7. Check the exposed NodePort
kubectl get service  -n kube-system |grep kubernetes-dashboard
# output:
kubernetes-dashboard-external   NodePort    10.96.149.139   <none>        443:30001/TCP           99m

#8. Retrieve the login token
kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep kubernetes-dashboard-admin-token | awk '{print $1}')
# output:
Name:         kubernetes-dashboard-admin-token-2rrq2
Namespace:    kube-system
Labels:       <none>
Annotations:  kubernetes.io/service-account.name: kubernetes-dashboard-admin
              kubernetes.io/service-account.uid: aeff190c-93eb-11e9-904c-000c29959a05

Type:  kubernetes.io/service-account-token

Data
====
namespace:  11 bytes
token:      eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZC1hZG1pbi10b2tlbi0ycnJxMiIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZC1hZG1pbiIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6ImFlZmYxOTBjLTkzZWItMTFlOS05MDRjLTAwMGMyOTk1OWEwNSIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlLXN5c3RlbTprdWJlcm5ldGVzLWRhc2hib2FyZC1hZG1pbiJ9.VcyOeYta1PrleZ4PDn_mvxtf8jiAo9DOboL5inMk9QJY1raDCI7EOHaVDF1OPLgYR2JqTVDLjshLwFkm3I4zO49piApgxd9fLrIA1RW30trNG9XxfG8P5O00RuYQxdRGfIeYcSdFgeroKdHY10wgBsAFbd8DWkc_IyYPHe-gnn_Y2U5Hd1tPZGOk_ZvZXhjlQd25vYouBI1RBEVUlcug5HaDGqHH_2yYmba4AFI2rVjsnxNbeSca5Ri9384vCsJQSkvh1uKMQTXuUXZb3z6x2nKKx9vA7LxoHYKJkyLMNbvKqL5QYpS3t9aVuzYTWVuUxEunnmEcT9R5oqceGwCwtg
ca.crt:     1025 bytes

#9. Open https://<master_ip:port> in a browser to reach the Dashboard, choose token sign-in on the Kubernetes dashboard page, and paste the token retrieved above
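
# A quick reachability check from the command line (node IP assumed from the table above; -k because
# the dashboard serves a self-signed certificate):
curl -k https://192.168.10.12:30001/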

 

Reference: https://www.cnblogs.com/fan-gx/p/11055904.html
