kubeadm部署高可用K8S-1.15

節點信息

IP 角色  操做系統 備註
10.0.0.1 master centos7.6 keepalived
10.0.0.2 master centos7.6 keepalived
10.0.0.3 master centos7.6 keepalived

 

 

 

 

 

1. 環境準備

應事先完成centos7的基礎環境配置,包括關閉selinux、關閉防火牆、配置時間同步、配置節點ssh免密、關閉NetworkManager等。

1.1 升級系統內核

[root@k8s-master1 ~]# wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo \
 && wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
[root@k8s-master1 ~]# rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
[root@k8s-master1 ~]# rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm
[root@k8s-master1 ~]# yum --disablerepo="*" --enablerepo="elrepo-kernel" list available #lt爲長期穩定支持版,ml爲最新穩定版
kernel-lt.x86_64 -.el7.elrepo elrepo-kernel
......
kernel-ml.x86_64 -.el7.elrepo elrepo-kernel
......
#安裝內核
[root@k8s-master1 ~]# yum --enablerepo=elrepo-kernel install kernel-lt-devel kernel-lt -y
#設置重新安裝內核啓動
[root@k8s-master1 ~]# awk -F\' '$1=="menuentry " {print $2}' /etc/grub2.cfg
CentOS Linux (4.4.197-1.el7.elrepo.x86_64) 7 (Core)
CentOS Linux (3.10.0-957.el7.x86_64) 7 (Core)
CentOS Linux (0-rescue-b4c601a613824f9f827cb9787b605efb) 7 (Core)
[root@k8s-master1 ~]# grub2-set-default 0

1.2 優化內核參數

[root@k8s-master1 ~]# cat << EOF | tee  /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
net.ipv4.tcp_tw_recycle = 0  #因爲tcp_tw_recycle與kubernetes的NAT衝突,必須關閉!不然會致使服務不通。
vm.swappiness = 0           #禁止使用 swap 空間,只有當系統 OOM 時才容許使用它
fs.inotify.max_user_instances = 512
fs.inotify.max_user_watches = 1280000
fs.file-max = 2000000
fs.nr_open = 2000000
net.ipv6.conf.all.disable_ipv6 = 1  #關閉不使用的ipv6協議棧,防止觸發docker BUG.
net.netfilter.nf_conntrack_max = 524288
EOF
[root@k8s-master1 ~]# cat >>/etc/sysctl.conf <<EOF
net.ipv4.ip_forward = 1
vm.swappiness = 0
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.tcp_max_syn_backlog = 65536
net.core.netdev_max_backlog =  32768
net.core.somaxconn = 32768
net.core.wmem_default = 8388608
net.core.rmem_default = 8388608
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
net.ipv4.tcp_timestamps = 0
net.ipv4.tcp_synack_retries = 2
net.ipv4.tcp_syn_retries = 2
net.ipv4.tcp_tw_recycle = 0
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_mem = 94500000 915000000 927000000
net.ipv4.tcp_max_orphans = 3276800
net.ipv4.ip_local_port_range = 1024  65535
EOF

sysctl --system  #注意:sysctl -p默認只加載/etc/sysctl.conf,--system會同時加載/etc/sysctl.d/*.conf

1.3 加載內核ipvs相關模塊

[root@k8s-master1 ~]# cat > /etc/sysconfig/modules/ipvs.modules <<EOF
> #!/bin/bash
> modprobe -- ip_vs
> modprobe -- ip_vs_rr
> modprobe -- ip_vs_wrr
> modprobe -- ip_vs_sh
> modprobe -- nf_conntrack_ipv4
> modprobe -- br_netfilter
> EOF
[root@k8s-master1 ~]#
[root@k8s-master1 ~]# sh  /etc/sysconfig/modules/ipvs.modules 
[root@k8s-master1 ~]# lsmod  | grep ip_
ip_vs_sh               16384  0
ip_vs_wrr              16384  0
ip_vs_rr               16384  19
ip_vs                 147456  25 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack          114688  7 ip_vs,nf_nat,nf_nat_ipv4,xt_conntrack,nf_nat_masquerade_ipv4,nf_conntrack_netlink,nf_conntrack_ipv4
libcrc32c              16384  2 xfs,ip_vs

1.4 關閉swap分區

[root@k8s-master1 ~]# swapoff -a 
[root@k8s-master1 ~]# sed -i 's/.*swap.*/#&/' /etc/fstab

2. 安裝配置keepalived

2.1 安裝keepalived

#3個節點均安裝keepalived
[root@k8s-master1 ~]# yum install keepalived -y
[root@k8s-master1 ~]# cp /etc/keepalived/keepalived.conf{,.bak}
[root@k8s-master1 ~]# cat /etc/keepalived/keepalived.conf
#注意三個節點的virtual_router_id需一致
! Configuration File for keepalived

global_defs {
   router_id k8s-1
}

vrrp_script CheckK8sMaster {
    script "curl -k https://127.0.0.1:6443/api"
    interval 3
    timeout 9
    fall 2
    rise 2
}

vrrp_instance VI_1 {
    state MASTER
    interface eth0
    virtual_router_id 51
    priority 200
    advert_int 1
    mcast_src_ip 10.0.0.1
    nopreempt
    authentication {
        auth_type PASS
        auth_pass 378378
    }
    unicast_peer {

    10.0.0.2
    10.0.0.3
    }
    virtual_ipaddress {
        10.0.0.10
    }
    track_script {
        CheckK8sMaster
    }
}

m2節點keepalived配置,m3的省略:

[root@k8s-master2 ~]# cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived

global_defs {
   router_id k8s-2
}

vrrp_script CheckK8sMaster {
    script "curl -k https://127.0.0.1:6443/api"
    interval 3
    timeout 9
    fall 2
    rise 2
}

vrrp_instance VI_1 {
    state BACKUP
    interface  eth0
    virtual_router_id 51
    priority 150
    advert_int 1
    mcast_src_ip 10.0.0.2
    nopreempt
    authentication {
        auth_type PASS
        auth_pass 378378
    }
    unicast_peer {

    10.0.0.1
    10.0.0.3
    }
    virtual_ipaddress {
        10.0.0.10
    }
    track_script {
        CheckK8sMaster
    }
}

啓動服務

[root@k8s-master1 ~]# systemctl enable keepalived && systemctl start keepalived

2.2 安裝部署K8S

安裝配置docker

K8S節點均需安裝
[root@k8s-master1 ~]# yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
[root@k8s-master1 ~]# yum list docker-ce --showduplicates | sort -r
[root@k8s-master1 ~]# yum -y install docker-ce-18.09.7-3

配置docker

[root@k8s-master1 ~]# cat << EOF > /etc/docker/daemon.json
{
     "registry-mirrors":[
         "https://c6ai9izk.mirror.aliyuncs.com"
     ],
     "log-driver":"json-file",
     "log-opts":{
         "max-size":"100m"
     },
     "storage-driver":"overlay2",
     "storage-opts": [
     "overlay2.override_kernel_check=true"
     ],
     "live-restore": true,
     "exec-opts": [
     "native.cgroupdriver=systemd"
     ],
     "insecure-registries": ["reg.myhb.com","10.0.0.4"]
 }
EOF

注:daemon.json解析

{
    "authorization-plugins": [],   //訪問受權插件
    "data-root": "",   //docker數據持久化存儲的根目錄
    "dns": [],   //DNS服務器
    "dns-opts": [],   //DNS配置選項,如端口等
    "dns-search": [],   //DNS搜索域名
    "exec-opts": [],   //執行選項
    "exec-root": "",   //執行狀態的文件的根目錄
    "experimental": false,   //是否開啓試驗性特性
    "storage-driver": "",   //存儲驅動器
    "storage-opts": [],   //存儲選項
    "labels": [],   //鍵值對式標記docker元數據
    "live-restore": true,   //dockerd掛掉是否保活容器(避免了docker服務異常而形成容器退出)
    "log-driver": "",   //容器日誌的驅動器
    "log-opts": {},   //容器日誌的選項
    "mtu": 0,   //設置容器網絡MTU(最大傳輸單元)
    "pidfile": "",   //daemon PID文件的位置
    "cluster-store": "",   //集羣存儲系統的URL
    "cluster-store-opts": {},   //配置集羣存儲
    "cluster-advertise": "",   //對外的地址名稱
    "max-concurrent-downloads": 3,   //設置每一個pull進程的最大併發
    "max-concurrent-uploads": 5,   //設置每一個push進程的最大併發
    "default-shm-size": "64M",   //設置默認共享內存的大小
    "shutdown-timeout": 15,   //設置daemon關閉容器的超時時限(秒)
    "debug": true,   //開啓調試模式
    "hosts": [],   //監聽地址(?)
    "log-level": "",   //日誌級別
    "tls": true,   //開啓傳輸層安全協議TLS
    "tlsverify": true,   //開啓輸層安全協議並驗證遠程地址
    "tlscacert": "",   //CA簽名文件路徑
    "tlscert": "",   //TLS證書文件路徑
    "tlskey": "",   //TLS密鑰文件路徑
    "swarm-default-advertise-addr": "",   //swarm對外地址
    "api-cors-header": "",   //設置CORS(跨域資源共享-Cross-origin resource sharing)頭
    "selinux-enabled": false,   //開啓selinux(用戶、進程、應用、文件的強制訪問控制)
    "userns-remap": "",   //給用戶命名空間設置 用戶/組
    "group": "",   //docker所在組
    "cgroup-parent": "",   //設置全部容器的cgroup的父類(?)
    "default-ulimits": {},   //設置全部容器的ulimit
    "init": false,   //容器執行初始化,來轉發信號或控制(reap)進程
    "init-path": "/usr/libexec/docker-init",   //docker-init文件的路徑
    "ipv6": false,   //開啓IPV6網絡
    "iptables": false,   //開啓防火牆規則
    "ip-forward": false,   //開啓net.ipv4.ip_forward
    "ip-masq": false,   //開啓ip掩蔽(IP封包經過路由器或防火牆時重寫源IP地址或目的IP地址的技術)
    "userland-proxy": false,   //用戶空間代理
    "userland-proxy-path": "/usr/libexec/docker-proxy",   //用戶空間代理路徑
    "ip": "0.0.0.0",   //默認IP
    "bridge": "",   //將容器依附(attach)到橋接網絡上的橋標識
    "bip": "",   //指定橋接ip
    "fixed-cidr": "",   //(ipv4)子網劃分,即限制ip地址分配範圍,用以控制容器所屬網段實現容器間(同一主機或不一樣主機間)的網絡訪問
    "fixed-cidr-v6": "",   //(ipv6)子網劃分
    "default-gateway": "",   //默認網關
    "default-gateway-v6": "",   //默認ipv6網關
    "icc": false,   //容器間通訊
    "raw-logs": false,   //原始日誌(無顏色、全時間戳)
    "allow-nondistributable-artifacts": [],   //不對外分發的產品提交的registry倉庫
    "registry-mirrors": [],   //registry倉庫鏡像
    "seccomp-profile": "",   //seccomp配置文件
    "insecure-registries": [],   //非https的registry地址
    "no-new-privileges": false,   //禁止新優先級(??)
    "default-runtime": "runc",   //OCI聯盟(The Open Container Initiative)默認運行時環境
    "oom-score-adjust": -500,   //內存溢出被殺死的優先級(-1000~1000)
    "node-generic-resources": ["NVIDIA-GPU=UUID1", "NVIDIA-GPU=UUID2"],   //對外公佈的資源節點
    "runtimes": {   //運行時
        "cc-runtime": {
            "path": "/usr/bin/cc-runtime"
        },
        "custom": {
            "path": "/usr/local/bin/my-runc-replacement",
            "runtimeArgs": [
                "--debug"
            ]
        }
    }
}

啓動docker

[root@k8s-master1 ~]# systemctl enable docker && systemctl restart docker && systemctl status docker

配置K8S源

[root@k8s-master1 ~]# cat << EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled= 1
gpgcheck= 1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

安裝K8S

[root@k8s-master1 ~]# yum list  kubelet kubeadm kubectl --showduplicates | sort -r
[root@k8s-master1 ~]# yum install -y kubelet-1.15.5-0 kubeadm-1.15.5-0 kubectl-1.15.5-0 ipvsadm ipset
##設置kubelet開機自啓動,注意:這一步不能直接執行 systemctl start kubelet,會報錯,成功初始化完後kubelet會自動起來
[root@k8s-master1 ~]# systemctl enable kubelet
#kubectl命令補全
[root@k8s-master1 ~]# source /usr/share/bash-completion/bash_completion
[root@k8s-master1 ~]# source <(kubectl completion bash)
[root@k8s-master1 ~]# echo "source <(kubectl completion bash)" >> ~/.bashrc
[root@k8s-master1 tmp]# kubeadm config print init-defaults > kubeadm-init.yaml
[root@k8s-master1 tmp]# cp kubeadm-init.yaml{,.bak}
[root@k8s-master1 tmp]# vim kubeadm-init.yaml
須要修改 advertiseAddress、controlPlaneEndpoint、imageRepository、serviceSubnet、kubernetesVersion:
  1. advertiseAddress 修改爲 master1 的 IP
  2. controlPlaneEndpoint 修改爲 VIP+6443 端口
  3. imageRepository 修改爲阿里的源
  4. serviceSubnet 找網絡組要一段沒有使用的 IP
  5. kubernetesVersion 和上一步安裝的版本一致
[root@k8s-master1 ~]# cat kubeadm-init.yaml
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 10.0.0.1
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: k8s-master1
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
controlPlaneEndpoint: "10.0.0.10:6443"
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: v1.15.5
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12
scheduler: {}

部署K8S

[root@k8s-master1 tmp]# kubeadm config images pull --config kubeadm-init.yaml
[root@k8s-master1 ~]# kubeadm init --config=kubeadm-init.yaml
[root@k8s-master1 tmp]# mkdir -p $HOME/.kube
[root@k8s-master1 tmp]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@k8s-master1 tmp]# sudo chown $(id -u):$(id -g) $HOME/.kube/config

添加其餘的master節點

在k8s-master1將證書文件拷貝至k8s-master二、k8s-master3節點
在k8s-master1上部署
#拷貝證書至k8s-master2節點
[root@k8s-master1 ~]# vim k8s-master-cert.sh
#!/bin/bash
# Copy the shared kubeadm PKI material (cluster CA, service-account keys,
# front-proxy CA, etcd CA) and admin.conf from the first master to the other
# control-plane nodes, so they can join with `kubeadm join --control-plane`.
#
# Requires: passwordless SSH as $USER to every host in $CONTROL_PLANE_IPS,
# and that `kubeadm init` has already been run on this node.
set -euo pipefail

USER=root
# Space-separated list; word-splitting in the unquoted `for` is intentional.
CONTROL_PLANE_IPS="k8s-master2 k8s-master3"

for host in ${CONTROL_PLANE_IPS}; do
    # Create the target directory first so the etcd CA copy cannot fail.
    ssh "${USER}@${host}" "mkdir -p /etc/kubernetes/pki/etcd"
    scp /etc/kubernetes/pki/ca.* "${USER}@${host}:/etc/kubernetes/pki/"
    scp /etc/kubernetes/pki/sa.* "${USER}@${host}:/etc/kubernetes/pki/"
    scp /etc/kubernetes/pki/front-proxy-ca.* "${USER}@${host}:/etc/kubernetes/pki/"
    scp /etc/kubernetes/pki/etcd/ca.* "${USER}@${host}:/etc/kubernetes/pki/etcd/"
    scp /etc/kubernetes/admin.conf "${USER}@${host}:/etc/kubernetes/"
done
[root@k8s-master1 ~]# sh -x k8s-master-cert.sh
#在k8s-master2上執行,注意--control-plane參數(以control-plane角色加入)
[root@k8s-master2 ~]# kubeadm join 10.0.0.10:6443 --token abcdef.0123456789abcdef \
>     --discovery-token-ca-cert-hash sha256:gdfa5553064e75391e03eef75b8fa16ba121f5aheffe85e8187kk6207b610coo \
>     --control-plane

部署calico插件

[root@k8s-master1 tmp]# wget -c https://docs.projectcalico.org/v3.6/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calico-networking/1.7/calico.yaml
 
#修改calico.yaml,修改CALICO_IPV4POOL_CIDR下面的value值爲Pod網段(podSubnet)。注意:這是Pod網段,不可與前面設置的serviceSubnet重疊
[root@k8s-master1 tmp]# cp calico.yaml{,.bak}
[root@k8s-master1 tmp]# vim calico.yaml
[root@k8s-master1 tmp]# kubectl apply -f calico.yaml
configmap/calico-config created
customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamblocks.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamhandles.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamconfigs.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created
clusterrole.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrole.rbac.authorization.k8s.io/calico-node created
clusterrolebinding.rbac.authorization.k8s.io/calico-node created
daemonset.extensions/calico-node created
serviceaccount/calico-node created
deployment.extensions/calico-kube-controllers created
serviceaccount/calico-kube-controllers created

[root@k8s-master1 tmp]# kubectl get nodes
NAME           STATUS     ROLES    AGE   VERSION
k8s-master1   Ready      master   59m   v1.15.5
k8s-master2   Ready      master   25m   v1.15.5
k8s-master3   Ready      master   22m   v1.15.5

配置ipvs

[root@k8s-master1 ~]# kubectl edit cm kube-proxy -n kube-system
#將 mode: "" 修改爲 mode: "ipvs" 後保存退出
#重啓kube-proxy pod
[root@k8s-master1 ~]#  kubectl get pod -n kube-system | grep kube-proxy | awk '{system("kubectl delete pod "$1" -n kube-system")}'
pod "kube-proxy-5s6t5" deleted
pod "kube-proxy-6xjl5" deleted
pod "kube-proxy-h5q6x" deleted
pod "kube-proxy-44hjk" deleted
pod "kube-proxy-yc67g" deleted
pod "kube-proxy-6wmh9" deleted
 
#查看Kube-proxy pod狀態
[root@k8s-master1 ~]# kubectl get pod -n kube-system | grep kube-proxy
kube-proxy-5vh6s                           1/1     Running   0          82s
kube-proxy-4tp4d                           1/1     Running   0          2m2s
kube-proxy-5d8sg                           1/1     Running   0          114s
kube-proxy-l5cgw                           1/1     Running   0          97s
kube-proxy-s3v9f                           1/1     Running   0          106s
kube-proxy-4dfx7                           1/1     Running   0          79s
 
#查看是否開啓了ipvs
[root@k8s-master1 ~]# kubectl logs kube-proxy-5vh6s -n kube-system
I0727 :: server_others.go:] Using ipvs Proxier.
W0727 :: proxier.go:] clusterCIDR not specified, unable to distinguish between internal and external traffic
W0727 :: proxier.go:] IPVS scheduler not specified, use rr by default
I0727 :: server.go:] Version: v1.15.5
I0727 :: conntrack.go:] Setting nf_conntrack_max to
I0727 :: config.go:] Starting service config controller
I0727 :: config.go:] Starting endpoints config controller
I0727 :: controller_utils.go:] Waiting for caches to sync for endpoints config controller
I0727 :: controller_utils.go:] Waiting for caches to sync for service config controller
I0727 :: controller_utils.go:] Caches are synced for service config controller
I0727 :: controller_utils.go:] Caches are synced for endpoints config controller
[root@k8s-master1 ~]# kubectl logs kube-proxy-ssv94 -n kube-system  | grep "ipvs"
I0727 :: server_others.go:] Using ipvs Proxier.

 查看K8S組件狀態

[root@k8s-master1 ~]# kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
controller-manager   Healthy   ok
scheduler            Healthy   ok
etcd-0               Healthy   {"health":"true"}

 master節點去污

[root@k8s-master1 ~]# kubectl taint nodes --all node-role.kubernetes.io/master-
相關文章
相關標籤/搜索