kubeadm部署K8S1.14.x高可用集羣

kubeadm部署kubernetes-v1.14.3 HA集羣

  1. etcd集羣使用二進制部署,使用HTTP協議。
  2. 修改源碼,從新編譯kubeadm,調整證書時間爲10年。
  3. 基於saltstack部署ETCD以及系統優化。
  4. 安裝docker,參考阿里雲的docker參數進行優化。

一.初始化系統環境

  1. 升級內核,參考此文檔
  2. 設置主機名,並添加對應解析。
[root@kubeadm-master-01 ~]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.200.101 kubeadm-master-01
192.168.200.102 kubeadm-master-02
192.168.200.103 kubeadm-master-03
192.168.200.104 kubeadm-node-01
  3. 設置免密登陸
[root@kubeadm-master-01 ~]# scp /etc/hosts kubeadm-master-02:/etc/
[root@kubeadm-master-01 ~]# scp /etc/hosts kubeadm-master-03:/etc/
[root@kubeadm-master-01 ~]# scp /etc/hosts kubeadm-node-01:/etc/
[root@kubeadm-master-01 ~]# ssh-keygen -t rsa
[root@kubeadm-master-01 ~]# ssh-copy-id kubeadm-master-01
[root@kubeadm-master-01 ~]# ssh-copy-id kubeadm-master-02
[root@kubeadm-master-01 ~]# ssh-copy-id kubeadm-master-03
[root@kubeadm-master-01 ~]# ssh-copy-id kubeadm-node-01
  4. 安裝salt-ssh,以及docker-ce的阿里雲yum源。
[root@kubeadm-master-01 ~]# yum install -y https://mirrors.aliyun.com/saltstack/yum/redhat/salt-repo-latest-2.el7.noarch.rpm
[root@kubeadm-master-01 ~]# sed -i "s/repo.saltstack.com/mirrors.aliyun.com\/saltstack/g" /etc/yum.repos.d/salt-latest.repo
[root@kubeadm-master-01 ~]# yum install -y salt-ssh git unzip p7zip psmisc socat wget
[root@kubeadm-master-01 ~]# yum install -y yum-utils device-mapper-persistent-data lvm2
[root@kubeadm-master-01 ~]# yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
  5. 配置kubernetes阿里雲的yum源。
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

二. 安裝部署etcd和kube-nginx

  1. 安裝docker,全部節點執行。
yum list docker-ce.x86_64 --showduplicates | sort -r
yum install docker-ce-18.09.2
  2. 修改docker配置文件,全部節點執行。
mkdir /etc/docker
cat <<EOF > /etc/docker/daemon.json
{
    "exec-opts": ["native.cgroupdriver=systemd"],
    "log-driver": "json-file",
    "log-opts": {
        "max-size": "100m",
        "max-file": "10"
    },
    "bip": "169.254.123.1/24",
    "oom-score-adjust": -1000,
    "registry-mirrors": ["https://fz5yth0r.mirror.aliyuncs.com"],
    "storage-driver": "overlay2",
    "storage-opts":["overlay2.override_kernel_check=true"]
}
EOF
  3. 啓動docker,全部節點執行。
systemctl enable docker
systemctl start docker
  4. 安裝kubectl kubelet kubeadm, 全部節點均需執行
yum install kubelet-1.14.3 kubeadm-1.14.3 kubectl-1.14.3 -y
#配置kubelet,並啓動,全部節點執行
vim /etc/sysconfig/kubelet
KUBELET_EXTRA_ARGS="--cgroup-driver=systemd --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.1"
systemctl enable kubelet
systemctl start kubelet
  5. 安裝ETCD

5.1 準備二進制文件

git clone https://github.com/sky-daiji/salt-k8s-ha-v2.git
cd salt-k8s-ha-v2/
mv * /srv/
/bin/cp /srv/roster /etc/salt/roster
/bin/cp /srv/master /etc/salt/master
#下載etcd的二進制文件
https://github.com/etcd-io/etcd/releases
cd /srv/salt/k8s/
mkdir /srv/salt/k8s/files/etcd-v3.3.13-linux-amd64 -p
#將下載後的etcd二進制文件,存放到這個目錄下

5.2 Salt SSH管理的機器以及角色分配

  • k8s-role: 用來設置K8S的角色
  • etcd-role: 用來設置etcd的角色,若是隻須要部署一個etcd,只須要在一臺機器上設置便可
  • etcd-name: 若是對一臺機器設置了etcd-role就必須設置etcd-name
vim /etc/salt/roster

kubeadm-master-01:
  host: 192.168.200.101
  user: root
  priv: /root/.ssh/id_rsa
  minion_opts:
    grains:
      etcd-role: node
      etcd-name: etcd-01

kubeadm-master-02:
  host: 192.168.200.102
  user: root
  priv: /root/.ssh/id_rsa
  minion_opts:
    grains:
      etcd-role: node
      etcd-name: etcd-02

kubeadm-master-03:
  host: 192.168.200.103
  user: root
  priv: /root/.ssh/id_rsa
  minion_opts:
    grains:
      etcd-role: node
      etcd-name: etcd-03

kubeadm-node-01:
  host: 192.168.200.104
  user: root
  priv: /root/.ssh/id_rsa
  minion_opts:
    grains:
      k8s-role: node

5.3 修改對應的配置參數,本項目使用Salt Pillar保存配置,本配置只用了安裝ETCD,其餘參數能夠不用修改。

vim /srv/pillar/k8s.sls
#設置Master的IP地址(必須修改)
MASTER_IP_M1: "192.168.200.101"
MASTER_IP_M2: "192.168.200.102"
MASTER_IP_M3: "192.168.200.103"
#設置Master的HOSTNAME完整的FQDN名稱(必須修改)
MASTER_H1: "kubeadm-master-01"
MASTER_H2: "kubeadm-master-02"
MASTER_H3: "kubeadm-master-03"

#KUBE-APISERVER的反向代理地址端口
KUBE_APISERVER: "https://127.0.0.1:8443"

#設置ETCD集羣訪問地址(必須修改)
ETCD_ENDPOINTS: "http://192.168.200.101:2379,http://192.168.200.102:2379,http://192.168.200.103:2379"

FLANNEL_ETCD_PREFIX: "/kubernetes/network"

#設置ETCD集羣初始化列表(必須修改)
ETCD_CLUSTER: "etcd-01=http://192.168.200.101:2380,etcd-02=http://192.168.200.102:2380,etcd-03=http://192.168.200.103:2380"

#經過Grains FQDN自動獲取本機IP地址,請注意保證主機名解析到本機IP地址
NODE_IP: {{ grains['fqdn_ip4'][0] }}
HOST_NAME: {{ grains['fqdn'] }}

#設置BOOTSTARP的TOKEN,能夠本身生成
BOOTSTRAP_TOKEN: "be8dad.da8a699a46edc482"
TOKEN_ID: "be8dad"
TOKEN_SECRET: "da8a699a46edc482"
ENCRYPTION_KEY: "8eVtmpUpYjMvH8wKZtKCwQPqYRqM14yvtXPLJdhu0gA="

#配置Service IP地址段
SERVICE_CIDR: "10.1.0.0/16"

#Kubernetes服務 IP (從 SERVICE_CIDR 中預分配)
CLUSTER_KUBERNETES_SVC_IP: "10.1.0.1"

#Kubernetes DNS 服務 IP (從 SERVICE_CIDR 中預分配)
CLUSTER_DNS_SVC_IP: "10.1.0.2"

#設置Node Port的端口範圍
NODE_PORT_RANGE: "20000-40000"

#設置POD的IP地址段
POD_CIDR: "10.2.0.0/16"

#設置集羣的DNS域名
CLUSTER_DNS_DOMAIN: "cluster.local."

#設置Docker Registry地址
#DOCKER_REGISTRY: "https://192.168.150.135:5000"

#設置Master的VIP地址(必須修改)
MASTER_VIP: "192.168.150.253"

#設置網卡名稱,必定要改
VIP_IF: "eth0"

5.4 執行SaltStack狀態

  • 測試Salt SSH聯通性
salt-ssh '*' test.ping
  • 安裝etcd
salt-ssh -L 'kubeadm-master-01,kubeadm-master-02,kubeadm-master-03' state.sls k8s.etcd
  6. 安裝kube-nginx
salt-ssh -L 'kubeadm-master-01,kubeadm-master-02,kubeadm-master-03' state.sls k8s.modules.nginx

三. 安裝kubernetes集羣

  1. 配置kubeadm-config.yaml
vim kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
kubernetesVersion: v1.14.3
#useHyperKubeImage: true
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
apiServer:
  certSANs:
    - "127.0.0.1"
networking:
  serviceSubnet: 10.96.0.0/12
  podSubnet: 10.244.0.0/16

controlPlaneEndpoint: 127.0.0.1:8443

etcd:
  external:
    endpoints:
      - http://192.168.200.101:2379
      - http://192.168.200.102:2379
      - http://192.168.200.103:2379
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
ipvs:
  scheduler: rr
  syncPeriod: 10s
  2. 拉取鏡像
kubeadm config images pull --config kubeadm-config.yaml
  3. 初始化集羣
kubeadm init --config kubeadm-config.yaml
  4. 初始化完成以後,輸出以下。
Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of control-plane nodes by copying certificate authorities
and service account keys on each node and then running the following as root:

  kubeadm join 127.0.0.1:8443 --token xa6317.1tyqmsnbt7wwhqfe \
    --discovery-token-ca-cert-hash sha256:52c45df8f04b675869ad60a42c76e997d6a0da806107aa6f2e4f2963efbc4485 \
    --experimental-control-plane

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 127.0.0.1:8443 --token xa6317.1tyqmsnbt7wwhqfe \
    --discovery-token-ca-cert-hash sha256:52c45df8f04b675869ad60a42c76e997d6a0da806107aa6f2e4f2963efbc4485
  5. 拷貝證書到其餘master節點
openssl x509 -in /etc/kubernetes/pki/ca.crt -noout -text
USER=root
CONTROL_PLANE_IPS="192.168.200.102 192.168.200.103"
for host in ${CONTROL_PLANE_IPS}; do
    scp /etc/kubernetes/pki/ca.crt "${USER}"@$host:/etc/kubernetes/pki/
    scp /etc/kubernetes/pki/ca.key "${USER}"@$host:/etc/kubernetes/pki/
    scp /etc/kubernetes/pki/sa.key "${USER}"@$host:/etc/kubernetes/pki/
    scp /etc/kubernetes/pki/sa.pub "${USER}"@$host:/etc/kubernetes/pki/
    scp /etc/kubernetes/pki/front-proxy-ca.crt "${USER}"@$host:/etc/kubernetes/pki/
    scp /etc/kubernetes/pki/front-proxy-ca.key "${USER}"@$host:/etc/kubernetes/pki/
    scp /etc/kubernetes/admin.conf "${USER}"@$host:/etc/kubernetes/
done
# scp /etc/kubernetes/pki/etcd/ca.crt "${USER}"@$host:/etc/kubernetes/pki/etcd/ca.crt
# scp /etc/kubernetes/pki/etcd/ca.key "${USER}"@$host:/etc/kubernetes/pki/etcd/ca.key
  6. 在其餘的master節點上執行,此命令就是最上面的那個輸出結果中獲取
kubeadm join 127.0.0.1:8443 --token xa6317.1tyqmsnbt7wwhqfe     --discovery-token-ca-cert-hash sha256:52c45df8f04b675869ad60a42c76e997d6a0da806107aa6f2e4f2963efbc4485     --experimental-control-plane
  7. 部署flannel
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yaml
kubectl apply -f kube-flannel.yaml
  8. 查看集羣
[root@kubeadm-master-01 addons]# kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
controller-manager   Healthy   ok
scheduler            Healthy   ok
etcd-1               Healthy   {"health":"true"}
etcd-0               Healthy   {"health":"true"}
etcd-2               Healthy   {"health":"true"}

四.添加node節點

  • 1.設置SSH無密碼登陸,而且在 /etc/hosts 中繼續增長對應的解析。確保全部節點都能解析。
  • 2.在 /etc/salt/roster 裏面,增長對應的機器。
  • 3.安裝docker以及kubeadm、kubelet、kubectl等
  • 4.配置docker以及kubelet的啓動參數
  • 5.執行SaltStack狀態
#在master節點執行
ssh-copy-id kubeadm-node-01
scp /etc/hosts kubeadm-node-01:/etc/
salt-ssh -L 'kubeadm-node-01' state.sls k8s.modules.base-dir
salt-ssh -L 'kubeadm-node-01' state.sls k8s.modules.nginx
systemctl status kube-nginx
#在node節點上執行
配置kubernetes阿里雲的yum源
安裝docker
kubeadm join 127.0.0.1:8443 --token xa6317.1tyqmsnbt7wwhqfe \
    --discovery-token-ca-cert-hash sha256:52c45df8f04b675869ad60a42c76e997d6a0da806107aa6f2e4f2963efbc4485
#以防止外部惡意的節點進入集羣。每一個token自生成起24小時後過時。屆時若是須要加入新的節點,則須要從新生成新的join token,請使用下面的命令生成,在master上執行:
kubeadm token create --print-join-command

部署metrics-server

kubectl apply -f /srv/addons/metrics-server/metrics-server-kubeadm.yaml

因爲kubeadm的安裝方式跟二進制稍有區別,須要修改部分啓動參數以及配置。主要增長的啓動參數以下所示:

command:
        - /metrics-server
        - --kubelet-insecure-tls
        - --kubelet-preferred-address-types=InternalDNS,InternalIP,ExternalDNS,ExternalIP,Hostname

若是 此文檔對您幫助很大,您能夠掃下方二維碼隨意打賞,就當是請我喝杯茶或是咖啡,將不勝感激!

相關文章
相關標籤/搜索