[root@kubeadm-master-01 ~]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.200.101 kubeadm-master-01
192.168.200.102 kubeadm-master-02
192.168.200.103 kubeadm-master-03
192.168.200.104 kubeadm-node-01
[root@kubeadm-master-01 ~]# scp /etc/hosts kubeadm-master-02:/etc/
[root@kubeadm-master-01 ~]# scp /etc/hosts kubeadm-master-03:/etc/
[root@kubeadm-master-01 ~]# scp /etc/hosts kubeadm-node-01:/etc/
[root@kubeadm-master-01 ~]# ssh-keygen -t rsa
[root@kubeadm-master-01 ~]# ssh-copy-id kubeadm-master-01
[root@kubeadm-master-01 ~]# ssh-copy-id kubeadm-master-02
[root@kubeadm-master-01 ~]# ssh-copy-id kubeadm-master-03
[root@kubeadm-master-01 ~]# ssh-copy-id kubeadm-node-01
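A quick sanity check (a minimal sketch using the host names from /etc/hosts above) to confirm that passwordless SSH and name resolution work from the master:

for h in kubeadm-master-02 kubeadm-master-03 kubeadm-node-01; do
    # each command should print the remote host name without prompting for a password
    ssh "$h" hostname
done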
[root@kubeadm-master-01 ~]# yum install -y https://mirrors.aliyun.com/saltstack/yum/redhat/salt-repo-latest-2.el7.noarch.rpm
[root@kubeadm-master-01 ~]# sed -i "s/repo.saltstack.com/mirrors.aliyun.com\/saltstack/g" /etc/yum.repos.d/salt-latest.repo
[root@kubeadm-master-01 ~]# yum install -y salt-ssh git unzip p7zip psmisc socat wget
[root@kubeadm-master-01 ~]# yum install -y yum-utils device-mapper-persistent-data lvm2
[root@kubeadm-master-01 ~]# yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
yum list docker-ce.x86_64 --showduplicates | sort -r
yum install docker-ce-18.09.2
mkdir /etc/docker
cat <<EOF > /etc/docker/daemon.json
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m",
    "max-file": "10"
  },
  "bip": "169.254.123.1/24",
  "oom-score-adjust": -1000,
  "registry-mirrors": ["https://fz5yth0r.mirror.aliyuncs.com"],
  "storage-driver": "overlay2",
  "storage-opts": ["overlay2.override_kernel_check=true"]
}
EOF
systemctl enable docker
systemctl start docker
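Once Docker is running, it is worth confirming that the systemd cgroup driver and overlay2 storage driver from daemon.json took effect, since the kubelet below is configured with the same cgroup driver (a minimal check, assuming the docker CLI from the 18.09 install above):

docker info --format '{{.CgroupDriver}}'   # should print: systemd
docker info --format '{{.Driver}}'         # should print: overlay2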
yum install kubelet-1.14.3 kubeadm-1.14.3 kubectl-1.14.3 -y

#Configure the kubelet and enable it; run this on all nodes
vim /etc/sysconfig/kubelet
KUBELET_EXTRA_ARGS="--cgroup-driver=systemd --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.1"

systemctl enable kubelet
systemctl start kubelet
5.1 Prepare the binaries
git clone https://github.com/sky-daiji/salt-k8s-ha-v2.git
cd salt-k8s-ha-v2/
mv * /srv/
/bin/cp /srv/roster /etc/salt/roster
/bin/cp /srv/master /etc/salt/master

#Download the etcd binaries from https://github.com/etcd-io/etcd/releases
cd /srv/salt/k8s/
mkdir /srv/salt/k8s/files/etcd-v3.3.13-linux-amd64 -p
#Place the downloaded etcd binaries in this directory (an example download follows below)
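The etcd binaries can be fetched and unpacked like this (a sketch assuming version v3.3.13 and the usual GitHub release tarball layout; adjust the version to match the directory name above):

cd /srv/salt/k8s/files/
wget https://github.com/etcd-io/etcd/releases/download/v3.3.13/etcd-v3.3.13-linux-amd64.tar.gz
# unpacking creates etcd-v3.3.13-linux-amd64/ containing the etcd and etcdctl binaries
tar xf etcd-v3.3.13-linux-amd64.tar.gz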
5.2 Machines managed by Salt SSH and their role assignment
vim /etc/salt/roster
kubeadm-master-01:
  host: 192.168.200.101
  user: root
  priv: /root/.ssh/id_rsa
  minion_opts:
    grains:
      etcd-role: node
      etcd-name: etcd-01
kubeadm-master-02:
  host: 192.168.200.102
  user: root
  priv: /root/.ssh/id_rsa
  minion_opts:
    grains:
      etcd-role: node
      etcd-name: etcd-02
kubeadm-master-03:
  host: 192.168.200.103
  user: root
  priv: /root/.ssh/id_rsa
  minion_opts:
    grains:
      etcd-role: node
      etcd-name: etcd-03
kubeadm-node-01:
  host: 192.168.200.104
  user: root
  priv: /root/.ssh/id_rsa
  minion_opts:
    grains:
      k8s-role: node
5.3 Modify the corresponding configuration parameters. This project uses Salt Pillar to store its configuration; here it is only used to install etcd, so the remaining parameters can be left unchanged.
vim /srv/pillar/k8s.sls
#Master IP addresses (must be changed)
MASTER_IP_M1: "192.168.200.101"
MASTER_IP_M2: "192.168.200.102"
MASTER_IP_M3: "192.168.200.103"
#Full FQDN host names of the masters (must be changed)
MASTER_H1: "kubeadm-master-01"
MASTER_H2: "kubeadm-master-02"
MASTER_H3: "kubeadm-master-03"
#Reverse-proxy address and port for kube-apiserver
KUBE_APISERVER: "https://127.0.0.1:8443"
#etcd cluster client endpoints (must be changed)
ETCD_ENDPOINTS: "http://192.168.200.101:2379,http://192.168.200.102:2379,http://192.168.200.103:2379"
FLANNEL_ETCD_PREFIX: "/kubernetes/network"
#etcd cluster initial member list (must be changed)
ETCD_CLUSTER: "etcd-01=http://192.168.200.101:2380,etcd-02=http://192.168.200.102:2380,etcd-03=http://192.168.200.103:2380"
#The local IP is taken automatically from the Grains FQDN; make sure the host name resolves to the local IP
NODE_IP: {{ grains['fqdn_ip4'][0] }}
HOST_NAME: {{ grains['fqdn'] }}
#Bootstrap token; you can generate your own (see the example after this file)
BOOTSTRAP_TOKEN: "be8dad.da8a699a46edc482"
TOKEN_ID: "be8dad"
TOKEN_SECRET: "da8a699a46edc482"
ENCRYPTION_KEY: "8eVtmpUpYjMvH8wKZtKCwQPqYRqM14yvtXPLJdhu0gA="
#Service IP range
SERVICE_CIDR: "10.1.0.0/16"
#Kubernetes service IP (pre-allocated from SERVICE_CIDR)
CLUSTER_KUBERNETES_SVC_IP: "10.1.0.1"
#Kubernetes DNS service IP (pre-allocated from SERVICE_CIDR)
CLUSTER_DNS_SVC_IP: "10.1.0.2"
#NodePort port range
NODE_PORT_RANGE: "20000-40000"
#Pod IP range
POD_CIDR: "10.2.0.0/16"
#Cluster DNS domain
CLUSTER_DNS_DOMAIN: "cluster.local."
#Docker registry address
#DOCKER_REGISTRY: "https://192.168.150.135:5000"
#Master VIP address (must be changed)
MASTER_VIP: "192.168.150.253"
#Network interface name; be sure to change this
VIP_IF: "eth0"
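The bootstrap token and encryption key can be generated on any Linux host, for example (a sketch; the token has to match the [a-z0-9]{6}.[a-z0-9]{16} format that kubeadm expects):

TOKEN_ID=$(head -c 3 /dev/urandom | od -An -tx1 | tr -d ' \n')       # 6 hex characters
TOKEN_SECRET=$(head -c 8 /dev/urandom | od -An -tx1 | tr -d ' \n')   # 16 hex characters
echo "BOOTSTRAP_TOKEN: ${TOKEN_ID}.${TOKEN_SECRET}"
echo "ENCRYPTION_KEY: $(head -c 32 /dev/urandom | base64)"           # 32 random bytes, base64-encoded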
5.4 Apply the SaltStack states
salt-ssh '*' test.ping
salt-ssh -L 'kubeadm-master-01,kubeadm-master-02,kubeadm-master-03' state.sls k8s.etcd
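Once the etcd state has run, the cluster can be checked from any master (a minimal sketch, assuming the etcdctl binary unpacked earlier is on the PATH):

ETCDCTL_API=3 etcdctl \
    --endpoints=http://192.168.200.101:2379,http://192.168.200.102:2379,http://192.168.200.103:2379 \
    endpoint health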
salt-ssh -L 'kubeadm-master-01,kubeadm-master-02,kubeadm-master-03' state.sls k8s.modules.nginx
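This state deploys a local nginx reverse proxy in front of the apiservers; the kubeadm configuration below points controlPlaneEndpoint at it. A quick check on each master (assuming the state installs the kube-nginx systemd unit referenced later in this document):

systemctl status kube-nginx
ss -tlnp | grep 8443    # the proxy should be listening on 127.0.0.1:8443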
vim kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
kubernetesVersion: v1.14.3
#useHyperKubeImage: true
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
apiServer:
  certSANs:
  - "127.0.0.1"
networking:
  serviceSubnet: 10.96.0.0/12
  podSubnet: 10.244.0.0/16
controlPlaneEndpoint: 127.0.0.1:8443
etcd:
  external:
    endpoints:
    - http://192.168.200.101:2379
    - http://192.168.200.102:2379
    - http://192.168.200.103:2379
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
ipvs:
  scheduler: rr
  syncPeriod: 10s
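Before pulling, the image list can be previewed to confirm that the Aliyun imageRepository in the file is being honored:

kubeadm config images list --config kubeadm-config.yaml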
kubeadm config images pull --config kubeadm-config.yaml
kubeadm init --config kubeadm-config.yaml
Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of control-plane nodes by copying certificate authorities
and service account keys on each node and then running the following as root:

  kubeadm join 127.0.0.1:8443 --token xa6317.1tyqmsnbt7wwhqfe \
    --discovery-token-ca-cert-hash sha256:52c45df8f04b675869ad60a42c76e997d6a0da806107aa6f2e4f2963efbc4485 \
    --experimental-control-plane

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 127.0.0.1:8443 --token xa6317.1tyqmsnbt7wwhqfe \
    --discovery-token-ca-cert-hash sha256:52c45df8f04b675869ad60a42c76e997d6a0da806107aa6f2e4f2963efbc4485
openssl x509 -in /etc/kubernetes/pki/ca.crt -noout -text

USER=root
CONTROL_PLANE_IPS="192.168.200.102 192.168.200.103"
for host in ${CONTROL_PLANE_IPS}; do
    scp /etc/kubernetes/pki/ca.crt "${USER}"@$host:/etc/kubernetes/pki/
    scp /etc/kubernetes/pki/ca.key "${USER}"@$host:/etc/kubernetes/pki/
    scp /etc/kubernetes/pki/sa.key "${USER}"@$host:/etc/kubernetes/pki/
    scp /etc/kubernetes/pki/sa.pub "${USER}"@$host:/etc/kubernetes/pki/
    scp /etc/kubernetes/pki/front-proxy-ca.crt "${USER}"@$host:/etc/kubernetes/pki/
    scp /etc/kubernetes/pki/front-proxy-ca.key "${USER}"@$host:/etc/kubernetes/pki/
    scp /etc/kubernetes/admin.conf "${USER}"@$host:/etc/kubernetes/
done
# scp /etc/kubernetes/pki/etcd/ca.crt "${USER}"@$host:/etc/kubernetes/pki/etcd/ca.crt
# scp /etc/kubernetes/pki/etcd/ca.key "${USER}"@$host:/etc/kubernetes/pki/etcd/ca.key
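If the join command from the init output is lost, the --discovery-token-ca-cert-hash value can be recomputed from the CA certificate; this is the standard openssl pipeline from the kubeadm documentation:

openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | \
    openssl rsa -pubin -outform der 2>/dev/null | \
    openssl dgst -sha256 -hex | sed 's/^.* //'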
kubeadm join 127.0.0.1:8443 --token xa6317.1tyqmsnbt7wwhqfe --discovery-token-ca-cert-hash sha256:52c45df8f04b675869ad60a42c76e997d6a0da806107aa6f2e4f2963efbc4485 --experimental-control-plane
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yaml
kubectl apply -f kube-flannel.yaml
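Once flannel is applied, the nodes should move to Ready and the flannel and coredns pods should come up:

kubectl get pods -n kube-system -o wide
kubectl get nodes -o wide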
[root@kubeadm-master-01 addons]# kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
controller-manager   Healthy   ok
scheduler            Healthy   ok
etcd-1               Healthy   {"health":"true"}
etcd-0               Healthy   {"health":"true"}
etcd-2               Healthy   {"health":"true"}
To add a new node, first add its entry to /etc/hosts and make sure every node can resolve it, then add the machine to /etc/salt/roster.

#Run on the master node
ssh-copy-id kubeadm-node-01
scp /etc/hosts kubeadm-node-01:/etc/
salt-ssh -L 'kubeadm-node-01' state.sls k8s.modules.base-dir
salt-ssh -L 'kubeadm-node-01' state.sls k8s.modules.nginx
systemctl status kube-nginx

#Run on the node: configure the Aliyun Kubernetes yum repository, install Docker, then join the cluster
kubeadm join 127.0.0.1:8443 --token xa6317.1tyqmsnbt7wwhqfe \
    --discovery-token-ca-cert-hash sha256:52c45df8f04b675869ad60a42c76e997d6a0da806107aa6f2e4f2963efbc4485

#To keep malicious external nodes from joining the cluster, every token expires 24 hours after it is generated.
#If you need to add new nodes after that, generate a fresh join token with the following command on a master:
kubeadm token create --print-join-command
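After the join completes, the new node should appear in the node list and can optionally be given a role label (the label command is a common convention, not something this project requires):

kubectl get nodes -o wide
kubectl label node kubeadm-node-01 node-role.kubernetes.io/node=   # optional: mark the worker role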
kubectl apply -f /srv/addons/metrics-server/metrics-server-kubeadm.yaml
Because the kubeadm installation differs slightly from the binary installation, part of the metrics-server startup arguments and configuration need to be modified. The main arguments to add are shown below:
command:
- /metrics-server
- --kubelet-insecure-tls
- --kubelet-preferred-address-types=InternalDNS,InternalIP,ExternalDNS,ExternalIP,Hostname
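After the patched manifest is applied, metrics should start flowing within a minute or two and can be verified with:

kubectl top nodes
kubectl top pods -n kube-system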
If this document has been a big help to you, you can scan the QR code below to leave a tip of any amount, as if buying me a cup of tea or coffee; it would be greatly appreciated!