Flanneld 0.11.0 https://github.com/coreos/flannel/releases
Add-ons:
CoreDNS
Dashboard
Heapster (influxdb, grafana)
Metrics-Server
EFK (Elasticsearch, Fluentd, Kibana)
Image registries:
docker registry
harbor
kube-apiserver:
Uses keepalived and haproxy for 3-node high availability;
Disables the insecure port 8080 and anonymous access;
Serves https requests on the secure port 6443;
Enforces strict authentication and authorization policies (x509, token, RBAC);
Enables bootstrap token authentication to support kubelet TLS bootstrapping;
Accesses kubelet and etcd over https, so all traffic is encrypted;
kube-controller-manager:
3-node high availability;
Disables the insecure port and serves https requests on the secure port 10252;
Uses a kubeconfig file to access the apiserver's secure port;
Automatically approves kubelet certificate signing requests (CSRs) and rotates certificates automatically as they expire;
Each controller uses its own ServiceAccount to access the apiserver;
kube-scheduler:
3-node high availability;
Uses a kubeconfig file to access the apiserver's secure port;
kubelet:
Bootstrap tokens can be created dynamically with kubeadm, or configured statically in the apiserver;
Uses the TLS bootstrap mechanism to generate client and server certificates automatically and rotate them as they expire;
Main parameters are configured in a JSON file of type KubeletConfiguration (see the sketch after this overview);
Disables the read-only port and serves https requests on the secure port 10250, authenticating and authorizing every request and rejecting anonymous or unauthorized access;
Uses a kubeconfig file to access the apiserver's secure port;
kube-proxy:
Uses a kubeconfig file to access the apiserver's secure port;
Main parameters are configured in a JSON file of type KubeProxyConfiguration;
Uses the ipvs proxy mode;
Cluster add-ons:
DNS: CoreDNS, which offers better functionality and performance;
Dashboard: supports login authentication;
Metrics: heapster and metrics-server, accessing the kubelet secure port over https;
Logging: Elasticsearch, Fluentd, Kibana;
Registry: docker-registry, harbor;
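As noted in the kubelet and kube-proxy items above, the main per-node parameters live in configuration files rather than command-line flags. A minimal sketch of such a KubeletConfiguration file follows; the file path and the clusterDNS address are assumptions chosen to fit this guide's /opt/k8s layout and 10.96.0.0/16 service range, not values taken from the original deployment:

# Hypothetical minimal KubeletConfiguration; adjust paths/addresses to your cluster
cat > /opt/k8s/kubelet/kubelet-config.json <<EOF
{
  "kind": "KubeletConfiguration",
  "apiVersion": "kubelet.config.k8s.io/v1beta1",
  "authentication": {
    "anonymous": { "enabled": false },
    "webhook": { "enabled": true },
    "x509": { "clientCAFile": "/opt/k8s/cert/ca.pem" }
  },
  "authorization": { "mode": "Webhook" },
  "clusterDomain": "cluster.local",
  "clusterDNS": ["10.96.0.2"],
  "readOnlyPort": 0,
  "port": 10250
}
EOF

Here readOnlyPort: 0 disables the read-only port and port 10250 is the secure port, matching the kubelet policy described in the overview.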
[root@localhost ~]# uname -a
Linux localhost.localdomain 4.18.0-80.11.2.el8_0.x86_64 #1 SMP Tue Sep 24 11:32:19 UTC 2019 x86_64 x86_64 x86_64 GNU/Linux
[root@localhost ~]# cat /etc/redhat-release
CentOS Linux release 8.0.1905 (Core)
hostnamectl set-hostname k8s-master01 ...
# Append to /etc/hosts --> note that >> appends without overwriting the existing content!
cat>> /etc/hosts <<EOF
192.168.2.201 k8s-master01
192.168.2.202 k8s-master02
192.168.2.203 k8s-master03
192.168.2.11 k8s-node01
192.168.2.12 k8s-node02
EOF
yum install wget vim yum-utils net-tools tar chrony curl jq ipvsadm ipset conntrack iptables sysstat libseccomp -y
# Disable the firewall and flush its rules
systemctl disable firewalld && systemctl stop firewalld && systemctl status firewalld
iptables -F && iptables -X && iptables -F -t nat && iptables -X -t nat
iptables -P FORWARD ACCEPT
# Disable dnsmasq, otherwise Docker containers may fail to resolve domain names! (not present on CentOS 8)
systemctl disable --now dnsmasq
# Disable selinux ---> selinux=disabled takes effect after a reboot!
setenforce 0 && sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
# Disable swap ---> comments out the swap line in /etc/fstab; takes effect after a reboot!
swapoff -a && sed -i '/ swap / s/^\(.*\)$/# \1/g' /etc/fstab
timedatectl set-timezone Asia/Shanghai
timedatectl set-local-rtc 0
yum install chrony -y
systemctl enable chronyd && systemctl start chronyd && systemctl status chronyd
cat> kubernetes.conf <<EOF
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv6.conf.all.disable_ipv6=1
EOF
cp kubernetes.conf /etc/sysctl.d/kubernetes.conf
sysctl -p /etc/sysctl.d/kubernetes.conf
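The two bridge-nf-call keys only exist while the br_netfilter kernel module is loaded, so on a fresh minimal install the sysctl -p above may fail with "No such file or directory". A small sketch, assuming the stock CentOS 8 kernel, that loads the module now and on every boot:

modprobe br_netfilter
# Persist the module load across reboots
cat > /etc/modules-load.d/k8s.conf <<EOF
br_netfilter
EOF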
# Create the directories on every machine:
mkdir -p /opt/k8s/{bin,cert,script,kube-apiserver,kube-controller-manager,kube-scheduler,kubelet,kube-proxy}
mkdir -p /opt/etcd/{bin,cert}
mkdir -p /opt/lib/etcd
mkdir -p /opt/flanneld/{bin,cert}
mkdir -p /var/log/kubernetes
# Add the environment variables on every machine:
sh -c "echo 'PATH=/opt/k8s/bin:/opt/etcd/bin:/opt/flanneld/bin:$PATH:$HOME/bin:$JAVA_HOME/bin' >> /etc/profile.d/k8s.sh"
source /etc/profile.d/k8s.sh
[root@k8s-master01 ~]# ssh-keygen
[root@k8s-master01 ~]# ssh-copy-id root@k8s-master01
[root@k8s-master01 ~]# ssh-copy-id root@k8s-master02
[root@k8s-master01 ~]# ssh-copy-id root@k8s-master03
[root@k8s-master01 ~]# wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
[root@k8s-master01 ~]# mv cfssl_linux-amd64 /opt/k8s/bin/cfssl
[root@k8s-master01 ~]# wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
[root@k8s-master01 ~]# mv cfssljson_linux-amd64 /opt/k8s/bin/cfssljson
[root@k8s-master01 ~]# wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
[root@k8s-master01 ~]# mv cfssl-certinfo_linux-amd64 /opt/k8s/bin/cfssl-certinfo
chmod +x /opt/k8s/bin/*
[root@k8s-master01 ~]# cd /opt/k8s/cert/
[root@k8s-master01 cert]# cat> ca-config.json <<EOF
{
    "signing": {
        "default": {
            "expiry": "876000h"
        },
        "profiles": {
            "kubernetes": {
                "usages": [
                    "signing",
                    "key encipherment",
                    "server auth",
                    "client auth"
                ],
                "expiry": "876000h"
            }
        }
    }
}
EOF
[root@k8s-master01 cert]# cat > ca-csr.json <<EOF
{
    "CN": "kubernetes",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "BeiJing",
            "L": "BeiJing",
            "O": "k8s",
            "OU": "steams"
        }
    ]
}
EOF
[root@k8s-master01 cert]# cfssl gencert -initca ca-csr.json | cfssljson -bare ca
# Check that the files were generated:
[root@k8s-master01 cert]# ls
ca-config.json  ca.csr  ca-csr.json  ca-key.pem  ca.pem
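Since cfssl-certinfo was installed alongside cfssl, the new CA's subject and validity can be inspected before distributing it; a quick optional check:

[root@k8s-master01 cert]# cfssl-certinfo -cert ca.pem

The JSON output should show CN "kubernetes" and a not_after roughly 100 years out, matching the 876000h expiry configured above.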
[root@k8s-master01 cert]# vi /opt/k8s/script/scp_k8s_cacert.sh
MASTER_IPS=("$1" "$2" "$3")
for master_ip in ${MASTER_IPS[@]};do
    echo ">>> ${master_ip}"
    scp /opt/k8s/cert/ca*.pem /opt/k8s/cert/ca-config.json root@${master_ip}:/opt/k8s/cert
done
[root@k8s-master01 cert]# bash /opt/k8s/script/scp_k8s_cacert.sh 192.168.2.201 192.168.2.202 192.168.2.203
[root@k8s-master01 ~]# wget https://github.com/etcd-io/etcd/releases/download/v3.3.17/etcd-v3.3.17-linux-amd64.tar.gz
[root@k8s-master01 ~]# tar -xvf etcd-v3.3.17-linux-amd64.tar.gz
[root@k8s-master01 cert]# cat > /opt/etcd/cert/etcd-csr.json <<EOF
{
    "CN": "etcd",
    "hosts": [
        "127.0.0.1",
        "192.168.2.201",
        "192.168.2.202",
        "192.168.2.203"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "BeiJing",
            "L": "BeiJing",
            "O": "k8s",
            "OU": "steams"
        }
    ]
}
EOF
[root@k8s-master01 ~]# cfssl gencert -ca=/opt/k8s/cert/ca.pem \
    -ca-key=/opt/k8s/cert/ca-key.pem \
    -config=/opt/k8s/cert/ca-config.json \
    -profile=kubernetes /opt/etcd/cert/etcd-csr.json | cfssljson -bare /opt/etcd/cert/etcd
# Check that the files were generated:
[root@k8s-master01 ~]# ls /opt/etcd/cert/*
etcd.csr  etcd-csr.json  etcd-key.pem  etcd.pem
[root@k8s-master01 ~]# vi /opt/k8s/script/scp_etcd.sh
MASTER_IPS=("$1" "$2" "$3")
for master_ip in ${MASTER_IPS[@]};do
    echo ">>> ${master_ip}"
    scp /root/etcd-v3.3.17-linux-amd64/etcd* root@${master_ip}:/opt/etcd/bin
    ssh root@${master_ip} "chmod +x /opt/etcd/bin/*"
    scp /opt/etcd/cert/etcd*.pem root@${master_ip}:/opt/etcd/cert/
done
[root@k8s-master01 ~]# bash /opt/k8s/script/scp_etcd.sh 192.168.2.201 192.168.2.202 192.168.2.203
[root@k8s-master01 ~]# vi /opt/etcd/etcd.service.template
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos

[Service]
User=root
Type=notify
WorkingDirectory=/opt/lib/etcd/
ExecStart=/opt/etcd/bin/etcd \
    --data-dir=/opt/lib/etcd \
    --name ##ETCD_NAME## \
    --cert-file=/opt/etcd/cert/etcd.pem \
    --key-file=/opt/etcd/cert/etcd-key.pem \
    --trusted-ca-file=/opt/k8s/cert/ca.pem \
    --peer-cert-file=/opt/etcd/cert/etcd.pem \
    --peer-key-file=/opt/etcd/cert/etcd-key.pem \
    --peer-trusted-ca-file=/opt/k8s/cert/ca.pem \
    --peer-client-cert-auth \
    --client-cert-auth \
    --listen-peer-urls=https://##MASTER_IP##:2380 \
    --initial-advertise-peer-urls=https://##MASTER_IP##:2380 \
    --listen-client-urls=https://##MASTER_IP##:2379,http://127.0.0.1:2379 \
    --advertise-client-urls=https://##MASTER_IP##:2379 \
    --initial-cluster-token=etcd-cluster-0 \
    --initial-cluster=etcd0=https://192.168.2.201:2380,etcd1=https://192.168.2.202:2380,etcd2=https://192.168.2.203:2380 \
    --initial-cluster-state=new
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
[root@k8s-master01 ~]# vi /opt/k8s/script/etcd_service.sh
ETCD_NAMES=("etcd0" "etcd1" "etcd2")
MASTER_IPS=("$1" "$2" "$3")
# Substitute the variables in the template to create a systemd unit file for each node
for (( i=0; i < 3; i++ ));do
    sed -e "s/##ETCD_NAME##/${ETCD_NAMES[i]}/g" -e "s/##MASTER_IP##/${MASTER_IPS[i]}/g" /opt/etcd/etcd.service.template > /opt/etcd/etcd-${MASTER_IPS[i]}.service
done
# Distribute the generated systemd unit files:
for master_ip in ${MASTER_IPS[@]};do
    echo ">>> ${master_ip}"
    scp /opt/etcd/etcd-${master_ip}.service root@${master_ip}:/etc/systemd/system/etcd.service
done
[root@k8s-master01 ~]# bash /opt/k8s/script/etcd_service.sh 192.168.2.201 192.168.2.202 192.168.2.203
[root@k8s-master01 ~]# vi /opt/k8s/script/etcd.sh
MASTER_IPS=("$1" "$2" "$3")
# Start the etcd service
for master_ip in ${MASTER_IPS[@]};do
    echo ">>> ${master_ip}"
    ssh root@${master_ip} "systemctl daemon-reload && systemctl enable etcd && systemctl start etcd"
done
# Check the result; the status must be active (running)
for master_ip in ${MASTER_IPS[@]};do
    echo ">>> ${master_ip}"
    ssh root@${master_ip} "systemctl status etcd|grep Active"
done
# Verify the service; the cluster is working when every endpoint reports healthy
for master_ip in ${MASTER_IPS[@]};do
    echo ">>> ${master_ip}"
    ETCDCTL_API=3 /opt/etcd/bin/etcdctl \
        --endpoints=https://${master_ip}:2379 \
        --cacert=/opt/k8s/cert/ca.pem \
        --cert=/opt/etcd/cert/etcd.pem \
        --key=/opt/etcd/cert/etcd-key.pem endpoint health
done
[root@k8s-master01 ~]# bash /opt/k8s/script/etcd.sh 192.168.2.201 192.168.2.202 192.168.2.203
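Besides per-endpoint health, it is worth confirming that all three members actually joined one cluster; an optional check reusing the same certificates:

[root@k8s-master01 ~]# ETCDCTL_API=3 /opt/etcd/bin/etcdctl \
    --endpoints=https://192.168.2.201:2379 \
    --cacert=/opt/k8s/cert/ca.pem \
    --cert=/opt/etcd/cert/etcd.pem \
    --key=/opt/etcd/cert/etcd-key.pem member list
# Should list three started members: etcd0, etcd1, etcd2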
[root@k8s-master01 ~]# wget https://github.com/coreos/flannel/releases/download/v0.11.0/flannel-v0.11.0-linux-amd64.tar.gz
[root@k8s-master01 ~]# mkdir flanneld
[root@k8s-master01 ~]# tar -xvf flannel-v0.11.0-linux-amd64.tar.gz -C flanneld
[root@k8s-master01 ~]# cat > /opt/flanneld/cert/flanneld-csr.json <<EOF
{
    "CN": "flanneld",
    "hosts": [],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "BeiJing",
            "L": "BeiJing",
            "O": "k8s",
            "OU": "steams"
        }
    ]
}
EOF
[root@k8s-master01 ~]# cfssl gencert -ca=/opt/k8s/cert/ca.pem \
    -ca-key=/opt/k8s/cert/ca-key.pem \
    -config=/opt/k8s/cert/ca-config.json \
    -profile=kubernetes /opt/flanneld/cert/flanneld-csr.json | cfssljson -bare /opt/flanneld/cert/flanneld
[root@k8s-master01 ~]# ll /opt/flanneld/cert/flanneld*
flanneld.csr  flanneld-csr.json  flanneld-key.pem  flanneld.pem
[root@k8s-master01 ~]# vi /opt/k8s/script/scp_flanneld.sh
MASTER_IPS=("$1" "$2" "$3")
for master_ip in ${MASTER_IPS[@]};do
    echo ">>> ${master_ip}"
    scp /root/flanneld/flanneld /root/flanneld/mk-docker-opts.sh root@${master_ip}:/opt/flanneld/bin/
    ssh root@${master_ip} "chmod +x /opt/flanneld/bin/*"
    scp /opt/flanneld/cert/flanneld*.pem root@${master_ip}:/opt/flanneld/cert
done
[root@k8s-master01 ~]# bash /opt/k8s/script/scp_flanneld.sh 192.168.2.201 192.168.2.202 192.168.2.203
[root@k8s-master01 ~]# ETCDCTL_API=2 etcdctl \
    --endpoints="https://192.168.2.201:2379,https://192.168.2.202:2379,https://192.168.2.203:2379" \
    --ca-file=/opt/k8s/cert/ca.pem \
    --cert-file=/opt/flanneld/cert/flanneld.pem \
    --key-file=/opt/flanneld/cert/flanneld-key.pem \
    set /atomic.io/network/config '{"Network":"10.30.0.0/16","SubnetLen": 24, "Backend": {"Type": "vxlan"}}'
# Returns the following (the Pod "Network" written here must be a /16 and must match the kube-controller-manager --cluster-cidr value)
{"Network":"10.30.0.0/16","SubnetLen": 24, "Backend": {"Type": "vxlan"}}
[root@k8s-master01 ~]# vi /opt/flanneld/flanneld.service.template
[Unit]
Description=Flanneld overlay address etcd agent
After=network.target
After=network-online.target
Wants=network-online.target
After=etcd.service
Before=docker.service

[Service]
Type=notify
ExecStart=/opt/flanneld/bin/flanneld \
    -etcd-cafile=/opt/k8s/cert/ca.pem \
    -etcd-certfile=/opt/flanneld/cert/flanneld.pem \
    -etcd-keyfile=/opt/flanneld/cert/flanneld-key.pem \
    -etcd-endpoints=https://192.168.2.201:2379,https://192.168.2.202:2379,https://192.168.2.203:2379 \
    -etcd-prefix=/atomic.io/network \
    -iface=eth0
ExecStartPost=/opt/flanneld/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/docker
Restart=on-failure

[Install]
WantedBy=multi-user.target
RequiredBy=docker.service
[root@k8s-master01 ~]# vi /opt/k8s/script/flanneld_service.sh
MASTER_IPS=("$1" "$2" "$3")
for master_ip in ${MASTER_IPS[@]};do
    echo ">>> ${master_ip}"
    # Distribute the flanneld systemd unit file to every node
    scp /opt/flanneld/flanneld.service.template root@${master_ip}:/etc/systemd/system/flanneld.service
    # Start the flanneld service
    ssh root@${master_ip} "systemctl daemon-reload && systemctl enable flanneld && systemctl restart flanneld"
    # Check the result
    ssh root@${master_ip} "systemctl status flanneld|grep Active"
done
[root@k8s-master01 ~]# bash /opt/k8s/script/flanneld_service.sh 192.168.2.201 192.168.2.202 192.168.2.203
# View the cluster Pod network (/16)
[root@k8s-master01 ~]# ETCDCTL_API=2 etcdctl \
    --endpoints="https://192.168.2.201:2379,https://192.168.2.202:2379,https://192.168.2.203:2379" \
    --ca-file=/opt/k8s/cert/ca.pem \
    --cert-file=/opt/flanneld/cert/flanneld.pem \
    --key-file=/opt/flanneld/cert/flanneld-key.pem \
    get /atomic.io/network/config
# Output:
{"Network":"10.30.0.0/16","SubnetLen": 24, "Backend": {"Type": "vxlan"}}

# List the allocated Pod subnets (/24)
[root@k8s-master01 ~]# ETCDCTL_API=2 etcdctl \
    --endpoints="https://192.168.2.201:2379,https://192.168.2.202:2379,https://192.168.2.203:2379" \
    --ca-file=/opt/k8s/cert/ca.pem \
    --cert-file=/opt/flanneld/cert/flanneld.pem \
    --key-file=/opt/flanneld/cert/flanneld-key.pem \
    ls /atomic.io/network/subnets
# Output:
/atomic.io/network/subnets/10.30.34.0-24
/atomic.io/network/subnets/10.30.41.0-24
/atomic.io/network/subnets/10.30.7.0-24

# Look up the node IP and flannel interface address behind a given Pod subnet
[root@k8s-master01 ~]# ETCDCTL_API=2 etcdctl \
    --endpoints="https://192.168.2.201:2379,https://192.168.2.202:2379,https://192.168.2.203:2379" \
    --ca-file=/opt/k8s/cert/ca.pem \
    --cert-file=/opt/flanneld/cert/flanneld.pem \
    --key-file=/opt/flanneld/cert/flanneld-key.pem \
    get /atomic.io/network/subnets/10.30.34.0-24
# Output:
{"PublicIP":"192.168.2.202","BackendType":"vxlan","BackendData":{"VtepMAC":"e6:b2:85:07:9f:c0"}}

# Verify the nodes can reach each other over the Pod network; substitute the subnets from your own output!
[root@k8s-master01 ~]# vi /opt/k8s/script/ping_flanneld.sh
MASTER_IPS=("$1" "$2" "$3")
for master_ip in ${MASTER_IPS[@]};do
    echo ">>> ${master_ip}"
    # After flannel is deployed, check that the flannel interface exists on each node (it may be named flannel0, flannel.0, flannel.1, etc.)
    ssh ${master_ip} "/usr/sbin/ip addr show flannel.1|grep -w inet"
    # From each node, ping every flannel interface IP to confirm connectivity
    ssh ${master_ip} "ping -c 1 10.30.34.0"
    ssh ${master_ip} "ping -c 1 10.30.41.0"
    ssh ${master_ip} "ping -c 1 10.30.7.0"
done
# Run it:
[root@k8s-master01 ~]# bash /opt/k8s/script/ping_flanneld.sh 192.168.2.201 192.168.2.202 192.168.2.203
[root@k8s-master01 ~]# wget https://dl.k8s.io/v1.16.2/kubernetes-server-linux-amd64.tar.gz
[root@k8s-master01 ~]# tar -zxvf kubernetes-server-linux-amd64.tar.gz
[root@k8s-master01 ~]# vi /opt/k8s/script/kubectl_environment.sh
MASTER_IPS=("$1" "$2" "$3")
for master_ip in ${MASTER_IPS[@]};do
    echo ">>> ${master_ip}"
    scp /root/kubernetes/server/bin/kubectl root@${master_ip}:/opt/k8s/bin/
done
[root@k8s-master01 ~]# bash /opt/k8s/script/kubectl_environment.sh 192.168.2.201 192.168.2.202 192.168.2.203
[root@k8s-master01 ~]# cat > /opt/k8s/cert/admin-csr.json <<EOF
{
    "CN": "admin",
    "hosts": [],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "BeiJing",
            "L": "BeiJing",
            "O": "system:masters",
            "OU": "steams"
        }
    ]
}
EOF
[root@k8s-master01 ~]# cfssl gencert -ca=/opt/k8s/cert/ca.pem \
    -ca-key=/opt/k8s/cert/ca-key.pem \
    -config=/opt/k8s/cert/ca-config.json \
    -profile=kubernetes /opt/k8s/cert/admin-csr.json | cfssljson -bare /opt/k8s/cert/admin
[root@k8s-master01 ~]# ll /opt/k8s/cert/admin*
admin.csr  admin-csr.json  admin-key.pem  admin.pem
step.1 Set the cluster parameters. --server=${KUBE_APISERVER} specifies the apiserver IP and port; this guide uses the haproxy VIP and its port. If there is no haproxy in front, use the actual apiserver IP and port!
[root@k8s-master01 ~]# kubectl config set-cluster kubernetes \
    --certificate-authority=/opt/k8s/cert/ca.pem \
    --embed-certs=true \
    --server=https://192.168.2.210:8443 \
    --kubeconfig=/root/.kube/kubectl.kubeconfig
step.2 Set the client authentication parameters
[root@k8s-master01 ~]# kubectl config set-credentials kube-admin \
    --client-certificate=/opt/k8s/cert/admin.pem \
    --client-key=/opt/k8s/cert/admin-key.pem \
    --embed-certs=true \
    --kubeconfig=/root/.kube/kubectl.kubeconfig
step.3 Set the context parameters
[root@k8s-master01 ~]# kubectl config set-context kube-admin@kubernetes \
    --cluster=kubernetes \
    --user=kube-admin \
    --kubeconfig=/root/.kube/kubectl.kubeconfig
step.4 Set the default context
[root@k8s-master01 ~]# kubectl config use-context kube-admin@kubernetes --kubeconfig=/root/.kube/kubectl.kubeconfig
--certificate-authority: the root certificate used to verify the kube-apiserver certificate;
--client-certificate, --client-key: the admin certificate and private key just generated, used when connecting to the kube-apiserver;
--embed-certs=true: embeds the contents of ca.pem and admin.pem into the generated kubectl.kubeconfig file (without it, only the certificate file paths are written);
[root@k8s-master01 ~]# kubectl config view --kubeconfig=/root/.kube/kubectl.kubeconfig
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: DATA+OMITTED
    server: https://192.168.2.210:8443
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: kube-admin
  name: kube-admin@kubernetes
current-context: kube-admin@kubernetes
kind: Config
preferences: {}
users:
- name: kube-admin
  user:
    client-certificate-data: REDACTED
    client-key-data: REDACTED
[root@k8s-master01 ~]# vi /opt/k8s/script/scp_kubectl_config.sh
MASTER_IPS=("$1" "$2" "$3")
for master_ip in ${MASTER_IPS[@]};do
    echo ">>> ${master_ip}"
    scp /root/kubernetes/server/bin/kubectl root@${master_ip}:/opt/k8s/bin/
    ssh root@${master_ip} "chmod +x /opt/k8s/bin/*"
    scp /root/.kube/kubectl.kubeconfig root@${master_ip}:/root/.kube/config
done
[root@k8s-master01 ~]# bash /opt/k8s/script/scp_kubectl_config.sh 192.168.2.201 192.168.2.202 192.168.2.203
The kubernetes master nodes run the following components:
kube-apiserver
kube-scheduler
kube-controller-manager
Because the masters are made highly available with keepalived, any of the three servers can be promoted to the active master (if the active master goes down, a backup takes over); therefore every master step must be performed on all three servers.
[root@k8s-master01 ~]# wget https://dl.k8s.io/v1.16.2/kubernetes-server-linux-amd64.tar.gz
[root@k8s-master01 ~]# tar -zxvf kubernetes-server-linux-amd64.tar.gz
[root@k8s-master01 ~]# vi /opt/k8s/script/scp_master.sh
MASTER_IPS=("$1" "$2" "$3")
for master_ip in ${MASTER_IPS[@]};do
    echo ">>> ${master_ip}"
    scp /root/kubernetes/server/bin/{kube-apiserver,kube-controller-manager,kube-scheduler} root@${master_ip}:/opt/k8s/bin/
    ssh root@${master_ip} "chmod +x /opt/k8s/bin/*"
done
[root@k8s-master01 ~]# bash /opt/k8s/script/scp_master.sh 192.168.2.201 192.168.2.202 192.168.2.203
[root@k8s-master01 ~]# yum install keepalived haproxy -y
[root@k8s-master01 ~]# vi /etc/haproxy/haproxy.cfg
global
    log /dev/log local0
    log /dev/log local1 notice
    chroot /var/lib/haproxy
    stats socket /var/run/haproxy-admin.sock mode 660 level admin
    stats timeout 30s
    user haproxy
    group haproxy
    daemon
    nbproc 1

defaults
    log global
    timeout connect 5000
    timeout client 10m
    timeout server 10m

listen admin_stats
    bind 0.0.0.0:10080
    mode http
    log 127.0.0.1 local0 err
    stats refresh 30s
    stats uri /status
    stats realm welcome login\ Haproxy
    stats auth haproxy:123456
    stats hide-version
    stats admin if TRUE

listen k8s-master
    bind 0.0.0.0:8443
    mode tcp
    option tcplog
    balance source
    server 192.168.2.201 192.168.2.201:6443 check inter 2000 fall 2 rise 2 weight 1
    server 192.168.2.202 192.168.2.202:6443 check inter 2000 fall 2 rise 2 weight 1
    server 192.168.2.203 192.168.2.203:6443 check inter 2000 fall 2 rise 2 weight 1
[root@k8s-master01 ~]# vi /opt/k8s/script/haproxy.sh
MASTER_IPS=("$1" "$2" "$3")
for master_ip in ${MASTER_IPS[@]};do
    echo ">>> ${master_ip}"
    # Install keepalived and haproxy
    ssh root@${master_ip} "yum install -y keepalived haproxy"
    # Distribute the configuration file
    scp /etc/haproxy/haproxy.cfg root@${master_ip}:/etc/haproxy
    # Start and check the haproxy service
    ssh root@${master_ip} "systemctl restart haproxy"
    ssh root@${master_ip} "systemctl enable haproxy.service"
    ssh root@${master_ip} "systemctl status haproxy|grep Active"
    # Check that haproxy is listening on port 8443
    ssh root@${master_ip} "netstat -lnpt|grep haproxy"
done
[root@k8s-master01 ~]# bash /opt/k8s/script/haproxy.sh 192.168.2.201 192.168.2.202 192.168.2.203
The output should look similar to:
Active: active (running) since Tue 2019-11-12 01:54:41 CST; 543ms ago
tcp   0   0 0.0.0.0:8443    0.0.0.0:*   LISTEN   4995/haproxy
tcp   0   0 0.0.0.0:10080   0.0.0.0:*   LISTEN   4995/haproxy
[root@k8s-master01 ~]# vim /etc/keepalived/keepalived.conf
global_defs {
    router_id keepalived_ha_121
}
vrrp_script check-haproxy {
    script "killall -0 haproxy"
    interval 5
    weight -30
}
vrrp_instance VI-k8s-master {
    state MASTER
    priority 120        # the first backup is 110, and so on down!
    dont_track_primary
    interface eth0
    virtual_router_id 121
    advert_int 3
    track_script {
        check-haproxy
    }
    virtual_ipaddress {
        192.168.2.210
    }
}
[root@k8s-master02 ~]# vi /etc/keepalived/keepalived.conf
global_defs {
    router_id keepalived_ha_122_123
}
vrrp_script check-haproxy {
    script "killall -0 haproxy"
    interval 5
    weight -30
}
vrrp_instance VI-k8s-master {
    state BACKUP
    priority 110        # the second backup is 100
    dont_track_primary
    interface eth0
    virtual_router_id 121
    advert_int 3
    track_script {
        check-haproxy
    }
    virtual_ipaddress {
        192.168.2.210
    }
}
[root@k8s-master01 ~]# systemctl restart keepalived && systemctl enable keepalived && systemctl status keepalived
[root@k8s-master01 ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
    link/ether 00:15:5d:00:68:05 brd ff:ff:ff:ff:ff:ff
    inet 192.168.2.101/24 brd 192.168.2.255 scope global noprefixroute eth0
       valid_lft forever preferred_lft forever
    inet 192.168.2.10/32 scope global eth0
       valid_lft forever preferred_lft forever
    inet6 fe80::f726:9d22:2b89:694c/64 scope link noprefixroute
       valid_lft forever preferred_lft forever
3: flannel.1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UNKNOWN group default
    link/ether aa:43:e5:bb:88:28 brd ff:ff:ff:ff:ff:ff
    inet 10.30.34.0/32 scope global flannel.1
       valid_lft forever preferred_lft forever
    inet6 fe80::a843:e5ff:febb:8828/64 scope link
       valid_lft forever preferred_lft forever
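To confirm that failover actually works, a simple hedged test is to stop haproxy on the node holding the VIP: the check-haproxy script then subtracts 30 from that node's priority, so within a few advert intervals the VIP should move to a backup node:

# On the node currently holding the VIP:
[root@k8s-master01 ~]# systemctl stop haproxy
# A few seconds later the VIP should appear on a backup node:
[root@k8s-master02 ~]# ip addr show eth0 | grep 192.168.2.210
# Restore haproxy when done:
[root@k8s-master01 ~]# systemctl start haproxy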
[root@k8s-master01 ~]# cat > /opt/k8s/cert/kube-apiserver-csr.json <<EOF
{
    "CN": "kubernetes",
    "hosts": [
        "127.0.0.1",
        "192.168.2.201",
        "192.168.2.202",
        "192.168.2.203",
        "192.168.2.210",
        "10.96.0.1",
        "kubernetes",
        "kubernetes.default",
        "kubernetes.default.svc",
        "kubernetes.default.svc.cluster",
        "kubernetes.default.svc.cluster.local"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "BeiJing",
            "L": "BeiJing",
            "O": "k8s",
            "OU": "steams"
        }
    ]
}
EOF
[root@k8s-master01 ~]# cfssl gencert -ca=/opt/k8s/cert/ca.pem \
    -ca-key=/opt/k8s/cert/ca-key.pem \
    -config=/opt/k8s/cert/ca-config.json \
    -profile=kubernetes /opt/k8s/cert/kube-apiserver-csr.json | cfssljson -bare /opt/k8s/cert/kube-apiserver
[root@k8s-master01 ~]# ll /opt/k8s/cert/kube-apiserver*
kube-apiserver.csr  kube-apiserver-csr.json  kube-apiserver-key.pem  kube-apiserver.pem
[root@k8s-master01 ~]# head -c 32 /dev/urandom | base64
# Returns a key; every master node must use the same key!!!
muqIUutYDd5ARLtsg/W1CYWs3g8Fq9uJO/lDpSsv9iw=
[root@k8s-master01 ~]# vi encryption-config.yaml
kind: EncryptionConfig
apiVersion: v1
resources:
  - resources:
      - secrets
    providers:
      - aescbc:
          keys:
            - name: key1
              secret: muqIUutYDd5ARLtsg/W1CYWs3g8Fq9uJO/lDpSsv9iw=
      - identity: {}
[root@k8s-master01 ~]# vi /opt/k8s/script/scp_apiserver.sh
MASTER_IPS=("$1" "$2" "$3")
for master_ip in ${MASTER_IPS[@]};do
    echo ">>> ${master_ip}"
    scp /opt/k8s/cert/kube-apiserver*.pem root@${master_ip}:/opt/k8s/cert/
    scp /root/encryption-config.yaml root@${master_ip}:/opt/k8s/
done
[root@k8s-master01 ~]# bash /opt/k8s/script/scp_apiserver.sh 192.168.2.201 192.168.2.202 192.168.2.203
[root@k8s-master01 ~]# vi /opt/k8s/kube-apiserver/kube-apiserver.service.template
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
ExecStart=/opt/k8s/bin/kube-apiserver \
    --enable-admission-plugins=NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \
    --anonymous-auth=false \
    --experimental-encryption-provider-config=/opt/k8s/encryption-config.yaml \
    --advertise-address=##MASTER_IP## \
    --bind-address=##MASTER_IP## \
    --insecure-port=0 \
    --authorization-mode=Node,RBAC \
    --runtime-config=api/all \
    --enable-bootstrap-token-auth \
    --service-cluster-ip-range=10.96.0.0/16 \
    --service-node-port-range=30000-50000 \
    --tls-cert-file=/opt/k8s/cert/kube-apiserver.pem \
    --tls-private-key-file=/opt/k8s/cert/kube-apiserver-key.pem \
    --client-ca-file=/opt/k8s/cert/ca.pem \
    --kubelet-client-certificate=/opt/k8s/cert/kube-apiserver.pem \
    --kubelet-client-key=/opt/k8s/cert/kube-apiserver-key.pem \
    --service-account-key-file=/opt/k8s/cert/ca-key.pem \
    --etcd-cafile=/opt/k8s/cert/ca.pem \
    --etcd-certfile=/opt/k8s/cert/kube-apiserver.pem \
    --etcd-keyfile=/opt/k8s/cert/kube-apiserver-key.pem \
    --etcd-servers=https://192.168.2.201:2379,https://192.168.2.202:2379,https://192.168.2.203:2379 \
    --enable-swagger-ui=true \
    --allow-privileged=true \
    --apiserver-count=3 \
    --audit-log-maxage=30 \
    --audit-log-maxbackup=3 \
    --audit-log-maxsize=100 \
    --audit-log-path=/var/log/kube-apiserver-audit.log \
    --event-ttl=1h \
    --alsologtostderr=true \
    --logtostderr=false \
    --log-dir=/var/log/kubernetes \
    --v=2
Restart=on-failure
RestartSec=5
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
[root@k8s-master01 ~]# vi /opt/k8s/script/apiserver_service.sh
MASTER_IPS=("$1" "$2" "$3")
# Substitute the variables in the template to create a systemd unit file for each node
for (( i=0; i < 3; i++ ));do
    sed "s/##MASTER_IP##/${MASTER_IPS[i]}/" /opt/k8s/kube-apiserver/kube-apiserver.service.template > /opt/k8s/kube-apiserver/kube-apiserver-${MASTER_IPS[i]}.service
done
# Start and check the kube-apiserver service
for master_ip in ${MASTER_IPS[@]};do
    echo ">>> ${master_ip}"
    scp /opt/k8s/kube-apiserver/kube-apiserver-${master_ip}.service root@${master_ip}:/etc/systemd/system/kube-apiserver.service
    ssh root@${master_ip} "systemctl daemon-reload && systemctl enable kube-apiserver && systemctl restart kube-apiserver"
    ssh root@${master_ip} "systemctl status kube-apiserver |grep 'Active:'"
done
[root@k8s-master01 ~]# bash /opt/k8s/script/apiserver_service.sh 192.168.2.201 192.168.2.202 192.168.2.203
[root@k8s-master01 ~]# ETCDCTL_API=3 etcdctl \
    --endpoints="https://192.168.2.201:2379,https://192.168.2.202:2379,https://192.168.2.203:2379" \
    --cacert=/opt/k8s/cert/ca.pem \
    --cert=/opt/etcd/cert/etcd.pem \
    --key=/opt/etcd/cert/etcd-key.pem \
    get /registry/ --prefix --keys-only
[root@k8s-master01 ~]# kubectl cluster-info
Kubernetes master is running at https://192.168.2.210:8443

[root@k8s-master01 ~]# kubectl get all --all-namespaces
NAMESPACE   NAME                 TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
default     service/kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   49m

# 6443: the secure port receiving https requests; every request is authenticated and authorized.
# Because the insecure port is disabled, nothing listens on 8080.
[root@k8s-master01 ~]# ss -nutlp |grep apiserver
tcp   LISTEN   0   128   192.168.2.201:6443   0.0.0.0:*   users:(("kube-apiserver",pid=4425,fd=6))
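It is also worth verifying that the encryption-config.yaml written earlier is actually in effect. A hedged sketch (the secret name enc-test is arbitrary): create a throwaway secret and read it back raw from etcd; with aescbc active, the stored value should begin with k8s:enc:aescbc:v1:key1 rather than plaintext:

[root@k8s-master01 ~]# kubectl create secret generic enc-test --from-literal=foo=bar
[root@k8s-master01 ~]# ETCDCTL_API=3 etcdctl \
    --endpoints="https://192.168.2.201:2379" \
    --cacert=/opt/k8s/cert/ca.pem \
    --cert=/opt/etcd/cert/etcd.pem \
    --key=/opt/etcd/cert/etcd-key.pem \
    get /registry/secrets/default/enc-test
[root@k8s-master01 ~]# kubectl delete secret enc-test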
[root@k8s-master01 ~]# cat > /opt/k8s/cert/kube-controller-manager-csr.json <<EOF
{
    "CN": "system:kube-controller-manager",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "hosts": [
        "127.0.0.1",
        "192.168.2.201",
        "192.168.2.202",
        "192.168.2.203"
    ],
    "names": [
        {
            "C": "CN",
            "ST": "BeiJing",
            "L": "BeiJing",
            "O": "system:kube-controller-manager",
            "OU": "steams"
        }
    ]
}
EOF
cfssl gencert -ca=/opt/k8s/cert/ca.pem \
    -ca-key=/opt/k8s/cert/ca-key.pem \
    -config=/opt/k8s/cert/ca-config.json \
    -profile=kubernetes /opt/k8s/cert/kube-controller-manager-csr.json | cfssljson -bare /opt/k8s/cert/kube-controller-manager
[root@k8s-master01 ~]# ll /opt/k8s/cert/kube-controller-manager*
kube-controller-manager.csr  kube-controller-manager-csr.json  kube-controller-manager-key.pem  kube-controller-manager.pem
# step.1 Set the cluster parameters:
[root@k8s-master01 ~]# kubectl config set-cluster kubernetes \
    --certificate-authority=/opt/k8s/cert/ca.pem \
    --embed-certs=true \
    --server=https://192.168.2.210:8443 \
    --kubeconfig=/opt/k8s/kube-controller-manager/kube-controller-manager.kubeconfig
# step.2 Set the client authentication parameters
[root@k8s-master01 ~]# kubectl config set-credentials system:kube-controller-manager \
    --client-certificate=/opt/k8s/cert/kube-controller-manager.pem \
    --client-key=/opt/k8s/cert/kube-controller-manager-key.pem \
    --embed-certs=true \
    --kubeconfig=/opt/k8s/kube-controller-manager/kube-controller-manager.kubeconfig
# step.3 Set the context parameters
[root@k8s-master01 ~]# kubectl config set-context system:kube-controller-manager@kubernetes \
    --cluster=kubernetes \
    --user=system:kube-controller-manager \
    --kubeconfig=/opt/k8s/kube-controller-manager/kube-controller-manager.kubeconfig
# step.4 Set the default context
[root@k8s-master01 ~]# kubectl config use-context system:kube-controller-manager@kubernetes \
    --kubeconfig=/opt/k8s/kube-controller-manager/kube-controller-manager.kubeconfig
[root@k8s-master01 ~]# kubectl config view --kubeconfig=/opt/k8s/kube-controller-manager/kube-controller-manager.kubeconfig
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: DATA+OMITTED
    server: https://192.168.2.210:8443
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: system:kube-controller-manager
  name: system:kube-controller-manager@kubernetes
current-context: system:kube-controller-manager@kubernetes
kind: Config
preferences: {}
users:
- name: system:kube-controller-manager
  user:
    client-certificate-data: REDACTED
    client-key-data: REDACTED
[root@k8s-master01 ~]# vi /opt/k8s/script/scp_controller_manager.sh
MASTER_IPS=("$1" "$2" "$3")
for master_ip in ${MASTER_IPS[@]};do
    echo ">>> ${master_ip}"
    scp /opt/k8s/cert/kube-controller-manager*.pem root@${master_ip}:/opt/k8s/cert/
    scp /opt/k8s/kube-controller-manager/kube-controller-manager.kubeconfig root@${master_ip}:/opt/k8s/kube-controller-manager/
done
[root@k8s-master01 ~]# bash /opt/k8s/script/scp_controller_manager.sh 192.168.2.201 192.168.2.202 192.168.2.203
[root@k8s-master01 ~]# vi /opt/k8s/kube-controller-manager/kube-controller-manager.service.template
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
ExecStart=/opt/k8s/bin/kube-controller-manager \
    --port=0 \
    --secure-port=10252 \
    --bind-address=127.0.0.1 \
    --kubeconfig=/opt/k8s/kube-controller-manager/kube-controller-manager.kubeconfig \
    --service-cluster-ip-range=10.96.0.0/16 \
    --cluster-name=kubernetes \
    --cluster-signing-cert-file=/opt/k8s/cert/ca.pem \
    --cluster-signing-key-file=/opt/k8s/cert/ca-key.pem \
    --experimental-cluster-signing-duration=8760h \
    --root-ca-file=/opt/k8s/cert/ca.pem \
    --service-account-private-key-file=/opt/k8s/cert/ca-key.pem \
    --leader-elect=true \
    --feature-gates=RotateKubeletServerCertificate=true \
    --controllers=*,bootstrapsigner,tokencleaner \
    --horizontal-pod-autoscaler-use-rest-clients=true \
    --horizontal-pod-autoscaler-sync-period=10s \
    --tls-cert-file=/opt/k8s/cert/kube-controller-manager.pem \
    --tls-private-key-file=/opt/k8s/cert/kube-controller-manager-key.pem \
    --use-service-account-credentials=true \
    --alsologtostderr=true \
    --logtostderr=false \
    --log-dir=/var/log/kubernetes \
    --v=2
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
[root@k8s-master01 ~]# vi /opt/k8s/script/controller_manager_service.sh
MASTER_IPS=("$1" "$2" "$3")
for master_ip in ${MASTER_IPS[@]};do
    echo ">>> ${master_ip}"
    scp /opt/k8s/kube-controller-manager/kube-controller-manager.service.template root@${master_ip}:/etc/systemd/system/kube-controller-manager.service
    ssh root@${master_ip} "systemctl daemon-reload && systemctl enable kube-controller-manager && systemctl start kube-controller-manager"
    ssh root@${master_ip} "systemctl status kube-controller-manager|grep Active"
done
[root@k8s-master01 ~]# bash /opt/k8s/script/controller_manager_service.sh 192.168.2.201 192.168.2.202 192.168.2.203
[root@k8s-master01 ~]# ss -nutlp |grep kube-controll
tcp   LISTEN   0   128   127.0.0.1:10252   0.0.0.0:*   users:(("kube-controller",pid=9382,fd=6))
[root@k8s-master02 ~]# kubectl get endpoints kube-controller-manager --namespace=kube-system -o yaml
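With --leader-elect=true, the three instances race for a lock on this endpoints object; in the returned YAML, the control-plane.alpha.kubernetes.io/leader annotation records the current leader. A quick filter for just the holder:

[root@k8s-master02 ~]# kubectl get endpoints kube-controller-manager --namespace=kube-system -o yaml | grep holderIdentity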
Preparation: the kube-scheduler binary was already downloaded above as part of the kubernetes-server tarball.
[root@k8s-master01 ~]# cat > /opt/k8s/cert/kube-scheduler-csr.json <<EOF
{
    "CN": "system:kube-scheduler",
    "hosts": [
        "127.0.0.1",
        "192.168.2.201",
        "192.168.2.202",
        "192.168.2.203"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "BeiJing",
            "L": "BeiJing",
            "O": "system:kube-scheduler",
            "OU": "steams"
        }
    ]
}
EOF
[root@k8s-master01 ~]# cfssl gencert -ca=/opt/k8s/cert/ca.pem \
    -ca-key=/opt/k8s/cert/ca-key.pem \
    -config=/opt/k8s/cert/ca-config.json \
    -profile=kubernetes /opt/k8s/cert/kube-scheduler-csr.json | cfssljson -bare /opt/k8s/cert/kube-scheduler
[root@k8s-master01 ~]# ll /opt/k8s/cert/kube-scheduler*
kube-scheduler.csr  kube-scheduler-csr.json  kube-scheduler-key.pem  kube-scheduler.pem
# step.1 Set the cluster parameters
[root@k8s-master01 ~]# kubectl config set-cluster kubernetes \
    --certificate-authority=/opt/k8s/cert/ca.pem \
    --embed-certs=true \
    --server=https://192.168.2.210:8443 \
    --kubeconfig=/opt/k8s/kube-scheduler/kube-scheduler.kubeconfig
# step.2 Set the client authentication parameters
[root@k8s-master01 ~]# kubectl config set-credentials system:kube-scheduler \
    --client-certificate=/opt/k8s/cert/kube-scheduler.pem \
    --client-key=/opt/k8s/cert/kube-scheduler-key.pem \
    --embed-certs=true \
    --kubeconfig=/opt/k8s/kube-scheduler/kube-scheduler.kubeconfig
# step.3 Set the context parameters
[root@k8s-master01 ~]# kubectl config set-context system:kube-scheduler@kubernetes \
    --cluster=kubernetes \
    --user=system:kube-scheduler \
    --kubeconfig=/opt/k8s/kube-scheduler/kube-scheduler.kubeconfig
# step.4 Set the default context
[root@k8s-master01 ~]# kubectl config use-context system:kube-scheduler@kubernetes \
    --kubeconfig=/opt/k8s/kube-scheduler/kube-scheduler.kubeconfig
[root@k8s-master01 ~]# kubectl config view --kubeconfig=/opt/k8s/kube-scheduler/kube-scheduler.kubeconfig
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: DATA+OMITTED
    server: https://192.168.2.210:8443
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: system:kube-scheduler
  name: system:kube-scheduler@kubernetes
current-context: system:kube-scheduler@kubernetes
kind: Config
preferences: {}
users:
- name: system:kube-scheduler
  user:
    client-certificate-data: REDACTED
    client-key-data: REDACTED
[root@k8s-master01 ~]# vi /opt/k8s/script/scp_scheduler.sh
MASTER_IPS=("$1" "$2" "$3")
for master_ip in ${MASTER_IPS[@]};do
    echo ">>> ${master_ip}"
    scp /opt/k8s/cert/kube-scheduler*.pem root@${master_ip}:/opt/k8s/cert/
    scp /opt/k8s/kube-scheduler/kube-scheduler.kubeconfig root@${master_ip}:/opt/k8s/kube-scheduler/
done
[root@k8s-master01 ~]# bash /opt/k8s/script/scp_scheduler.sh 192.168.2.201 192.168.2.202 192.168.2.203
[root@k8s-master01 ~]# vi /opt/k8s/kube-scheduler/kube-scheduler.service.template
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
ExecStart=/opt/k8s/bin/kube-scheduler \
    --address=127.0.0.1 \
    --kubeconfig=/opt/k8s/kube-scheduler/kube-scheduler.kubeconfig \
    --leader-elect=true \
    --alsologtostderr=true \
    --logtostderr=false \
    --log-dir=/var/log/kubernetes \
    --v=2
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
[root@k8s-master01 ~]# vi /opt/k8s/script/scheduler_service.sh
MASTER_IPS=("$1" "$2" "$3")
for master_ip in ${MASTER_IPS[@]};do
    echo ">>> ${master_ip}"
    scp /opt/k8s/kube-scheduler/kube-scheduler.service.template root@${master_ip}:/etc/systemd/system/kube-scheduler.service
    ssh root@${master_ip} "systemctl daemon-reload && systemctl enable kube-scheduler && systemctl start kube-scheduler && systemctl status kube-scheduler|grep Active"
done
[root@k8s-master01 ~]# bash /opt/k8s/script/scheduler_service.sh 192.168.2.201 192.168.2.202 192.168.2.203
[root@k8s-master01 ~]# ss -nutlp |grep kube-scheduler
tcp   LISTEN   0   128   127.0.0.1:10251   0.0.0.0:*   users:(("kube-scheduler",pid=8584,fd=6))
tcp   LISTEN   0   128   *:10259           *:*         users:(("kube-scheduler",pid=8584,fd=7))

[root@k8s-master01 ~]# curl -s http://127.0.0.1:10251/metrics |head
# HELP apiserver_audit_event_total [ALPHA] Counter of audit events generated and sent to the audit backend.
# TYPE apiserver_audit_event_total counter
apiserver_audit_event_total 0
# HELP apiserver_audit_requests_rejected_total [ALPHA] Counter of apiserver requests rejected due to an error in audit logging backend.
# TYPE apiserver_audit_requests_rejected_total counter
apiserver_audit_requests_rejected_total 0
# HELP apiserver_client_certificate_expiration_seconds [ALPHA] Distribution of the remaining lifetime on the certificate used to authenticate a request.
# TYPE apiserver_client_certificate_expiration_seconds histogram
apiserver_client_certificate_expiration_seconds_bucket{le="0"} 0
apiserver_client_certificate_expiration_seconds_bucket{le="1800"} 0
[root@k8s-master02 ~]# kubectl get endpoints kube-scheduler --namespace=kube-system -o yaml
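As with the controller manager, the scheduler's current leader can be read from the leader-election annotation on this endpoints object; an optional filter:

[root@k8s-master02 ~]# kubectl get endpoints kube-scheduler --namespace=kube-system -o yaml | grep holderIdentity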