1. Environment
OS               IP Address     Docker   Kernel                Hostname  CPU  Memory  Role
CentOS 7.7.1908  192.168.1.171  19.03.7  3.10.0-1062.12.1.el7  k8s-m01   2C   2G      k8s-master
CentOS 7.7.1908  192.168.1.172  19.03.7  3.10.0-1062.12.1.el7  k8s-m02   2C   2G      k8s-master
CentOS 7.7.1908  192.168.1.173  19.03.7  3.10.0-1062.12.1.el7  k8s-m03   2C   2G      k8s-master
CentOS 7.7.1908  192.168.1.174  19.03.7  3.10.0-1062.12.1.el7  k8s-n01   2C   2G      k8s-node
2. Versions
kubeadm: v1.17.3
Kubernetes: v1.17.3
etcd: 3.4.3-0
Docker CE: 19.03.7
Calico: v3.13.0
3. Network
Cluster IP CIDR: 10.244.0.0/16
Service Cluster IP CIDR: 10.96.0.0/12
Service DNS IP: 10.96.0.10
DNS DN: cluster.local
Kubernetes API: apiserver.k8s.local:6443 # apiserver.k8s.local must resolve via /etc/hosts to the IP of a k8s master's API server
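Note that this name resolves differently per node and per stage of the install (see sections 15, 19, and 21): each master eventually points it at itself, and k8s-n01 points it at the local HAProxy. For example, on a node where the API server or HAProxy listens locally:
echo '127.0.0.1 apiserver.k8s.local' >> /etc/hosts
getent hosts apiserver.k8s.local  ## verify the binding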
4. Configure the YUM repositories
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
yum install -y kubelet kubeadm kubectl
systemctl enable kubelet && systemctl start kubelet
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum makecache fast
yum list docker-ce.x86_64 --showduplicates | sort -r
yum -y install docker-ce
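The two installs above pull the latest packages from the repos. To reproduce the exact versions listed in section 2, the packages can be pinned instead (a sketch; the version strings follow the usual naming of the Kubernetes and Docker CE repos):
yum install -y kubelet-1.17.3 kubeadm-1.17.3 kubectl-1.17.3
yum install -y docker-ce-19.03.7 docker-ce-cli-19.03.7 containerd.io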
5. Disable the firewall
systemctl stop firewalld && systemctl disable firewalld
6. Stop NetworkManager
systemctl stop NetworkManager && systemctl disable NetworkManager
## Why stop NetworkManager: it conflicts with Calico and will try to take over the interfaces Calico creates.
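If NetworkManager has to stay enabled, Calico's documentation describes marking the Calico interfaces as unmanaged instead (a sketch for Calico v3.13, which creates cali* and tunl* interfaces):
cat <<EOF > /etc/NetworkManager/conf.d/calico.conf
[keyfile]
unmanaged-devices=interface-name:cali*;interface-name:tunl*
EOF
systemctl restart NetworkManager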
7. Disable SELinux
setenforce 0
sed -i "s#=enforcing#=disabled#g" /etc/selinux/config
8. Disable swap
swapoff -a && sysctl -w vm.swappiness=0
sed -ri '/^[^#]*swap/s@^@#@' /etc/fstab
9. Synchronize time
yum install -y chrony ntpdate
ntpdate 0.cn.pool.ntp.org
hwclock --systohc
cat << EOF >> /etc/chrony.conf
server 0.cn.pool.ntp.org
server 1.cn.pool.ntp.org
server 2.cn.pool.ntp.org
server 3.cn.pool.ntp.org
EOF
systemctl restart chronyd && systemctl enable chronyd
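Confirm chronyd is tracking the new servers:
chronyc sources -v
timedatectl status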
10. Tune kernel parameters
cat <<EOF > /etc/sysctl.d/k8s.conf
# Fix long-lived connection timeouts in IPVS mode; any value under 900 works (the IPVS TCP session timeout is 900s)
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_intvl = 30
net.ipv4.tcp_keepalive_probes = 10
net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
net.ipv6.conf.lo.disable_ipv6 = 1
net.ipv4.neigh.default.gc_stale_time = 120
net.ipv4.conf.all.rp_filter = 0
net.ipv4.conf.default.rp_filter = 0
net.ipv4.conf.default.arp_announce = 2
net.ipv4.conf.lo.arp_announce = 2
net.ipv4.conf.all.arp_announce = 2
net.ipv4.ip_forward = 1
net.ipv4.tcp_max_tw_buckets = 5000
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 1024
net.ipv4.tcp_synack_retries = 2
# Let iptables see bridged traffic (required by kube-proxy and most CNI plugins)
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-arptables = 1
net.netfilter.nf_conntrack_max = 2310720
fs.inotify.max_user_watches=89100
fs.may_detach_mounts = 1
fs.file-max = 52706963
fs.nr_open = 52706963
vm.swappiness = 0
vm.overcommit_memory=1
vm.panic_on_oom=0
EOF
sysctl --system
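Note that the net.bridge.* keys only exist once the br_netfilter module is loaded (done in step 12); if sysctl --system complains about them, load the module and re-run it. Spot-check the critical settings:
sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward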
11. Configure hostname resolution between the nodes
cat << EOF >> /etc/hosts
192.168.1.171 k8s-m01
192.168.1.172 k8s-m02
192.168.1.173 k8s-m03
192.168.1.174 k8s-n01
EOF
12. Enable IPVS
yum install ipvsadm ipset sysstat conntrack libseccomp -y
## Load the IPVS kernel modules at boot: files under /etc/modules-load.d/
## are read by systemd-modules-load and must list one module name per line.
cat <<EOF > /etc/modules-load.d/ipvs.conf
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack_ipv4
br_netfilter
EOF
## Load them immediately for the current boot
for mod in ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh nf_conntrack_ipv4 br_netfilter; do modprobe $mod; done
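Verify the modules are loaded:
lsmod | grep -e ip_vs -e nf_conntrack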
13. Configure Docker
mkdir -p /etc/docker
cat > /etc/docker/daemon.json <<EOF
{
"log-driver": "json-file",
"log-opts": {"max-size": "100m"},
"storage-driver": "overlay2",
"storage-opts": ["overlay2.override_kernel_check=true"],
"max-concurrent-downloads": 10,
"max-concurrent-uploads": 10,
"exec-opts": ["native.cgroupdriver=systemd"],
"registry-mirrors": [
"https://xxxxx.mirror.aliyuncs.com"
]
}
EOF
systemctl start docker && systemctl enable docker
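Confirm Docker picked up the systemd cgroup driver (kubeadm will configure the kubelet to match it):
docker info | grep -i cgroup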
14. Set up passwordless SSH
Run on k8s-m01 (abcd1234 in the loop below stands for the nodes' root password):
yum install sshpass -y
ssh-keygen -t rsa -P '' -f /root/.ssh/id_rsa
for NODE in k8s-m01 k8s-m02 k8s-m03 k8s-n01; do
echo "--- $NODE ---"
sshpass -p abcd1234 ssh-copy-id -o "StrictHostKeyChecking no" -i /root/.ssh/id_rsa.pub ${NODE}
ssh ${NODE} "hostnamectl set-hostname ${NODE}"
done
15. The following steps are performed on k8s-m01
Create kubeadm-config.yaml:
cat <<EOF > kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: v1.17.3
controlPlaneEndpoint: "apiserver.k8s.local:6443"
networking:
  podSubnet: "10.244.0.0/16"
imageRepository: "gcr.azk8s.cn/google_containers"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
EOF
## controlPlaneEndpoint is the address of the API server
## Add the hosts entry
echo '127.0.0.1 apiserver.k8s.local' >> /etc/hosts
16. Initialize the control plane with kubeadm
## If the network is slow, pull all the images first
kubeadm config images pull --config=kubeadm-config.yaml
kubeadm init --config=kubeadm-config.yaml --upload-certs
…
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
You can now join any number of the control-plane node running the following command on each as root:
kubeadm join apiserver.k8s.local:6443 --token 259pgq.fjrb8zpx3uzbsx8w \
--discovery-token-ca-cert-hash sha256:1f52834e5713cc55a2bf9516aa32246ac167d042270be2041aba491cd665b908 \
--control-plane --certificate-key 39ed22131651a43bc0473e4fbc19fc7eb2540f8a3e7abb155ddad15a6394c4fb
Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join apiserver.k8s.local:6443 --token 259pgq.fjrb8zpx3uzbsx8w \
--discovery-token-ca-cert-hash sha256:1f52834e5713cc55a2bf9516aa32246ac167d042270be2041aba491cd665b908
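The bootstrap token printed above expires after 24 hours by default. To join a node later, print a fresh worker join command on a master:
kubeadm token create --print-join-command
For additional control-plane nodes, first re-upload the certificates with the "kubeadm init phase upload-certs --upload-certs" command mentioned in the output.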
17. Set up kubeconfig on k8s-m01
mkdir -p $HOME/.kube
cp -rp /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config
[root@k8s-m01 ~]# kubectl get no
NAME STATUS ROLES AGE VERSION
k8s-m01 NotReady master 3m32s v1.17.3
[root@k8s-m01 ~]# kubectl get cs
NAME STATUS MESSAGE ERROR
controller-manager Healthy ok
scheduler Healthy ok
etcd-0 Healthy {"health":"true"}
18. Deploy the Calico CNI plugin on k8s-m01
wget https://docs.projectcalico.org/manifests/calico.yaml
kubectl apply -f calico.yaml
## Recent Calico releases no longer require editing the cluster CIDR in calico.yaml; if the cluster already defines a pod CIDR, Calico adapts to it automatically.
## Download the calicoctl management tool
wget -O /usr/local/bin/calicoctl https://github.com/projectcalico/calicoctl/releases/download/v3.13.0/calicoctl
chmod +x /usr/local/bin/calicoctl
export DATASTORE_TYPE=kubernetes
export KUBECONFIG=~/.kube/config
[root@k8s-m01 ~]# calicoctl get ippool -o wide
NAME CIDR NAT IPIPMODE VXLANMODE DISABLED SELECTOR
default-ipv4-ippool 10.244.0.0/16 true Always Never false all()
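calicoctl can also report the BGP mesh status; run it as root on a node where calico-node is up:
calicoctl node status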
19. Run on the remaining k8s-master nodes
Join the other two master nodes to the cluster.
## Point the API hostname at k8s-m01 first
echo '192.168.1.171 apiserver.k8s.local' >> /etc/hosts
kubeadm join apiserver.k8s.local:6443 --token 259pgq.fjrb8zpx3uzbsx8w \
--discovery-token-ca-cert-hash sha256:1f52834e5713cc55a2bf9516aa32246ac167d042270be2041aba491cd665b908 \
--control-plane --certificate-key 39ed22131651a43bc0473e4fbc19fc7eb2540f8a3e7abb155ddad15a6394c4fb
mkdir -p $HOME/.kube
cp -rp /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config
## Then point the API hostname back to the local node
sed -i 's#192.168.1.171 apiserver.k8s.local#127.0.0.1 apiserver.k8s.local#g' /etc/hosts
20. Configure HAProxy
## HAProxy provides load balancing for the Kubernetes API server; since resources are tight, it is installed on the k8s-n01 node.
mkdir /usr/local/haproxy
tar -zxvf haproxy-1.9.10.tar.gz  ## assumes the haproxy-1.9.10 source tarball is already present
cd haproxy-1.9.10
make TARGET=linux31  ## linux31 for RHEL 7, linux26 for RHEL 6; the TARGET depends on the kernel version
make install PREFIX=/usr/local/haproxy
mkdir -p /usr/local/haproxy/conf  ## make install does not create the conf directory
cat <<EOF > /usr/local/haproxy/conf/haproxy.cfg
global
log 127.0.0.1 local0 info
defaults
log global
mode http
option dontlognull
timeout connect 5000ms
timeout client 600000ms
timeout server 600000ms
listen stats
bind :9090
mode http
balance
stats refresh 10s
stats hide-version
stats uri /haproxy_stats
stats auth admin:admin123
stats admin if TRUE
frontend kube-apiserver-https
mode tcp
bind :6443
default_backend kube-apiserver-backend
backend kube-apiserver-backend
mode tcp
balance roundrobin
stick-table type ip size 200k expire 30m
stick on src
server apiserver1 192.168.1.171:6443 check
server apiserver2 192.168.1.172:6443 check
server apiserver3 192.168.1.173:6443 check
EOF
## Configure rsyslog so HAProxy can log via local0
vim /etc/rsyslog.conf
# Provides UDP syslog reception
$ModLoad imudp
$UDPServerRun 514
# Add the following line
local0.* /var/log/haproxy.log
vim /etc/sysconfig/rsyslog
SYSLOGD_OPTIONS="-r -m 0 -c 2"
systemctl restart rsyslog
## Start HAProxy
/usr/local/haproxy/sbin/haproxy -f /usr/local/haproxy/conf/haproxy.cfg &
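Backgrounding HAProxy with & does not survive a reboot; a minimal systemd unit is more durable (a sketch, assuming the install paths used above):
cat <<EOF > /etc/systemd/system/haproxy.service
[Unit]
Description=HAProxy load balancer for the Kubernetes API
After=network.target

[Service]
ExecStartPre=/usr/local/haproxy/sbin/haproxy -f /usr/local/haproxy/conf/haproxy.cfg -c
ExecStart=/usr/local/haproxy/sbin/haproxy -f /usr/local/haproxy/conf/haproxy.cfg -W
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload && systemctl enable --now haproxy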
21. Join k8s-n01 to the cluster
## The hostname points at the HAProxy node; since HAProxy runs locally on k8s-n01, use 127.0.0.1.
echo '127.0.0.1 apiserver.k8s.local' >> /etc/hosts
kubeadm join apiserver.k8s.local:6443 --token 259pgq.fjrb8zpx3uzbsx8w \
--discovery-token-ca-cert-hash sha256:1f52834e5713cc55a2bf9516aa32246ac167d042270be2041aba491cd665b908
22. Verify the result
[root@k8s-m01 k8s]# kubectl get no -o wide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
k8s-m01 Ready master 20h v1.17.3 192.168.1.171 <none> CentOS Linux 7 (Core) 3.10.0-1062.12.1.el7.x86_64 docker://19.3.7
k8s-m02 Ready master 20h v1.17.3 192.168.1.172 <none> CentOS Linux 7 (Core) 3.10.0-1062.12.1.el7.x86_64 docker://19.3.7
k8s-m03 Ready master 20h v1.17.3 192.168.1.173 <none> CentOS Linux 7 (Core) 3.10.0-1062.12.1.el7.x86_64 docker://19.3.7
k8s-n01 Ready <none> 55m v1.17.3 192.168.1.174 <none> CentOS Linux 7 (Core) 3.10.0-1062.12.1.el7.x86_64 docker://19.3.7
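Since kube-proxy runs in IPVS mode, the service virtual servers should also be visible with ipvsadm (installed in step 12):
ipvsadm -Ln
kubectl -n kube-system get pods -o wide  ## all kube-system pods, including calico-node, should be Running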