The steps are the same as for installing version 1.13; the difference lies in the kubeadm init configuration file. Running kubeadm init with a configuration file is currently in beta, and the schema reached v1beta2 in Kubernetes 1.15. Even though it is not yet GA, compared with configuring a k8s cluster by hand, kubeadm not only simplifies the steps but also reduces the chance of manual-deployment mistakes, so there is little reason not to use it.
OS: CentOS 7.6
Kernel: 4.18.7-1.el7.elrepo.x86_64
Kubernetes: v1.14.1
Docker-ce: 18.09

Keepalived provides IP high availability for the apiserver; Haproxy load-balances the apiserver; 3 masters and 3 etcd members keep the k8s cluster available.

192.168.1.1     master
192.168.1.2     master2
192.168.1.3     master3
192.168.1.4     Keepalived + Haproxy
192.168.1.5     Keepalived + Haproxy
192.168.1.6     etcd1
192.168.1.7     etcd2
192.168.1.8     etcd3
192.168.1.9     node1
192.168.1.10    node2
192.168.1.100   VIP, the apiserver address
For convenience, all operations are performed as the root user.
The following steps only need to be run on the Kubernetes cluster nodes.
# Disable SELinux and the firewall
sed -ri 's#(SELINUX=).*#\1disabled#' /etc/selinux/config
setenforce 0
systemctl disable firewalld
systemctl stop firewalld
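A quick sanity check that both changes took effect:

getenforce                      # Permissive now, Disabled after a reboot
systemctl is-active firewalld   # expect "inactive"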
# Turn off swap (by default kubelet refuses to start with swap enabled)
swapoff -a
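swapoff -a only lasts until the next reboot. To keep swap off permanently you also need to disable the swap entry in /etc/fstab; a minimal sketch, assuming a standard fstab swap line:

# Comment out any swap entries so swap stays off after reboot
sed -ri 's/^([^#].*\sswap\s.*)$/#\1/' /etc/fstab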
cat << EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
#vm.swappiness=0
EOF
sysctl --system
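The bridge-nf sysctl keys only exist once the br_netfilter module is loaded; if `sysctl --system` complains that the keys are missing (a common gotcha on minimal CentOS installs), load it first:

modprobe br_netfilter
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf   # load it on boot as well
sysctl --system
sysctl net.bridge.bridge-nf-call-iptables                   # expect 1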
# Load the IPVS kernel modules required by kube-proxy in IPVS mode
cat << EOF > /etc/sysconfig/modules/ipvs.modules
#!/bin/bash
ipvs_modules_dir="/usr/lib/modules/\`uname -r\`/kernel/net/netfilter/ipvs"
for i in \`ls \$ipvs_modules_dir | sed -r 's#(.*).ko.*#\1#'\`; do
    /sbin/modinfo -F filename \$i &> /dev/null
    if [ \$? -eq 0 ]; then
        /sbin/modprobe \$i
    fi
done
EOF
chmod +x /etc/sysconfig/modules/ipvs.modules
bash /etc/sysconfig/modules/ipvs.modules
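To verify the modules actually loaded:

lsmod | grep -e ip_vs -e nf_conntrack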
# Install on the master nodes only!
wget -O /bin/cfssl https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget -O /bin/cfssljson https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
wget -O /bin/cfssl-certinfo https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
for cfssl in /bin/cfssl*; do chmod +x $cfssl; done
cat << EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
yum install -y kubelet-1.14.1 kubeadm-1.14.1 kubectl-1.14.1
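A quick check that the pinned versions landed:

kubeadm version -o short    # expect v1.14.1
kubelet --version           # expect Kubernetes v1.14.1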
wget -O /etc/yum.repos.d/docker-ce.repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum install -y docker-ce
mkdir /etc/docker/
cat << EOF > /etc/docker/daemon.json
{
    "exec-opts": ["native.cgroupdriver=systemd"],
    "registry-mirrors": ["https://registry.docker-cn.com"],
    "live-restore": true,
    "default-shm-size": "128M",
    "bridge": "none",
    "max-concurrent-downloads": 10,
    "oom-score-adjust": -1000,
    "debug": false
}
EOF
# Restart docker
systemctl daemon-reload
systemctl enable docker
systemctl restart docker
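The Docker cgroup driver must match the cgroupDriver: "systemd" that the kubelet configuration sets later in this walkthrough; verify it after the restart:

docker info 2>/dev/null | grep -i 'cgroup driver'   # expect: Cgroup Driver: systemd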
# Configure the hosts file on all nodes
192.168.1.1     master
192.168.1.2     master2
192.168.1.3     master3
192.168.1.4     lb1
192.168.1.5     lb2
192.168.1.6     etcd1
192.168.1.7     etcd2
192.168.1.8     etcd3
192.168.1.9     node1
192.168.1.10    node2
mkdir -pv $HOME/ssl && cd $HOME/ssl
cat << EOF > ca-config.json
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ],
        "expiry": "87600h"
      }
    }
  }
}
EOF
cat << EOF > etcd-ca-csr.json
{
  "CN": "etcd",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Shenzhen",
      "L": "Shenzhen",
      "O": "etcd",
      "OU": "Etcd Security"
    }
  ]
}
EOF
cat << EOF > etcd-csr.json
{
  "CN": "etcd",
  "hosts": [
    "127.0.0.1",
    "192.168.1.6",
    "192.168.1.7",
    "192.168.1.8"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Shenzhen",
      "L": "Shenzhen",
      "O": "etcd",
      "OU": "Etcd Security"
    }
  ]
}
EOF
# Generate the certificates and copy them to the other etcd nodes
cfssl gencert -initca etcd-ca-csr.json | cfssljson -bare etcd-ca
cfssl gencert -ca=etcd-ca.pem -ca-key=etcd-ca-key.pem -config=ca-config.json -profile=kubernetes etcd-csr.json | cfssljson -bare etcd
mkdir -pv /etc/etcd/ssl
cp etcd*.pem /etc/etcd/ssl
mkdir -pv /etc/kubernetes/pki/etcd
cp etcd*.pem /etc/kubernetes/pki/etcd
scp -r /etc/etcd 192.168.1.6:/etc/
scp -r /etc/etcd 192.168.1.7:/etc/
scp -r /etc/etcd 192.168.1.8:/etc/
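Before distributing the certificates it is worth confirming the SANs cover every etcd member address; either tool below works (the grep patterns are approximate, adjust to taste):

cfssl-certinfo -cert /etc/etcd/ssl/etcd.pem | grep -A5 sans
# or
openssl x509 -in /etc/etcd/ssl/etcd.pem -noout -text | grep -A1 'Subject Alternative Name'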
# On etcd1 (192.168.1.6)
yum install -y etcd
cat << EOF > /etc/etcd/etcd.conf
#[Member]
#ETCD_CORS=""
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
#ETCD_WAL_DIR=""
ETCD_LISTEN_PEER_URLS="https://192.168.1.6:2380"
ETCD_LISTEN_CLIENT_URLS="https://127.0.0.1:2379,https://192.168.1.6:2379"
#ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
ETCD_NAME="etcd1"
#ETCD_SNAPSHOT_COUNT="100000"
#ETCD_HEARTBEAT_INTERVAL="100"
#ETCD_ELECTION_TIMEOUT="1000"
#ETCD_QUOTA_BACKEND_BYTES="0"
#ETCD_MAX_REQUEST_BYTES="1572864"
#ETCD_GRPC_KEEPALIVE_MIN_TIME="5s"
#ETCD_GRPC_KEEPALIVE_INTERVAL="2h0m0s"
#ETCD_GRPC_KEEPALIVE_TIMEOUT="20s"
#
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.1.6:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://127.0.0.1:2379,https://192.168.1.6:2379"
#ETCD_DISCOVERY=""
#ETCD_DISCOVERY_FALLBACK="proxy"
#ETCD_DISCOVERY_PROXY=""
#ETCD_DISCOVERY_SRV=""
ETCD_INITIAL_CLUSTER="etcd1=https://192.168.1.6:2380,etcd2=https://192.168.1.7:2380,etcd3=https://192.168.1.8:2380"
ETCD_INITIAL_CLUSTER_TOKEN="BigBoss"
#ETCD_INITIAL_CLUSTER_STATE="new"
#ETCD_STRICT_RECONFIG_CHECK="true"
#ETCD_ENABLE_V2="true"
#
#[Proxy]
#ETCD_PROXY="off"
#ETCD_PROXY_FAILURE_WAIT="5000"
#ETCD_PROXY_REFRESH_INTERVAL="30000"
#ETCD_PROXY_DIAL_TIMEOUT="1000"
#ETCD_PROXY_WRITE_TIMEOUT="5000"
#ETCD_PROXY_READ_TIMEOUT="0"
#
#[Security]
ETCD_CERT_FILE="/etc/etcd/ssl/etcd.pem"
ETCD_KEY_FILE="/etc/etcd/ssl/etcd-key.pem"
#ETCD_CLIENT_CERT_AUTH="false"
ETCD_TRUSTED_CA_FILE="/etc/etcd/ssl/etcd-ca.pem"
#ETCD_AUTO_TLS="false"
ETCD_PEER_CERT_FILE="/etc/etcd/ssl/etcd.pem"
ETCD_PEER_KEY_FILE="/etc/etcd/ssl/etcd-key.pem"
#ETCD_PEER_CLIENT_CERT_AUTH="false"
ETCD_PEER_TRUSTED_CA_FILE="/etc/etcd/ssl/etcd-ca.pem"
#ETCD_PEER_AUTO_TLS="false"
#
#[Logging]
#ETCD_DEBUG="false"
#ETCD_LOG_PACKAGE_LEVELS=""
#ETCD_LOG_OUTPUT="default"
#
#[Unsafe]
#ETCD_FORCE_NEW_CLUSTER="false"
#
#[Version]
#ETCD_VERSION="false"
#ETCD_AUTO_COMPACTION_RETENTION="0"
#
#[Profiling]
#ETCD_ENABLE_PPROF="false"
#ETCD_METRICS="basic"
#
#[Auth]
#ETCD_AUTH_TOKEN="simple"
EOF
chown -R etcd:etcd /etc/etcd
systemctl enable etcd
systemctl start etcd
# On etcd2 (192.168.1.7)
yum install -y etcd
cat << EOF > /etc/etcd/etcd.conf
#[Member]
#ETCD_CORS=""
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
#ETCD_WAL_DIR=""
ETCD_LISTEN_PEER_URLS="https://192.168.1.7:2380"
ETCD_LISTEN_CLIENT_URLS="https://127.0.0.1:2379,https://192.168.1.7:2379"
#ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
ETCD_NAME="etcd2"
#ETCD_SNAPSHOT_COUNT="100000"
#ETCD_HEARTBEAT_INTERVAL="100"
#ETCD_ELECTION_TIMEOUT="1000"
#ETCD_QUOTA_BACKEND_BYTES="0"
#ETCD_MAX_REQUEST_BYTES="1572864"
#ETCD_GRPC_KEEPALIVE_MIN_TIME="5s"
#ETCD_GRPC_KEEPALIVE_INTERVAL="2h0m0s"
#ETCD_GRPC_KEEPALIVE_TIMEOUT="20s"
#
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.1.7:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://127.0.0.1:2379,https://192.168.1.7:2379"
#ETCD_DISCOVERY=""
#ETCD_DISCOVERY_FALLBACK="proxy"
#ETCD_DISCOVERY_PROXY=""
#ETCD_DISCOVERY_SRV=""
ETCD_INITIAL_CLUSTER="etcd1=https://192.168.1.6:2380,etcd2=https://192.168.1.7:2380,etcd3=https://192.168.1.8:2380"
ETCD_INITIAL_CLUSTER_TOKEN="BigBoss"
#ETCD_INITIAL_CLUSTER_STATE="new"
#ETCD_STRICT_RECONFIG_CHECK="true"
#ETCD_ENABLE_V2="true"
#
#[Proxy]
#ETCD_PROXY="off"
#ETCD_PROXY_FAILURE_WAIT="5000"
#ETCD_PROXY_REFRESH_INTERVAL="30000"
#ETCD_PROXY_DIAL_TIMEOUT="1000"
#ETCD_PROXY_WRITE_TIMEOUT="5000"
#ETCD_PROXY_READ_TIMEOUT="0"
#
#[Security]
ETCD_CERT_FILE="/etc/etcd/ssl/etcd.pem"
ETCD_KEY_FILE="/etc/etcd/ssl/etcd-key.pem"
#ETCD_CLIENT_CERT_AUTH="false"
ETCD_TRUSTED_CA_FILE="/etc/etcd/ssl/etcd-ca.pem"
#ETCD_AUTO_TLS="false"
ETCD_PEER_CERT_FILE="/etc/etcd/ssl/etcd.pem"
ETCD_PEER_KEY_FILE="/etc/etcd/ssl/etcd-key.pem"
#ETCD_PEER_CLIENT_CERT_AUTH="false"
ETCD_PEER_TRUSTED_CA_FILE="/etc/etcd/ssl/etcd-ca.pem"
#ETCD_PEER_AUTO_TLS="false"
#
#[Logging]
#ETCD_DEBUG="false"
#ETCD_LOG_PACKAGE_LEVELS=""
#ETCD_LOG_OUTPUT="default"
#
#[Unsafe]
#ETCD_FORCE_NEW_CLUSTER="false"
#
#[Version]
#ETCD_VERSION="false"
#ETCD_AUTO_COMPACTION_RETENTION="0"
#
#[Profiling]
#ETCD_ENABLE_PPROF="false"
#ETCD_METRICS="basic"
#
#[Auth]
#ETCD_AUTH_TOKEN="simple"
EOF
chown -R etcd:etcd /etc/etcd
systemctl enable etcd
systemctl start etcd
# On etcd3 (192.168.1.8)
yum install -y etcd
cat << EOF > /etc/etcd/etcd.conf
#[Member]
#ETCD_CORS=""
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
#ETCD_WAL_DIR=""
ETCD_LISTEN_PEER_URLS="https://192.168.1.8:2380"
ETCD_LISTEN_CLIENT_URLS="https://127.0.0.1:2379,https://192.168.1.8:2379"
#ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
ETCD_NAME="etcd3"
#ETCD_SNAPSHOT_COUNT="100000"
#ETCD_HEARTBEAT_INTERVAL="100"
#ETCD_ELECTION_TIMEOUT="1000"
#ETCD_QUOTA_BACKEND_BYTES="0"
#ETCD_MAX_REQUEST_BYTES="1572864"
#ETCD_GRPC_KEEPALIVE_MIN_TIME="5s"
#ETCD_GRPC_KEEPALIVE_INTERVAL="2h0m0s"
#ETCD_GRPC_KEEPALIVE_TIMEOUT="20s"
#
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.1.8:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://127.0.0.1:2379,https://192.168.1.8:2379"
#ETCD_DISCOVERY=""
#ETCD_DISCOVERY_FALLBACK="proxy"
#ETCD_DISCOVERY_PROXY=""
#ETCD_DISCOVERY_SRV=""
ETCD_INITIAL_CLUSTER="etcd1=https://192.168.1.6:2380,etcd2=https://192.168.1.7:2380,etcd3=https://192.168.1.8:2380"
ETCD_INITIAL_CLUSTER_TOKEN="BigBoss"
#ETCD_INITIAL_CLUSTER_STATE="new"
#ETCD_STRICT_RECONFIG_CHECK="true"
#ETCD_ENABLE_V2="true"
#
#[Proxy]
#ETCD_PROXY="off"
#ETCD_PROXY_FAILURE_WAIT="5000"
#ETCD_PROXY_REFRESH_INTERVAL="30000"
#ETCD_PROXY_DIAL_TIMEOUT="1000"
#ETCD_PROXY_WRITE_TIMEOUT="5000"
#ETCD_PROXY_READ_TIMEOUT="0"
#
#[Security]
ETCD_CERT_FILE="/etc/etcd/ssl/etcd.pem"
ETCD_KEY_FILE="/etc/etcd/ssl/etcd-key.pem"
#ETCD_CLIENT_CERT_AUTH="false"
ETCD_TRUSTED_CA_FILE="/etc/etcd/ssl/etcd-ca.pem"
#ETCD_AUTO_TLS="false"
ETCD_PEER_CERT_FILE="/etc/etcd/ssl/etcd.pem"
ETCD_PEER_KEY_FILE="/etc/etcd/ssl/etcd-key.pem"
#ETCD_PEER_CLIENT_CERT_AUTH="false"
ETCD_PEER_TRUSTED_CA_FILE="/etc/etcd/ssl/etcd-ca.pem"
#ETCD_PEER_AUTO_TLS="false"
#
#[Logging]
#ETCD_DEBUG="false"
#ETCD_LOG_PACKAGE_LEVELS=""
#ETCD_LOG_OUTPUT="default"
#
#[Unsafe]
#ETCD_FORCE_NEW_CLUSTER="false"
#
#[Version]
#ETCD_VERSION="false"
#ETCD_AUTO_COMPACTION_RETENTION="0"
#
#[Profiling]
#ETCD_ENABLE_PPROF="false"
#ETCD_METRICS="basic"
#
#[Auth]
#ETCD_AUTH_TOKEN="simple"
EOF
chown -R etcd:etcd /etc/etcd
systemctl enable etcd
systemctl start etcd
etcdctl --endpoints "https://192.168.1.6:2379,https://192.168.1.7:2379,https://192.168.1.8:2379" \
    --ca-file=/etc/etcd/ssl/etcd-ca.pem \
    --cert-file=/etc/etcd/ssl/etcd.pem --key-file=/etc/etcd/ssl/etcd-key.pem cluster-health

[root@node3 ~]# etcdctl --endpoints "https://192.168.1.6:2379,https://192.168.1.7:2379,https://192.168.1.8:2379" --ca-file=/etc/etcd/ssl/etcd-ca.pem \
> --cert-file=/etc/etcd/ssl/etcd.pem --key-file=/etc/etcd/ssl/etcd-key.pem cluster-health
member 3639deb1869a1bda is healthy: got healthy result from https://127.0.0.1:2379
member b75e13f1faa57bd8 is healthy: got healthy result from https://127.0.0.1:2379
member e31fec5bb4c882f2 is healthy: got healthy result from https://127.0.0.1:2379
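The command above goes through the etcd v2 API. Since Kubernetes stores its data via the v3 API, checking that as well can be worthwhile; note the TLS flags are named differently (--cacert/--cert/--key):

ETCDCTL_API=3 etcdctl --endpoints "https://192.168.1.6:2379,https://192.168.1.7:2379,https://192.168.1.8:2379" \
    --cacert=/etc/etcd/ssl/etcd-ca.pem \
    --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem endpoint health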
# Keepalived on lb1 (MASTER)
yum install -y keepalived
cat << EOF > /etc/keepalived/keepalived.conf
! Configuration File for keepalived

global_defs {
   notification_email {
     root@localhost                  #notification recipient
   }
   notification_email_from keepalived@localhost   #sender address
   smtp_server 127.0.0.1             #mail server address
   smtp_connect_timeout 30
   router_id node1                   #hostname; just needs to differ on each node
   vrrp_mcast_group4 224.0.100.100   #multicast group address
}

vrrp_instance VI_1 {
    state MASTER              #BACKUP on the other node
    interface eth0            #NIC the VIP floats to
    virtual_router_id 6       #must be identical on all nodes
    priority 100              #priority; the backup node's value must be lower than the master's
    advert_int 1              #advertisement interval, 1 second
    authentication {
        auth_type PASS        #pre-shared key authentication
        auth_pass 571f97b2    #the key
    }
    virtual_ipaddress {
        192.168.1.100/24      #the VIP
    }
}
EOF
systemctl enable keepalived
systemctl start keepalived
# Keepalived on lb2 (BACKUP)
yum install -y keepalived
cat << EOF > /etc/keepalived/keepalived.conf
! Configuration File for keepalived

global_defs {
   notification_email {
     root@localhost                  #notification recipient
   }
   notification_email_from keepalived@localhost   #sender address
   smtp_server 127.0.0.1             #mail server address
   smtp_connect_timeout 30
   router_id node2                   #hostname; just needs to differ on each node
   vrrp_mcast_group4 224.0.100.100   #multicast group address
}

vrrp_instance VI_1 {
    state BACKUP              #MASTER on the other node
    interface eth0            #NIC the VIP floats to
    virtual_router_id 6       #must be identical on all nodes
    priority 80               #priority; the backup node's value must be lower than the master's
    advert_int 1              #advertisement interval, 1 second
    authentication {
        auth_type PASS        #pre-shared key authentication
        auth_pass 571f97b2    #the key
    }
    virtual_ipaddress {
        192.168.1.100/24      #the floating VIP
    }
}
EOF
systemctl enable keepalived
systemctl start keepalived
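To confirm the VIP is held by the MASTER node and actually fails over:

# On lb1: the VIP should be present
ip addr show eth0 | grep 192.168.1.100
# Simulate a failure and watch the VIP move
systemctl stop keepalived                 # on lb1
ip addr show eth0 | grep 192.168.1.100    # on lb2, the VIP should now appear
systemctl start keepalived                # on lb1; the VIP moves back (preemption is on by default)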
# Haproxy on lb1 (192.168.1.4) and lb2 (192.168.1.5); the configuration is identical on both nodes
yum install -y haproxy
cat << EOF > /etc/haproxy/haproxy.cfg
global
    log         127.0.0.1 local2
    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon

defaults
    mode                tcp
    log                 global
    retries             3
    timeout connect     10s
    timeout client      1m
    timeout server      1m

frontend kubernetes
    bind *:6443
    mode tcp
    default_backend kubernetes-master

backend kubernetes-master
    balance roundrobin
    server master  192.168.1.1:6443 check maxconn 2000
    server master2 192.168.1.2:6443 check maxconn 2000
    server master3 192.168.1.3:6443 check maxconn 2000
EOF
systemctl enable haproxy
systemctl start haproxy
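A quick smoke test for the load balancer (run on either lb node; the curl only returns something meaningful once at least one master is up, at which point an anonymous request typically gets a JSON 401/403 from the apiserver):

ss -tnlp | grep 6443                   # haproxy should be listening on :6443
curl -k https://192.168.1.100:6443/    # expect a JSON error response from the apiserver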
# kubeadm init configuration file reference: https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-init/#config-file
cd $HOME
cat << EOF > /root/kubeadm-init.yaml
apiVersion: kubeadm.k8s.io/v1beta1
kind: InitConfiguration
# Listen address and port of the local apiserver
localAPIEndpoint:
  advertiseAddress: 192.168.1.1
  bindPort: 6443
# Registration info for this node joining the cluster, i.e. what kubectl get node shows
nodeRegistration:
  # If the name field is omitted, the hostname is used by default; the name should be unique within the cluster
  # name: master1
  criSocket: /var/run/dockershim.sock
  # Taint; NoSchedule means no Pods are scheduled onto this node
  # Details: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
# Cluster name
clusterName: kubernetes
# The address the controllers use to reach the apiserver;
# for a multi-master cluster this must be the front-end LB address
controlPlaneEndpoint: "192.168.1.100:6443"
apiServer:
  # List all master IPs, the LB IPs, and any other address, domain name,
  # or hostname through which you may need to reach the apiserver
  certSANs:
  - "master"
  - "master2"
  - "master3"
  - "192.168.1.1"
  - "192.168.1.2"
  - "192.168.1.3"
  - "192.168.1.4"
  - "192.168.1.5"
  - "192.168.1.100"
  - "127.0.0.1"
  timeoutForControlPlane: 4m0s
certificatesDir: /etc/kubernetes/pki
dns:
  type: CoreDNS
etcd:
  # "local" lets k8s run etcd itself; a multi-master cluster must use "external"
  # local:
  #   imageRepository: "k8s.gcr.io"
  #   dataDir: "/var/lib/etcd"
  # External etcd; every apiserver connects to it
  external:
    endpoints:
    - "https://192.168.1.6:2379"
    - "https://192.168.1.7:2379"
    - "https://192.168.1.8:2379"
    caFile: "/etc/kubernetes/pki/etcd/etcd-ca.pem"
    certFile: "/etc/kubernetes/pki/etcd/etcd.pem"
    keyFile: "/etc/kubernetes/pki/etcd/etcd-key.pem"
imageRepository: k8s.gcr.io
kubernetesVersion: v1.14.1
networking:
  # Service CIDR
  serviceSubnet: "10.96.0.0/12"
  # Pod network CIDR
  podSubnet: "10.100.0.1/24"
  dnsDomain: "cluster.local"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: "ipvs"
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: "systemd"
# Whether kubelet treats enabled swap as a startup failure
failSwapOn: false
EOF
systemctl enable kubelet
kubeadm config images pull --config kubeadm-init.yaml
kubeadm init --config /root/kubeadm-init.yaml

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

# Configure kubectl command completion
cat << EOF > /etc/profile.d/kubernetes.sh
source <(kubectl completion bash)
EOF
source /etc/profile.d/kubernetes.sh

# Copy the cluster certificates to the other masters
scp -r /etc/kubernetes/pki 192.168.1.2:/etc/kubernetes/
scp -r /etc/kubernetes/pki 192.168.1.3:/etc/kubernetes/
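Once init finishes, a quick check on master; the node will report NotReady until a pod network add-on (e.g. flannel or calico, not covered in this section) is installed, which is expected at this point:

kubectl get nodes
kubectl get pods -n kube-system -o wide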
# On master2: remove the apiserver certificate copied over from master; kubeadm regenerates it during init
cd /etc/kubernetes/pki/
rm -fr apiserver.crt apiserver.key
cd $HOME
cat << EOF > /root/kubeadm-init.yaml
apiVersion: kubeadm.k8s.io/v1beta1
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.1.2
  bindPort: 6443
nodeRegistration:
  # name: master1
  criSocket: /var/run/dockershim.sock
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
clusterName: kubernetes
controlPlaneEndpoint: "192.168.1.100:6443"
apiServer:
  certSANs:
  - "master"
  - "master2"
  - "master3"
  - "192.168.1.1"
  - "192.168.1.2"
  - "192.168.1.3"
  - "192.168.1.4"
  - "192.168.1.5"
  - "192.168.1.100"
  - "127.0.0.1"
  timeoutForControlPlane: 4m0s
certificatesDir: /etc/kubernetes/pki
dns:
  type: CoreDNS
etcd:
  external:
    endpoints:
    - "https://192.168.1.6:2379"
    - "https://192.168.1.7:2379"
    - "https://192.168.1.8:2379"
    caFile: "/etc/kubernetes/pki/etcd/etcd-ca.pem"
    certFile: "/etc/kubernetes/pki/etcd/etcd.pem"
    keyFile: "/etc/kubernetes/pki/etcd/etcd-key.pem"
imageRepository: k8s.gcr.io
kubernetesVersion: v1.14.1
networking:
  serviceSubnet: "10.96.0.0/12"
  podSubnet: "10.100.0.1/24"
  dnsDomain: "cluster.local"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: "ipvs"
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: "systemd"
failSwapOn: false
EOF
systemctl enable kubelet
kubeadm config images pull --config kubeadm-init.yaml
kubeadm init --config /root/kubeadm-init.yaml

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

cat << EOF > /etc/profile.d/kubernetes.sh
source <(kubectl completion bash)
EOF
source /etc/profile.d/kubernetes.sh
# On master3: same procedure, with the local advertise address
cd /etc/kubernetes/pki/
rm -fr apiserver.crt apiserver.key
cd $HOME
cat << EOF > /root/kubeadm-init.yaml
apiVersion: kubeadm.k8s.io/v1beta1
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.1.3
  bindPort: 6443
nodeRegistration:
  # name: master1
  criSocket: /var/run/dockershim.sock
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
clusterName: kubernetes
controlPlaneEndpoint: "192.168.1.100:6443"
apiServer:
  certSANs:
  - "master"
  - "master2"
  - "master3"
  - "192.168.1.1"
  - "192.168.1.2"
  - "192.168.1.3"
  - "192.168.1.4"
  - "192.168.1.5"
  - "192.168.1.100"
  - "127.0.0.1"
  timeoutForControlPlane: 4m0s
certificatesDir: /etc/kubernetes/pki
dns:
  type: CoreDNS
etcd:
  external:
    endpoints:
    - "https://192.168.1.6:2379"
    - "https://192.168.1.7:2379"
    - "https://192.168.1.8:2379"
    caFile: "/etc/kubernetes/pki/etcd/etcd-ca.pem"
    certFile: "/etc/kubernetes/pki/etcd/etcd.pem"
    keyFile: "/etc/kubernetes/pki/etcd/etcd-key.pem"
imageRepository: k8s.gcr.io
kubernetesVersion: v1.14.1
networking:
  serviceSubnet: "10.96.0.0/12"
  podSubnet: "10.100.0.1/24"
  dnsDomain: "cluster.local"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: "ipvs"
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: "systemd"
failSwapOn: false
EOF
systemctl enable kubelet
kubeadm config images pull --config kubeadm-init.yaml
kubeadm init --config /root/kubeadm-init.yaml

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

# Configure kubectl command completion
cat << EOF > /etc/profile.d/kubernetes.sh
source <(kubectl completion bash)
EOF
source /etc/profile.d/kubernetes.sh
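With all three control-plane nodes initialized, they should all be registered (again, NotReady is expected until a CNI add-on is deployed):

kubectl get nodes -o wide   # expect master, master2, and master3 listed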
# Run on the master host to get the join command
kubeadm token create --print-join-command

[root@master ~]# kubeadm token create --print-join-command
kubeadm join 192.168.1.100:6443 --token zpru0r.jkvrdyy2caexr8kk --discovery-token-ca-cert-hash sha256:a45c091dbd8a801152aacd877bcaaaaf152697bfa4536272c905a83612b3bf22
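Run the printed command on node1 and node2 (the token below is the one from the output above; yours will differ), then confirm from a master:

# On node1 and node2
kubeadm join 192.168.1.100:6443 --token zpru0r.jkvrdyy2caexr8kk \
    --discovery-token-ca-cert-hash sha256:a45c091dbd8a801152aacd877bcaaaaf152697bfa4536272c905a83612b3bf22
# Back on a master
kubectl get nodes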