For Kubernetes v1.13.4, please switch to the 1.13-Release branch of this project.

[root@linux-node1 ~]# cat /etc/hostname
linux-node1
[root@linux-node2 ~]# cat /etc/hostname
linux-node2
[root@linux-node3 ~]# cat /etc/hostname
linux-node3
[root@linux-node4 ~]# cat /etc/hostname
linux-node4
[root@linux-node1 ~]# cat /etc/hosts
127.0.0.1       localhost localhost.localdomain localhost4 localhost4.localdomain4
::1             localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.150.141 linux-node1
192.168.150.142 linux-node2
192.168.150.143 linux-node3
192.168.150.144 linux-node4
systemctl disable --now firewalld NetworkManager
setenforce 0
sed -ri '/^[^#]*SELINUX=/s#=.+$#=disabled#' /etc/selinux/config
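If you want to double-check that SELinux is really off after this change, the following optional verification (not part of the original steps) can be used; getenforce should report Permissive now and Disabled after the next reboot:

getenforce
sestatus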
yum install chrony -y
cat <<EOF > /etc/chrony.conf
server ntp.aliyun.com iburst
stratumweight 0
driftfile /var/lib/chrony/drift
rtcsync
makestep 10 3
bindcmdaddress 127.0.0.1
bindcmdaddress ::1
keyfile /etc/chrony.keys
commandkey 1
generatecommandkey
logchange 0.5
logdir /var/log/chrony
EOF
systemctl restart chronyd
systemctl enable --now chronyd
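Once chronyd is running, it is worth confirming that the node is actually synchronizing against ntp.aliyun.com. This is an optional check, not part of the original walkthrough:

chronyc sources -v
chronyc tracking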
# The kernels shipped by the distro package managers are too old; after installing Docker,
# both CentOS and Ubuntu hit the following bug, which still exists on 4.15 kernels:
# kernel:unregister_netdevice: waiting for lo to become free. Usage count = 1

# Install required packages
yum install wget git jq psmisc vim perl -y

# Upgrading the kernel requires the elrepo yum repository; first import the elrepo key and install the elrepo release package
rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-2.el7.elrepo.noarch.rpm

# List the available kernels
yum --disablerepo="*" --enablerepo="elrepo-kernel" list available --showduplicates

# Install a kernel version of your choice
export Kernel_Version=4.18.16-1
wget http://mirror.rc.usf.edu/compute_lock/elrepo/kernel/el7/x86_64/RPMS/kernel-ml{,-devel}-${Kernel_Version}.el7.elrepo.x86_64.rpm
yum localinstall -y kernel-ml*

# Check whether this kernel ships the nf_conntrack_ipv4 module
find /lib/modules -name '*nf_conntrack_ipv4*' -type f

# Adjust the boot order: the default entry is normally 1, and the newly installed kernel is inserted in front of it at index 0
# (this step can be skipped if you do not mind selecting the kernel manually at every boot)
grub2-set-default 0 && grub2-mkconfig -o /etc/grub2.cfg

# Confirm that the default kernel now points at the one installed above
grubby --default-kernel

# Docker's kernel check script recommends (RHEL7/CentOS7: User namespaces disabled; add 'user_namespace.enable=1' to boot command line); enable it with:
grubby --args="user_namespace.enable=1" --update-kernel="$(grubby --default-kernel)"

# Reboot to load the new kernel
reboot
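After the reboot, a quick sanity check (not part of the original steps) confirms that the machine came back up on the new kernel:

uname -r
# should print something like 4.18.16-1.el7.elrepo.x86_64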
$ :> /etc/modules-load.d/ipvs.conf
$ module=(
  ip_vs
  ip_vs_lc
  ip_vs_wlc
  ip_vs_rr
  ip_vs_wrr
  ip_vs_lblc
  ip_vs_lblcr
  ip_vs_dh
  ip_vs_sh
  ip_vs_fo
  ip_vs_nq
  ip_vs_sed
  ip_vs_ftp
  )
$ for kernel_module in ${module[@]};do
    /sbin/modinfo -F filename $kernel_module |& grep -qv ERROR && echo $kernel_module >> /etc/modules-load.d/ipvs.conf || :
  done
$ systemctl enable --now systemd-modules-load.service
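You can verify that the IPVS modules were written to the config file and actually loaded by systemd-modules-load. This is an optional check, not part of the original steps:

$ cat /etc/modules-load.d/ipvs.conf
$ lsmod | grep -e ip_vs -e nf_conntrack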
$ cat <<EOF > /etc/sysctl.d/k8s.conf
# https://github.com/moby/moby/issues/31208
# ipvsadm -l --timeout
# Fixes long-connection timeouts in IPVS mode; any value below 900 is fine
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_intvl = 30
net.ipv4.tcp_keepalive_probes = 10
net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
net.ipv6.conf.lo.disable_ipv6 = 1
net.ipv4.neigh.default.gc_stale_time = 120
net.ipv4.conf.all.rp_filter = 0
net.ipv4.conf.default.rp_filter = 0
net.ipv4.conf.default.arp_announce = 2
net.ipv4.conf.lo.arp_announce = 2
net.ipv4.conf.all.arp_announce = 2
net.ipv4.ip_forward = 1
net.ipv4.tcp_max_tw_buckets = 5000
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 1024
net.ipv4.tcp_synack_retries = 2
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.netfilter.nf_conntrack_max = 2310720
fs.inotify.max_user_watches=89100
fs.may_detach_mounts = 1
fs.file-max = 52706963
fs.nr_open = 52706963
net.bridge.bridge-nf-call-arptables = 1
vm.swappiness = 0
vm.overcommit_memory=1
vm.panic_on_oom=0
EOF
$ sysctl --system
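To spot-check that the settings took effect, you can query a few keys back. If the net.bridge.* keys report an error, the br_netfilter module is probably not loaded yet; loading it and re-running sysctl --system is a common fix (this note is an assumption, not from the original guide):

$ modprobe br_netfilter
$ sysctl net.ipv4.ip_forward net.bridge.bridge-nf-call-iptables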
[root@linux-node1 ~]# ssh-keygen -t rsa
[root@linux-node1 ~]# ssh-copy-id linux-node1
[root@linux-node1 ~]# ssh-copy-id linux-node2
[root@linux-node1 ~]# ssh-copy-id linux-node3
[root@linux-node1 ~]# ssh-copy-id linux-node4
[root@linux-node1 ~]# scp /etc/hosts linux-node2:/etc/
[root@linux-node1 ~]# scp /etc/hosts linux-node3:/etc/
[root@linux-node1 ~]# scp /etc/hosts linux-node4:/etc/
2.1 Install Salt SSH (note: older versions of Salt SSH do not support defining Grains in the Roster; version 2017.7.4 or later is required)
[root@linux-node1 ~]# yum install -y https://mirrors.aliyun.com/saltstack/yum/redhat/salt-repo-latest-2.el7.noarch.rpm
[root@linux-node1 ~]# sed -i "s/repo.saltstack.com/mirrors.aliyun.com\/saltstack/g" /etc/yum.repos.d/salt-latest.repo
[root@linux-node1 ~]# yum install -y salt-ssh git unzip
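Given the version requirement mentioned in 2.1 (Salt SSH 2017.7.4 or later for Roster-defined Grains), it is worth checking what actually got installed; this optional verification is not in the original text:

[root@linux-node1 ~]# salt-ssh --version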
2.2 Fetch this project's code and place it in the /srv directory
[root@linux-node1 ~]# git clone https://github.com/skymyyang/salt-k8s-ha.git
[root@linux-node1 ~]# cd salt-k8s-ha/
[root@linux-node1 ~]# mv * /srv/
[root@linux-node1 srv]# /bin/cp /srv/roster /etc/salt/roster
[root@linux-node1 srv]# /bin/cp /srv/master /etc/salt/master
2.3 Download the binary files. You can also download them yourself from the official releases; for the convenience of users in mainland China, download k8s-v1.12.5-auto.zip from Baidu Netdisk. After the download completes, move the archive into the /srv/salt/k8s/ directory and unzip it there so the binaries end up under files/. Kubernetes binaries download link: https://pan.baidu.com/s/1Ag2ocpVmkg-uEoV13A7HFw
[root@linux-node1 ~]# cd /srv/salt/k8s/
[root@linux-node1 k8s]# unzip k8s-v1.12.5-auto.zip
[root@linux-node1 k8s]# rm -f k8s-v1.12.5-auto.zip
[root@linux-node1 k8s]# ls -l files/
total 0
drwxr-xr-x 2 root root  94 Jan 18 19:19 cfssl-1.2
drwxr-xr-x 2 root root 195 Jan 18 19:19 cni-plugins-amd64-v0.7.4
drwxr-xr-x 3 root root 123 Jan 18 19:19 etcd-v3.3.10-linux-amd64
drwxr-xr-x 2 root root  47 Jan 18 19:19 flannel-v0.10.0-linux-amd64
drwxr-xr-x 3 root root  17 Jan 18 19:19 k8s-v1.12.5
[root@linux-node1 ~]# vim /etc/salt/roster
linux-node1:
  host: 192.168.150.141
  user: root
  priv: /root/.ssh/id_rsa
  minion_opts:
    grains:
      k8s-role: master
      etcd-role: node
      etcd-name: etcd-node1
linux-node2:
  host: 192.168.150.142
  user: root
  priv: /root/.ssh/id_rsa
  minion_opts:
    grains:
      k8s-role: master
      etcd-role: node
      etcd-name: etcd-node2
linux-node3:
  host: 192.168.150.143
  user: root
  priv: /root/.ssh/id_rsa
  minion_opts:
    grains:
      k8s-role: master
      etcd-role: node
      etcd-name: etcd-node3
linux-node4:
  host: 192.168.150.144
  user: root
  priv: /root/.ssh/id_rsa
  minion_opts:
    grains:
      k8s-role: node
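If you want to confirm that the Grains defined in the roster are being picked up before going any further, you can query them through Salt SSH (an optional check, not part of the original walkthrough):

[root@linux-node1 ~]# salt-ssh '*' grains.item k8s-role etcd-role etcd-name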
[root@linux-node1 ~]# vim /srv/pillar/k8s.sls
# Master IP addresses (must be modified)
MASTER_IP_M1: "192.168.150.141"
MASTER_IP_M2: "192.168.150.142"
MASTER_IP_M3: "192.168.150.143"
# Master hostnames as full FQDNs (must be modified)
MASTER_H1: "linux-node1"
MASTER_H2: "linux-node2"
MASTER_H3: "linux-node3"
# etcd cluster client endpoints (must be modified)
ETCD_ENDPOINTS: "https://192.168.150.141:2379,https://192.168.150.142:2379,https://192.168.150.143:2379"
FLANNEL_ETCD_PREFIX: "/kubernetes/network"
# etcd cluster initial member list (must be modified)
ETCD_CLUSTER: "etcd-node1=https://192.168.150.141:2380,etcd-node2=https://192.168.150.142:2380,etcd-node3=https://192.168.150.143:2380"
# The node IP is obtained automatically from the Grains FQDN; make sure the hostname resolves to the local IP address
NODE_IP: {{ grains['fqdn_ip4'][0] }}
HOST_NAME: {{ grains['fqdn'] }}
# Bootstrap token; you can generate your own
BOOTSTRAP_TOKEN: "be8dad.da8a699a46edc482"
TOKEN_ID: "be8dad"
TOKEN_SECRET: "da8a699a46edc482"
ENCRYPTION_KEY: "8eVtmpUpYjMvH8wKZtKCwQPqYRqM14yvtXPLJdhu0gA="
# Service IP address range
SERVICE_CIDR: "10.1.0.0/16"
# Kubernetes service IP (pre-allocated from SERVICE_CIDR)
CLUSTER_KUBERNETES_SVC_IP: "10.1.0.1"
# Kubernetes DNS service IP (pre-allocated from SERVICE_CIDR)
CLUSTER_DNS_SVC_IP: "10.1.0.2"
# NodePort port range
NODE_PORT_RANGE: "20000-40000"
# Pod IP address range
POD_CIDR: "10.2.0.0/16"
# Cluster DNS domain
CLUSTER_DNS_DOMAIN: "cluster.local."
# Docker registry address
#DOCKER_REGISTRY: "https://192.168.150.135:5000"
# Master VIP address (must be modified)
MASTER_VIP: "192.168.150.253"
# Network interface name for the VIP
VIP_IF: "ens32"
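The pillar comments say you can generate your own bootstrap token and encryption key. One possible way to do so (the exact commands are an illustration, not taken from the project) is to build a token in the id.secret format and a random 32-byte base64 key:

[root@linux-node1 ~]# TOKEN_ID=$(head -c 16 /dev/urandom | md5sum | head -c 6)
[root@linux-node1 ~]# TOKEN_SECRET=$(head -c 16 /dev/urandom | md5sum | head -c 16)
[root@linux-node1 ~]# echo "${TOKEN_ID}.${TOKEN_SECRET}"   # use as BOOTSTRAP_TOKEN
[root@linux-node1 ~]# head -c 32 /dev/urandom | base64      # use as ENCRYPTION_KEY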
[root@linux-node1 ~]# salt-ssh '*' test.ping
Running the highstate deploys the corresponding services on each machine according to the roles defined above. Deploy the etcd cluster on the three etcd nodes first, then run the highstate on all hosts.
[root@linux-node1 ~]# salt-ssh -L 'linux-node1,linux-node2,linux-node3' state.sls k8s.etcd
[root@linux-node1 ~]# salt-ssh '*' state.highstate
Because the packages are large, this step takes a while depending on your hardware (5+ minutes), so grab a coffee. If a run fails, simply execute it again.
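If only a single host failed, you can re-run the highstate against just that machine instead of the whole cluster (the host name below is illustrative):

[root@linux-node1 ~]# salt-ssh 'linux-node4' state.highstate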
# Verify etcd first
[root@linux-node1 ~]# source /etc/profile
[root@linux-node1 ~]# etcdctl --endpoints=https://192.168.150.141:2379 \
  --ca-file=/opt/kubernetes/ssl/ca.pem \
  --cert-file=/opt/kubernetes/ssl/etcd.pem \
  --key-file=/opt/kubernetes/ssl/etcd-key.pem cluster-health
[root@linux-node1 ~]# etcdctl --endpoints=https://192.168.150.141:2379 \
  --ca-file=/opt/kubernetes/ssl/ca.pem \
  --cert-file=/opt/kubernetes/ssl/etcd.pem \
  --key-file=/opt/kubernetes/ssl/etcd-key.pem member list
[root@linux-node1 ~]# kubectl get cs
NAME                 STATUS    MESSAGE              ERROR
controller-manager   Healthy   ok
scheduler            Healthy   ok
etcd-2               Healthy   {"health":"true"}
etcd-1               Healthy   {"health":"true"}
etcd-0               Healthy   {"health":"true"}
[root@linux-node1 ~]# kubectl get node
NAME          STATUS   ROLES    AGE   VERSION
linux-node1   Ready    master   14m   v1.12.5
linux-node2   Ready    master   24m   v1.12.5
linux-node3   Ready    master   24m   v1.12.5
linux-node4   Ready    node     30m   v1.12.5
[root@linux-node1 ~]# kubectl create deployment nginx --image=nginx:alpine
deployment.apps/nginx created

The image needs to be pulled first, so this may take a moment.

[root@linux-node1 ~]# kubectl get pod
NAME                     READY   STATUS    RESTARTS   AGE
nginx-54458cd494-8fj47   1/1     Running   0          13s
[root@linux-node1 ~]# kubectl get pod -o wide
NAME                     READY   STATUS    RESTARTS   AGE    IP          NODE          NOMINATED NODE   READINESS GATES
nginx-54458cd494-8fj47   1/1     Running   0          111s   10.2.70.3   linux-node1   <none>           <none>

Test connectivity:

[root@linux-node1 ~]# ping -c 1 10.2.70.3
PING 10.2.70.3 (10.2.70.3) 56(84) bytes of data.
64 bytes from 10.2.70.3: icmp_seq=1 ttl=61 time=2.02 ms

--- 10.2.70.3 ping statistics ---
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 2.028/2.028/2.028/0.000 ms
[root@linux-node1 ~]# curl --head http://10.2.70.3
HTTP/1.1 200 OK
Server: nginx/1.15.8
Date: Wed, 27 Feb 2019 09:52:48 GMT
Content-Type: text/html
Content-Length: 612
Last-Modified: Thu, 31 Jan 2019 23:32:11 GMT
Connection: keep-alive
ETag: "5c53857b-264"
Accept-Ranges: bytes

Test scaling by expanding the Nginx deployment to 2 replicas:

[root@linux-node1 ~]# kubectl scale deployment nginx --replicas=2
deployment.extensions/nginx scaled
[root@linux-node1 ~]# kubectl get pod
NAME                     READY   STATUS    RESTARTS   AGE
nginx-54458cd494-8fj47   1/1     Running   0          5m4s
nginx-54458cd494-qzhpf   1/1     Running   0          17s
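To also exercise Services and the NodePort range (20000-40000) configured in the pillar, the deployment can be exposed. This extra step is not in the original walkthrough, and the assigned port will differ on your cluster:

[root@linux-node1 ~]# kubectl expose deployment nginx --port=80 --type=NodePort
service/nginx exposed
[root@linux-node1 ~]# kubectl get svc nginx
[root@linux-node1 ~]# curl --head http://192.168.150.144:<assigned-nodeport>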
To add a new node to the cluster:
1. Add the new machine's entry to /etc/hosts and make sure all nodes can resolve it.
2. Add the corresponding machine to /etc/salt/roster.
3. Run salt-ssh '*' state.highstate (or target just the new machine).
[root@linux-node1 ~]# vim /etc/salt/roster
linux-node5:
  host: 192.168.150.145
  user: root
  priv: /root/.ssh/id_rsa
  minion_opts:
    grains:
      k8s-role: node
[root@linux-node1 ~]# salt-ssh 'linux-node5' state.highstate
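Note that before running the highstate against the new machine, it also needs the updated /etc/hosts and the master's SSH key, mirroring the preparation done for the first four nodes (the exact commands below are for illustration):

[root@linux-node1 ~]# echo "192.168.150.145 linux-node5" >> /etc/hosts
[root@linux-node1 ~]# ssh-copy-id linux-node5
[root@linux-node1 ~]# scp /etc/hosts linux-node5:/etc/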
You can now install the essential Kubernetes add-ons. For instructions on how to install them, please refer to the original project repository.