Kubernetes binary installation v2

 

Host inventory and software planning

 

 

    Host initialization

                        systemctl stop firewalld
                        systemctl disable firewalld
                        sed -i 's/enforcing/disabled/' /etc/selinux/config
                        swapoff -a
                        ntpdate time.windows.com
                 Add /etc/hosts entries
                        192.168.1.61 k8s-lb1
                        192.168.1.62 k8s-lb2
                        192.168.1.63 k8s-master1
                        192.168.1.64 k8s-master2
                        192.168.1.65 k8s-node1
                        192.168.1.66 k8s-node2
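The initialization above can also be pushed to all six hosts in one pass. A minimal sketch using ansible ad-hoc commands (the inventory group name k8s is an assumption; it should contain the hosts listed above):

ansible k8s -m shell -a "systemctl stop firewalld && systemctl disable firewalld"
ansible k8s -m shell -a "sed -i 's/enforcing/disabled/' /etc/selinux/config && swapoff -a"
ansible k8s -m shell -a "ntpdate time.windows.com"
ansible k8s -m copy -a "src=/etc/hosts dest=/etc/hosts"    # assumes the entries above are in the control node's /etc/hosts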

 

Generate self-signed certificates with cfssl

                             #Run the cfssl.sh script so the cfssl commands are available on this host
[root@ansible ~]# cd /root/k8s/TLS/
[root@ansible TLS]# ls
cfssl  cfssl-certinfo  cfssljson  cfssl.sh  etcd  k8s
[root@ansible TLS]# ./cfssl.sh 

                             #Issue certificates for etcd
[root@ansible TLS]# cd etcd/
[root@ansible etcd]# ls
ca-config.json  ca-csr.json  server-csr.json  generate_etcd_cert.sh  
[root@ansible etcd]#        
                             #Use cfssl to generate a CA from ca-csr.json,
                             #then use the CA together with ca-config.json and server-csr.json
                             #to issue the certificate files for etcd; both steps are done
                             #by the script generate_etcd_cert.sh
[root@ansible etcd]# ./generate_etcd_cert.sh                            
[root@ansible etcd]# ls *.pem
ca-key.pem  ca.pem  server-key.pem  server.pem
[root@ansible etcd]# 
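generate_etcd_cert.sh itself is not listed here; at its core it is typically just two cfssl invocations. A minimal sketch (the profile name www is an assumption and must match whatever ca-config.json defines):

cfssl gencert -initca ca-csr.json | cfssljson -bare ca -                     # generate the CA
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json \
      -profile=www server-csr.json | cfssljson -bare server                  # issue the etcd server certificate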
                 
                             #Issue certificates for the apiserver
[root@ansible TLS]# cd k8s/                             
[root@ansible k8s]# ls 
ca-config.json  ca-csr.json  kube-proxy-csr.json  server-csr.json
generate_k8s_cert.sh
[root@ansible k8s]#          #The script generate_k8s_cert.sh first generates
                             #a CA from ca-csr.json, then uses the CA with
                             #server-csr.json and ca-config.json to issue the
                             #apiserver certificate, and with kube-proxy-csr.json
                             #to issue the kube-proxy certificate
[root@ansible k8s]# ./generate_k8s_cert.sh                             
[root@ansible k8s]# ls *.pem
ca-key.pem  ca.pem  kube-proxy-key.pem  kube-proxy.pem  server-key.pem  server.pem
[root@ansible k8s]# 
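Before distributing the certificates it is worth confirming the SANs baked into server.pem, since the apiserver certificate should contain every master/LB/VIP address clients will use. A quick check (assuming cfssl.sh put cfssl-certinfo on the PATH; openssl works as well):

cfssl-certinfo -cert server.pem | grep -A8 sans
openssl x509 -in server.pem -noout -text | grep -A1 'Subject Alternative Name'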

 

Configure the etcd database

                             #Copy etcd.service from the ETCD directory to
                             #/usr/lib/systemd/system/ on the etcd nodes
                             #Copy the certificates cfssl generated for etcd into
                             #the etcd/ssl/ directory of the binary package
[root@ansible k8s]# cd ETCD/
[root@ansible ETCD]# ls
etcd  etcd.service  etcd.tar.gz       
[root@ansible ETCD]# cd etcd/ssl/            
[root@ansible ssl]# ls *.pem
ca.pem  server-key.pem  server.pem            
[root@ansible ETCD]# cd etcd/cfg/
[root@ansible cfg]# ls
etcd.conf
[root@ansible cfg]# 
                             #Edit /usr/lib/systemd/system/etcd.service on the etcd nodes
                             #Lines 20-21: certificate used when clients access the etcd cluster
                             #Line 24: CA that issued that certificate
                             #Lines 22-23: certificate used for peer traffic inside the etcd cluster
                             #Line 25: CA that issued that certificate
                             #Lines 24-25 are not needed with non-self-signed certificates
 20         --cert-file=/opt/etcd/ssl/server.pem \
 21         --key-file=/opt/etcd/ssl/server-key.pem \
 22         --peer-cert-file=/opt/etcd/ssl/server.pem \
 23         --peer-key-file=/opt/etcd/ssl/server-key.pem \
 24         --trusted-ca-file=/opt/etcd/ssl/ca.pem \
 25         --peer-trusted-ca-file=/opt/etcd/ssl/ca.pem
                
                             #Edit etcd/cfg/etcd.conf
                             #Set the name of this etcd member
                             #Set the address and port this member uses for peer traffic
                             #Set the address and port clients use to reach this member
                             #[Clustering] section
                             #Address and port this member advertises to its peers
                             #Address and port advertised to clients
                             #List of all members of the etcd cluster
                             #Cluster token shared by the members, here etcd-cluster
                             #State is "new" because this member joins a newly created cluster,
                             #otherwise "existing"
  2 #[Member]
  3 ETCD_NAME="etcd-1"
  4 ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
  5 ETCD_LISTEN_PEER_URLS="https://192.168.1.63:2380"
  6 ETCD_LISTEN_CLIENT_URLS="https://192.168.1.63:2379"
  8 #[Clustering]
  9 ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.1.63:2380"
 10 ETCD_ADVERTISE_CLIENT_URLS="https://192.168.1.63:2379"
 11 ETCD_INITIAL_CLUSTER="etcd-1=https://192.168.1.63:2380,etcd-2=https://192.168.1.65:2380,etcd-3=https://192.168.1.66:2380"
 12 ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
 13 ETCD_INITIAL_CLUSTER_STATE="new"                             
                            
                             #After editing both files, copy the whole etcd directory to /opt/ on the etcd nodes
                             #Every etcd node needs the etcd directory and the service file copied
                             
                             #Start the service
systemctl daemon-reload 
systemctl start etcd
systemctl enable etcd

Notes:
If the certificates under etcd/ssl are not updated, the etcd service will not start
When ansible's copy module copies the etcd directory to the etcd nodes, the files under etcd/bin/ lose the execute bit
The IP addresses and member name in etcd/cfg/etcd.conf must be changed on each node
                
ansible etcd -m copy -a "src=/root/k8s/ETCD/etcd.service  dest=/usr/lib/systemd/system/" 
ansible etcd -m copy -a "src=/root/k8s/ETCD/etcd  dest=/opt/ mode=0755"    

 

Configure the Master node

                             #(1) Copy the kube-apiserver, kube-controller-manager and kube-scheduler
                             #service files to /usr/lib/systemd/system/ on the Master node
                             #(2) Copy the keys cfssl generated for the apiserver into kubernetes/ssl/
                             #Layout of the kubernetes directory:
                             #(3) bin holds the binaries; downloading a newer release from GitHub
                             #and replacing these 4 files upgrades the master components
                             #(4) Copy bin/kubectl to /usr/local/bin/ on the master node so that
                             #the kubectl tool is on the PATH
[root@ansible ~]# cd /root/k8s/MASTER/
[root@ansible MASTER]# ls
kube-apiserver.service  kube-controller-manager.service  kubernetes  kube-scheduler.service                                             
[root@ansible MASTER]# cd kubernetes/ssl/
[root@ansible ssl]# ls
ca-key.pem  ca.pem  server-key.pem  server.pem
[root@ansible ssl]#         
[root@ansible kubernetes]# tree .
.
├── bin
│   ├── kube-apiserver
│   ├── kube-controller-manager
│   ├── kubectl
│   └── kube-scheduler
├── cfg
│   ├── kube-apiserver.conf
│   ├── kube-controller-manager.conf
│   ├── kube-scheduler.conf
│   └── token.csv
├── logs
└── ssl
    ├── ca-key.pem
    ├── ca.pem
    ├── server-key.pem
    └── server.pem
[root@ansible kubernetes]# 
                             #(5) Edit the apiserver configuration file
                             #Log level and the k8s log directory
                             #Addresses and ports the apiserver uses to reach etcd
                             #Address and port this apiserver listens on
                             #Address advertised to the nodes for reaching this apiserver
                             #Allow privileged containers
                             #Service IPs are allocated from the 10.0.0.0/24 range
                             #Admission plugins to enable (advanced k8s features)
                             #Other components authenticate to the apiserver with RBAC
                             #Token file used when granting permissions to nodes
                             #Port range usable when exposing services as NodePort
  1 KUBE_APISERVER_OPTS="--logtostderr=false \
  2 --v=2 \
  3 --log-dir=/opt/kubernetes/logs \
  4 --etcd-servers=https://192.168.1.63:2379,https://192.168.1.65:2379,https://192.168.1.66:2379 \
  5 --bind-address=192.168.1.63 \
  6 --secure-port=6443 \
  7 --advertise-address=192.168.1.63 \
  8 --allow-privileged=true \
  9 --service-cluster-ip-range=10.0.0.0/24 \
 10 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \
 11 --authorization-mode=RBAC,Node \
 12 --enable-bootstrap-token-auth=true \
 13 --token-auth-file=/opt/kubernetes/cfg/token.csv \
 14 --service-node-port-range=30000-32767 \
                             #Certificates the apiserver uses when talking to the kubelets
 15 --kubelet-client-certificate=/opt/kubernetes/ssl/server.pem \
 16 --kubelet-client-key=/opt/kubernetes/ssl/server-key.pem \
                             #Certificates used for the apiserver's HTTPS endpoint
 17 --tls-cert-file=/opt/kubernetes/ssl/server.pem  \
 18 --tls-private-key-file=/opt/kubernetes/ssl/server-key.pem \
 19 --client-ca-file=/opt/kubernetes/ssl/ca.pem \
                             #Private key used for service accounts
 20 --service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \
                             #Certificates the apiserver uses when talking to etcd
 21 --etcd-cafile=/opt/etcd/ssl/ca.pem \
 22 --etcd-certfile=/opt/etcd/ssl/server.pem \
 23 --etcd-keyfile=/opt/etcd/ssl/server-key.pem \
                             #Audit settings for requests to the apiserver
 24 --audit-log-maxage=30 \
 25 --audit-log-maxbackup=3 \
 26 --audit-log-maxsize=100 \
 27 --audit-log-path=/opt/kubernetes/logs/k8s-audit.log"
                             #(6) Edit the controller-manager configuration file
                             #Log level and log directory
                             #Enable leader-elect so leader election works against etcd
                             #controller-manager reaches the apiserver on the local port 8080
                             #controller-manager listens on localhost only; it assists the
                             #apiserver and does not need to talk to the outside
                             #allocate-node-cidrs controls whether pod CIDRs are allocated for the CNI plugin
                             #The CNI plugin's network must match the 10.244.0.0/16 range
                             #Service IP range is 10.0.0.0/24
  1 KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=false \
  2 --v=2 \
  3 --log-dir=/opt/kubernetes/logs \
  4 --leader-elect=true \
  5 --master=127.0.0.1:8080 \
  6 --address=127.0.0.1 \
  7 --allocate-node-cidrs=true \
  8 --cluster-cidr=10.244.0.0/16 \
  9 --service-cluster-ip-range=10.0.0.0/24 \
                             #When a node joins the cluster its kubelet certificate is issued
                             #automatically by the controller-manager using the two files below
 10 --cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \
 11 --cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem  \
                             #ServiceAccount authentication uses the CA and key on the next two lines
 12 --root-ca-file=/opt/kubernetes/ssl/ca.pem \
 13 --service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem \
                             #kubelet certificates issued to the nodes are valid for 10 years
 14 --experimental-cluster-signing-duration=87600h0m0s"
 
                             #(7) Edit the scheduler configuration file
                             #Log level and log directory
                             #The scheduler uses leader election
                             #Reach the local apiserver at 127.0.0.1:8080
                             #Listen on localhost only
  1 KUBE_SCHEDULER_OPTS="--logtostderr=false \
  2 --v=2 \
  3 --log-dir=/opt/kubernetes/logs \
  4 --leader-elect \
  5 --master=127.0.0.1:8080 \
  6 --address=127.0.0.1"
                             #(8) Copy the kubernetes directory to /opt/ on the Master host,
                             #which completes the installation of apiserver, controller-manager and scheduler
                             #(9) Start the three services and check their logs
systemctl start kube-apiserver kube-controller-manager  kube-scheduler
ps -ef | egrep 'kube-apiserver|kube-controller-manager|kube-scheduler'
less  kube-apiserver.INFO 
less kube-controller-manager.INFO 
less kube-scheduler.INFO  

                             #(10) Enable the services at boot and check the component status
for i in kube-apiserver kube-controller-manager kube-scheduler; do systemctl enable $i; done
kubectl get cs
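When everything is healthy, kubectl get cs on this version reports the scheduler, controller-manager and the three etcd members; illustrative output (roughly, not captured from this cluster):

NAME                 STATUS    MESSAGE             ERROR
scheduler            Healthy   ok
controller-manager   Healthy   ok
etcd-0               Healthy   {"health":"true"}
etcd-1               Healthy   {"health":"true"}
etcd-2               Healthy   {"health":"true"}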

                             #(11) Enable TLS Bootstrapping
                             #so that kubelet certificates are issued automatically as worker
                             #nodes join the cluster later
                             #Line 13 of the apiserver configuration points at the token file,
                             #/opt/kubernetes/cfg/token.csv
                             #Format of token.csv:
                             #                token,user,uid,group
                             #The token value can also be generated by hand:
                             #        head -c 16 /dev/urandom | od -An -t x | tr -d ' '
                             #Note:
                             #the token in token.csv must match the one in bootstrap.kubeconfig
                             #on the nodes
[root@master1 cfg]# more token.csv 
c47ffb939f5ca36231d9e3121a252940,kubelet-bootstrap,10001,"system:node-bootstrapper"
[root@master1 cfg]#
                             #Grant permissions to the kubelet-bootstrap user
                             #by binding the user kubelet-bootstrap to the clusterrole
                             #system:node-bootstrapper, so it may request certificates
[root@master1 cfg]# kubectl create clusterrolebinding kubelet-bootstrap \
 --clusterrole=system:node-bootstrapper \
 --user=kubelet-bootstrap
clusterrolebinding.rbac.authorization.k8s.io/kubelet-bootstrap created
[root@master1 cfg]#
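The binding can be double-checked afterwards:

kubectl get clusterrolebinding kubelet-bootstrap -o wide
kubectl describe clusterrolebinding kubelet-bootstrap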

 

Deploy the Node (worker) nodes

                              #(1) Copy the binaries from the docker directory to /usr/bin/ on the Node,
                              #and the docker configuration file daemon.json to /etc/docker on the Node
                              #(2) Copy the service files for kubelet, kube-proxy and docker to
                              #/usr/lib/systemd/system/ on the Node
                              #(3) Copy the certificates cfssl generated for kube-proxy into kubernetes/ssl/
                              #(4) Layout of the kubernetes directory for the Node:
[root@ansible NODE]# tar -zxvf k8s-node.tar.gz
[root@ansible NODE]# ls
daemon.json  docker-18.09.6.tgz  k8s-node.tar.gz  kube-proxy.service
docker       docker.service      kubelet.service  kubernetes
[root@ansible NODE]#tar -zxvf docker-18.09.6.tgz 
[root@ansible NODE]#cd docker/
[root@ansible docker]# ls
containerd       ctr     dockerd      docker-proxy
containerd-shim  docker  docker-init  runc
[root@ansible docker]# cd /root/k8s/NODE/kubernetes
[root@ansible kubernetes]# tree .
.
├── bin
│   ├── kubelet
│   └── kube-proxy
├── cfg
│   ├── bootstrap.kubeconfig
│   ├── kubelet.conf
│   ├── kubelet-config.yml
│   ├── kube-proxy.conf
│   ├── kube-proxy-config.yml
│   └── kube-proxy.kubeconfig
├── logs
└── ssl
    ├── ca.pem
    ├── kube-proxy-key.pem
    └── kube-proxy.pem
[root@ansible kubernetes]# 
                              #(5) Edit the kubelet.conf file
                              #Log level and log directory
                              #Hostname of this node is Node1
                              #Use cni as the network plugin
                              #Path of kubelet.kubeconfig
                              #Path of bootstrap.kubeconfig
                              #Path of kubelet-config.yml
                              #Automatically issued certificates are stored in /opt/kubernetes/ssl
                              #Image used for the pod infrastructure container, pause-amd64:3.0
  1 KUBELET_OPTS="--logtostderr=false \
  2 --v=2 \
  3 --log-dir=/opt/kubernetes/logs \
  4 --hostname-override=Node1 \
  5 --network-plugin=cni \
  6 --kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \
  7 --bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \
  8 --config=/opt/kubernetes/cfg/kubelet-config.yml \
  9 --cert-dir=/opt/kubernetes/ssl \
 10 --pod-infra-container-image=lizhenliang/pause-amd64:3.0"
                              #(6) Edit the bootstrap.kubeconfig file
                              #Bootstrap exists so that kubelet certificates are issued automatically
                              #for nodes joining the cluster; every component that connects to the
                              #apiserver needs a certificate. This file can also be generated with
                              #kubectl config (see the sketch after the file)
                              #CA certificate used is /opt/kubernetes/ssl/ca.pem
                              #Master address is 192.168.1.63:6443
                              #The token value here must match the one in /opt/kubernetes/cfg/token.csv
  1 apiVersion: v1
  2 clusters:
  3 - cluster:
  4     certificate-authority: /opt/kubernetes/ssl/ca.pem
  5     server: https://192.168.1.63:6443
  6   name: kubernetes
  7 contexts:
  8 - context:
  9     cluster: kubernetes
 10     user: kubelet-bootstrap
 11   name: default
 12 current-context: default
 13 kind: Config
 14 preferences: {}
 15 users:
 16 - name: kubelet-bootstrap
 17   user:
 18     token: c47ffb939f5ca36231d9e3121a252940
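As noted above, bootstrap.kubeconfig can also be produced with kubectl config instead of being written by hand; a minimal sketch using the values from this cluster:

KUBE_APISERVER="https://192.168.1.63:6443"
TOKEN="c47ffb939f5ca36231d9e3121a252940"          # must match token.csv
kubectl config set-cluster kubernetes \
  --certificate-authority=/opt/kubernetes/ssl/ca.pem \
  --server=${KUBE_APISERVER} --kubeconfig=bootstrap.kubeconfig
kubectl config set-credentials kubelet-bootstrap \
  --token=${TOKEN} --kubeconfig=bootstrap.kubeconfig
kubectl config set-context default --cluster=kubernetes \
  --user=kubelet-bootstrap --kubeconfig=bootstrap.kubeconfig
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig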
 
                               #(7) Edit the kubelet-config.yml file
                              #The object defined is a KubeletConfiguration
                              #with this api version
  1 kind: KubeletConfiguration
  2 apiVersion: kubelet.config.k8s.io/v1beta1
  3 address: 0.0.0.0
  4 port: 10250               #address and port the kubelet listens on
  5 readOnlyPort: 10255
  6 cgroupDriver: cgroupfs    #must match the cgroup driver shown by docker info
  7 clusterDNS:               #cluster-internal DNS address configured for the kubelet
  8 - 10.0.0.2
  9 clusterDomain: cluster.local
 10 failSwapOn: false         #do not refuse to start when swap is enabled
 11 authentication:           #the next 8 lines configure authentication
 12   anonymous:
 13     enabled: false
 14   webhook:
 15     cacheTTL: 2m0s
 16     enabled: true
 17   x509:
 18     clientCAFile: /opt/kubernetes/ssl/ca.pem
 19 authorization:
 20   mode: Webhook
 21   webhook:
 22     cacheAuthorizedTTL: 5m0s
 23     cacheUnauthorizedTTL: 30s
 24 evictionHard:
 25   imagefs.available: 15%
 26   memory.available: 100Mi
 27   nodefs.available: 10%
 28   nodefs.inodesFree: 5%
 29 maxOpenFiles: 1000000
 30 maxPods: 110
 
                               #(8) Edit the kube-proxy.kubeconfig file
                              #CA certificate kube-proxy uses when connecting to the apiserver:
                              #/opt/kubernetes/ssl/ca.pem
                              #Client certificate files used by kube-proxy:
                              #/opt/kubernetes/ssl/kube-proxy.pem
                              #/opt/kubernetes/ssl/kube-proxy-key.pem
  1 apiVersion: v1
  2 clusters:
  3 - cluster:                
  4     certificate-authority: /opt/kubernetes/ssl/ca.pem
  5     server: https://192.168.1.63:6443
  6   name: kubernetes
  7 contexts:
  8 - context:
  9     cluster: kubernetes
 10     user: kube-proxy
 11   name: default
 12 current-context: default
 13 kind: Config
 14 preferences: {}
 15 users:
 16 - name: kube-proxy 
 17   user:                    
 18     client-certificate: /opt/kubernetes/ssl/kube-proxy.pem
 19     client-key: /opt/kubernetes/ssl/kube-proxy-key.pem

                               #(9) Edit the kube-proxy-config.yml file
                              #This yml file allows kube-proxy settings to be adjusted dynamically
                              #kube-proxy listens on 0.0.0.0
                              #Metrics are exposed on 0.0.0.0:10249 for the monitoring system
  1 kind: KubeProxyConfiguration
  2 apiVersion: kubeproxy.config.k8s.io/v1alpha1
  3 address: 0.0.0.0
  4 metricsBindAddress: 0.0.0.0:10249
  5 clientConnection:
  6   kubeconfig: /opt/kubernetes/cfg/kube-proxy.kubeconfig
  7 hostnameOverride: Node1   #hostname registered with the k8s cluster
  8 clusterCIDR: 10.0.0.0/24  #service IP range of the cluster
  9 mode: ipvs
 10 ipvs:
 11   scheduler: "rr"
 12 iptables:
 13   masqueradeAll: true
                                           
                              #(10) Copy the kubernetes directory to /opt on the node
                              #(11) Start the services and check the logs
systemctl start kubelet kube-proxy    
systemctl enable kubelet 
systemctl enable  kube-proxy
less /opt/kubernetes/logs/kubelet.INFO    
less /opt/kubernetes/logs/kube-proxy.INFO
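Since kube-proxy runs in ipvs mode here, the rules it programs can be inspected on the node once services exist (sketch; requires the ipvsadm package):

yum -y install ipvsadm
ipvsadm -Ln        # lists the virtual servers and real servers kube-proxy has created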

                              #(12) On the master, list the certificate signing requests the nodes
                              #have submitted via kubelet-bootstrap and approve them by hand
                              #(13) On node1, check the kubelet certificate issued by the master;
                              #kubelet.kubeconfig is generated automatically at the same time and
                              #is what the kubelet uses to connect to the apiserver
[root@master1 ~]# kubectl get csr
NAME                                                 AGE REQUESTOR         CONDITION
node-csr-XcxFwsj3qE6-c9ayjPe2sHehWiwepsquOBIGyfP5orQ 27m kubelet-bootstrap Pending
[root@master1 ~]#                              
[root@master1 ~]# kubectl certificate approve node-csr-XcxFwsj3qE6-c9ayjPe2sHehWiwepsquOBIGyfP5orQ
[root@Node1 ~]# cd /opt/kubernetes/ssl/
[root@Node1 ssl]# ls  kubelet*
kubelet-client-2020-08-30-09-30-55.pem  kubelet-client-current.pem  kubelet.crt  kubelet.key
[root@Node1 ~]# ls /opt/kubernetes/cfg/kubelet.kubeconfig 
/opt/kubernetes/cfg/kubelet.kubeconfig
[root@Node1 ~]#                              
                              
                              #(14) Copy the kubernetes directory to /opt on the second node
                              #(15) Start the services and check the logs
                              #(16) On the master, approve the certificate for the second node
                              #(17) On the master, list the nodes
[root@master1 ~]# kubectl get node
NAME    STATUS     ROLES    AGE   VERSION
node1   NotReady   <none>   59m   v1.16.0
node2   NotReady   <none>   45s   v1.16.0
[root@master1 ~]#    

 

Deploy the CNI network

https://github.com/containernetworking/plugins/releases
https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/

                              #(1) Create the directories CNI needs on the node
                              #(2) Copy the cni archive to /opt/cni/bin/ on the worker node
                              #(3) After unpacking, the node can load third-party CNI plugins
                              #(4) Download kube-flannel.yaml, the manifest for the flannel component
                              #(5) Install flannel from that manifest
                              #(6) flannel runs as container images; check with kubectl
                              #(7) The node status should now change to Ready
                              #(8) Repeat on the other nodes
[root@Node1 ~]# mkdir  -p  /opt/cni/bin  /etc/cni/net.d                              
[root@Node1 ~]# cd /opt/cni/bin/                              
[root@Node1 bin]# tar -zxvf cni-plugins-linux-amd64-v0.8.2.tgz                              
[root@master1 ~]# kubectl apply -f kube-flannel.yaml                              
[root@master1 ~]# kubectl get pods -n kube-system
NAME                          READY   STATUS    RESTARTS   AGE
kube-flannel-ds-amd64-h9dg6   1/1     Running   0          42m
kube-flannel-ds-amd64-w7b9x   1/1     Running   0          42m                              
[root@master1 ~]# kubectl get node      
NAME    STATUS   ROLES    AGE     VERSION
node1   Ready    <none>   6h9m    v1.16.0
node2   Ready    <none>   5h10m   v1.16.0
[root@master1 ~]#     
 
                             #Contents of kube-flannel.yaml; the network information in this file
                             #is written to /etc/cni/net.d
---
 97 kind: ConfigMap                     
 98 apiVersion: v1
 99 metadata:
100   name: kube-flannel-cfg
101   namespace: kube-system
102   labels:
103     tier: node
104     app: flannel
105 data:
106   cni-conf.json: |
107     {
108       "cniVersion": "0.2.0",
109       "name": "cbr0",
110       "plugins": [
111         {
112           "type": "flannel",
113           "delegate": {
114             "hairpinMode": true,
115             "isDefaultGateway": true
116           }
117         },
118         {
119           "type": "portmap",
120           "capabilities": {
121             "portMappings": true
122           }
123         }
124       ]
125     }
126   net-conf.json: |                  #network used by flannel; it must match the
127     {                               #cluster-cidr configured in
128       "Network": "10.244.0.0/16",   #/opt/kubernetes/cfg/kube-controller-manager.conf
129       "Backend": {
130         "Type": "vxlan"             #the encapsulation mode is vxlan
131       }
132     }

133 ---
134 apiVersion: apps/v1
135 kind: DaemonSet                     #a DaemonSet runs one independent flannel process on
136 metadata:                           #every worker node, each maintaining its own routes
137   name: kube-flannel-ds-amd64
138   namespace: kube-system
139   labels:
140     tier: node
141     app: flannel
142 spec:
143   selector:
144     matchLabels:
145       app: flannel
146   template:
147     metadata:
148       labels:
149         tier: node
150         app: flannel
151     spec:
152       affinity:
153         nodeAffinity:
154           requiredDuringSchedulingIgnoredDuringExecution:
155             nodeSelectorTerms:
156               - matchExpressions:
157                   - key: beta.kubernetes.io/os
158                     operator: In
159                     values:
160                       - linux
161                   - key: beta.kubernetes.io/arch
162                     operator: In
163                     values:
164                       - amd64       #target platform is amd64
165       hostNetwork: true             #uses the host network, i.e. the network of Node1 and Node2
166       tolerations:
167       - operator: Exists
168         effect: NoSchedule
169       serviceAccountName: flannel
170       initContainers:
171       - name: install-cni
172         image: lizhenliang/flannel:v0.11.0-amd64
173         command:
174         - cp
175         args:
176         - -f
177         - /etc/kube-flannel/cni-conf.json
178         - /etc/cni/net.d/10-flannel.conflist
179         volumeMounts:
180         - name: cni
181           mountPath: /etc/cni/net.d
182         - name: flannel-cfg
183           mountPath: /etc/kube-flannel/
184       containers:                  #image used by this network plugin
185       - name: kube-flannel
186         image: lizhenliang/flannel:v0.11.0-amd64
187         command:
188         - /opt/bin/flanneld    

 

Authorize the apiserver to access the kubelet

                                      #(1) For security the kubelet refuses anonymous access; it must be
                                      #authorized first. apiserver-to-kubelet-rbac.yaml grants the
                                      #apiserver access to the kubelet API
                                      #(2) Once authorized, pod logs can be read from the master node
                                      #(3) Each node has a flannel network interface
                                      #(4) Create a pod on the master and watch its status change
                                      #from ContainerCreating to Running
                                      #(5) Expose the web service the pod provides
                                      #(6) Check the pod and service information, then open
                                      #http://192.168.1.65:31513/ in a browser to see the nginx welcome page
[root@master1 ~]# kubectl apply -f apiserver-to-kubelet-rbac.yaml                              
[root@master1 ~]# kubectl logs -n kube-system kube-flannel-ds-amd64-h9dg6                              
[root@Node1 ~]# ifconfig flannel.1
flannel.1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1450
        inet 10.244.0.0  netmask 255.255.255.255  broadcast 0.0.0.0
        ether f6:2a:44:e5:f1:8f  txqueuelen 0  (Ethernet)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
[root@Node1 ~]#            
[root@master1 ~]# kubectl create  deployment web --image=nginx        
[root@master1 ~]# kubectl get pods -o wide
NAME                  READY   STATUS    RESTARTS   AGE     IP           NODE   
web-d86c95cc9-v6ws9   1/1     Running   0          9m30s   10.244.0.2   node1  
[root@master1 ~]#
[root@master1 ~]# kubectl  expose  deployment web --port=80 --type=NodePort    
[root@master1 ~]# kubectl get pods,svc
NAME                      READY   STATUS    RESTARTS   AGE
pod/web-d86c95cc9-v6ws9   1/1     Running   0          15m

NAME                 TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)        AGE
service/kubernetes   ClusterIP   10.0.0.1     <none>        443/TCP        2d
service/web          NodePort    10.0.0.98    <none>        80:31513/TCP   49s
[root@master1 ~]#
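Besides the browser, the NodePort can be checked from the command line of any machine that can reach a node, for example:

curl -I http://192.168.1.65:31513/        # should return HTTP/1.1 200 OK from nginx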

 

Deploy the dashboard
https://github.com/kubernetes/dashboard
https://kubernetes.io/docs/tasks/access-application-cluster/web-ui-dashboard/

                                      #(1) Deploy the dashboard from dashboard.yaml
                                      #(2) In the kubernetes-dashboard namespace, check the pods;
                                      #their status must change from ContainerCreating to Running
                                      #(3) Check the exposed port and open https://192.168.1.65:30001/
                                      #(port 30001 on any Node) in a browser
                                      #(4) dashboard-adminuser.yaml creates a ServiceAccount, binds it
                                      #to cluster-admin, and a token is created for it
                                      #(5) Fetch the token with a shell command and paste it into
                                      #the browser
                                      #(6) Scale the web pod through the UI:
                                      #left navigation Workloads --> Deployments --> on the right, the web
                                      #deployment's ... menu, choose Scale and set the replica count to 3
[root@master1 ~]# kubectl apply -f dashboard.yaml          
[root@master1 ~]# kubectl get pods -n kubernetes-dashboard
NAME                                         READY   STATUS    RESTARTS   AGE
dashboard-metrics-scraper-566cddb686-mdnf8   1/1     Running   0          17m
kubernetes-dashboard-7b5bf5d559-4hjbr        1/1     Running   0          17m
[root@master1 ~]#
[root@master1 ~]# kubectl get pods,svc  -n kubernetes-dashboard
NAME                                             READY   STATUS    RESTARTS   AGE
pod/dashboard-metrics-scraper-566cddb686-mdnf8   1/1     Running   0          19m
pod/kubernetes-dashboard-7b5bf5d559-4hjbr        1/1     Running   0          19m

NAME                              TYPE       CLUSTER-IP EXTERNAL-IP PORT(S)        AGE
service/dashboard-metrics-scraper ClusterIP  10.0.0.186 <none>      8000/TCP       19m
service/kubernetes-dashboard      NodePort   10.0.0.211 <none>      443:30001/TCP  19m
[root@master1 ~]#          
[root@master1 ~]# kubectl  apply -f dashboard-adminuser.yaml          
[root@master1 ~]# kubectl -n kubernetes-dashboard describe secret $(kubectl -n 
kubernetes-dashboard get secret | grep admin-user | awk '{print $1}')          
[root@master1 ~]#          
[root@master1 ~]# kubectl get pods           
NAME                  READY   STATUS    RESTARTS   AGE
web-d86c95cc9-v6ws9   1/1     Running   0          91m
[root@master1 ~]#                  
[root@master1 ~]# kubectl get pods
NAME                  READY   STATUS    RESTARTS   AGE
web-d86c95cc9-srqb2   1/1     Running   0          2m56s
web-d86c95cc9-v6ws9   1/1     Running   0          97m
web-d86c95cc9-z4wzd   1/1     Running   0          2m56s
[root@master1 ~]#           

 

Deploy CoreDNS

https://github.com/kubernetes/kubernetes/tree/master/cluster/addons/dns/coredns

                                      #(1) List the current resources in preparation for the DNS test
                                      #(2) Edit coredns.yaml and use it to deploy CoreDNS
                                      #(3) Create a busybox pod to test DNS resolution
[root@master1 ~]# kubectl get svc
NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)        AGE
kubernetes   ClusterIP   10.0.0.1     <none>        443/TCP        2d1h
web          NodePort    10.0.0.98    <none>        80:31513/TCP   91m
[root@master1 ~]#         
[root@master1 ~]# vim coredns.yaml         
173 spec:
174   selector:
175     k8s-app: kube-dns
176   clusterIP: 10.0.0.2         #IP of the DNS service; the kubelet sends DNS
177   ports:                      #queries to this address. It must match the
178   - name: dns                 #clusterDNS address defined in
179     port: 53                  #/opt/kubernetes/cfg/kubelet-config.yml on the nodes
180     protocol: UDP
181   - name: dns-tcp
182     port: 53
183     protocol: TCP
[root@Node1 ~]# sed -n '7,8p' /opt/kubernetes/cfg/kubelet-config.yml
clusterDNS:
- 10.0.0.2
[root@Node1 ~]#    
[root@master1 ~]# kubectl  apply -f coredns.yaml     
[root@master1 ~]# kubectl get pods -n kube-system
NAME                          READY   STATUS    RESTARTS   AGE
coredns-6d8cfdd59d-td2wf      1/1     Running   0          115s
kube-flannel-ds-amd64-h9dg6   1/1     Running   0          3h51m
kube-flannel-ds-amd64-w7b9x   1/1     Running   0          3h51m
[root@master1 ~]#     
[root@master1 ~]# kubectl apply -f bs.yaml                                  
[root@master1 ~]# kubectl get pod
NAME                  READY   STATUS    RESTARTS   AGE
busybox               1/1     Running   0          78s
web-d86c95cc9-srqb2   1/1     Running   0          34m
web-d86c95cc9-v6ws9   1/1     Running   0          129m
web-d86c95cc9-z4wzd   1/1     Running   0          34m
[root@master1 ~]# 
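bs.yaml itself is not listed above; a minimal sketch of a busybox pod manifest suitable for this DNS test (the image tag and sleep command are assumptions) looks like:

apiVersion: v1
kind: Pod
metadata:
  name: busybox
spec:
  containers:
  - name: busybox
    image: busybox:1.28          # 1.28 is often used because its nslookup output is well behaved
    command: ["sleep", "3600"]
  restartPolicy: Always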
[root@master1 ~]# kubectl exec -it busybox sh
/ # ping 10.0.0.98
PING 10.0.0.98 (10.0.0.98): 56 data bytes
64 bytes from 10.0.0.98: seq=0 ttl=255 time=0.116 ms
64 bytes from 10.0.0.98: seq=1 ttl=255 time=0.080 ms
/ # ping web
PING web (10.0.0.98): 56 data bytes
64 bytes from 10.0.0.98: seq=0 ttl=255 time=0.043 ms
64 bytes from 10.0.0.98: seq=1 ttl=255 time=0.062 ms            
/ # nslookup kubernetes
Server:    10.0.0.2
Address 1: 10.0.0.2 kube-dns.kube-system.svc.cluster.local

Name:      kubernetes
Address 1: 10.0.0.1 kubernetes.default.svc.cluster.local
/ # 
/ # nslookup web
Server:    10.0.0.2
Address 1: 10.0.0.2 kube-dns.kube-system.svc.cluster.local

Name:      web
Address 1: 10.0.0.98 web.default.svc.cluster.local
/ # 

 

Configure the load balancer

                                      #(1) Install nginx 1.16 from an rpm on the LB hosts
                                      #(2) Add a stream{} block above http{} in the configuration file
                                      #(3) Start the nginx service
                                      #(4) Install the keepalived package
                                      #(5) Remove the default keepalived.conf, put the template file
                                      #under /etc/keepalived/ and edit it
                                      #(6) Make check_nginx.sh executable
                                      #(7) Start keepalived and check the process
                                      #(8) Configure LB2 the same way
                                      #(9) Use ip add show to check the VIP on LB1 and LB2
[root@LB1 ~]# rpm -ivh http://nginx.org/packages/rhel/7/x86_64/RPMS/nginx-1.16.1-1.el7.ngx.x86_64.rpm
[root@LB1 ~]# vim /etc/nginx/nginx.conf 
stream {

    log_format  main  '$remote_addr $upstream_addr - [$time_local] $status $upstream_bytes_sent';

    access_log  /var/log/nginx/k8s-access.log  main;

    upstream k8s-apiserver {
                server 192.168.1.63:6443;
                server 192.168.1.64:6443;
            }
    
    server {
       listen 6443;
       proxy_pass k8s-apiserver;
    }
}
[root@LB1 ~]# systemctl start nginx
[root@LB1 ~]# systemctl enable nginx 
[root@LB1 ~]# yum -y install keepalived.x86_64 
[root@LB1 ~]# cd /etc/keepalived/
[root@LB1 keepalived]# cat keepalived.conf    
global_defs { 
   notification_email { 
     acassen@firewall.loc 
     failover@firewall.loc 
     sysadmin@firewall.loc 
   } 
   notification_email_from Alexandre.Cassen@firewall.loc  
   smtp_server 127.0.0.1 
   smtp_connect_timeout 30 
   router_id NGINX_MASTER
} 
                                         #Between global_defs{} and vrrp_instance{} add the failover
                                         #check script
vrrp_script check_nginx {                #check_nginx.sh decides whether nginx is healthy
    script "/etc/keepalived/check_nginx.sh"
}

vrrp_instance VI_1 { 
    state MASTER                         #use BACKUP on the standby node
    interface eth0                       #enable the VIP on eth0
    virtual_router_id 51                 # VRRP router ID, unique per instance
    priority 100                         # priority, set 90 on the standby server
    advert_int 1                         # VRRP advertisement interval, default 1 second
    authentication { 
        auth_type PASS      
        auth_pass 1111 
    }  
    virtual_ipaddress { 
        192.168.1.60/24
    } 
    track_script {                       #add the health check to the instance
        check_nginx                      #a non-zero return from check_nginx means nginx has failed
    }                                    #and keepalived performs a failover
}
[root@LB1 keepalived]#
[root@LB1 keepalived]# chmod +x check_nginx.sh
[root@LB1 keepalived]# cat check_nginx.sh 
#!/bin/bash                              #check whether an nginx process exists: exit 0 if it does,
                                         #exit 1 otherwise
count=$(ps -ef |grep nginx |egrep -cv "grep|$$")

if [ "$count" -eq 0 ];then
    exit 1
else
    exit 0
fi
[root@LB1 keepalived]#  
[root@LB1 ~]# systemctl start keepalived  
[root@LB1 ~]# systemctl enable keepalived
[root@LB1 ~]# ps -ef | grep keep 
[root@LB1 ~]# ip add    
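Failover can be tested by stopping nginx on LB1: check_nginx.sh then exits non-zero, keepalived gives up MASTER and the VIP should move to LB2 (sketch):

[root@LB1 ~]# systemctl stop nginx
[root@LB1 ~]# ip addr show eth0 | grep 192.168.1.60        # VIP should be gone here
[root@LB2 ~]# ip addr show eth0 | grep 192.168.1.60        # ...and present on LB2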

 

Change the apiserver address the Nodes use

                                      #(1) On both Nodes, change the apiserver address in
                                      #bootstrap.kubeconfig, kubelet.kubeconfig and
                                      #kube-proxy.kubeconfig in one pass
                                      #(2) Restart kubelet and kube-proxy on the Nodes and check
                                      #the logs
                                      #(3) Verify that the VIP is in effect:
                                      #on a node, take the token value from the master's
                                      #/opt/kubernetes/cfg/token.csv and query the apiserver
                                      #version with curl -k
[root@Node1 ~]# cd /opt/kubernetes/cfg/
[root@Node1 cfg]# grep  192.168.1.63 ./*
./bootstrap.kubeconfig:     server: https://192.168.1.63:6443
./kubelet.kubeconfig:       server: https://192.168.1.63:6443
./kube-proxy.kubeconfig:    server: https://192.168.1.63:6443
[root@Node1 cfg]# 
[root@Node1 cfg]# sed -i 's#192.168.1.63#192.168.1.60#' ./*
[root@Node2 ~]# cd /opt/kubernetes/cfg/
[root@Node2 cfg]# sed -i  's#192.168.1.63#192.168.1.60#' ./*  
[root@Node1 cfg]# systemctl restart kubelet
[root@Node1 cfg]# systemctl restart kube-proxy
[root@Node2 cfg]# systemctl restart kubelet
[root@Node2 cfg]# systemctl restart kube-proxy
[root@LB1 ~]# tail -f  /var/log/nginx/k8s-access.log 
192.168.1.65 192.168.1.63:6443 - [31/Aug/2020:15:05:16 +0800] 200 1155
192.168.1.65 192.168.1.64:6443 - [31/Aug/2020:15:05:16 +0800] 200 1156
192.168.1.66 192.168.1.63:6443 - [31/Aug/2020:15:12:18 +0800] 200 1156
192.168.1.66 192.168.1.63:6443 - [31/Aug/2020:15:12:18 +0800] 200 1155 
[root@Node1 ~]# curl -k --header "Authorization: Bearer c47ffb939f5ca36231d9e3121a252940" https://192.168.1.60:6443/version
{
  "major": "1",
  "minor": "16",
  "gitVersion": "v1.16.0",
  "gitCommit": "2bd9643cee5b3b3a5ecbd3af49d09018f0773c77",
  "gitTreeState": "clean",
  "buildDate": "2019-09-18T14:27:17Z",
  "goVersion": "go1.12.9",
  "compiler": "gc",
  "platform": "linux/amd64"
}
[root@Node1 ~]#