Kubernetes Series 3: Installing a Kubernetes Environment from Binaries

Installation environment:

# Node information for the three hosts
192.168.31.11  hostname: env11  role: Master node / Node / etcd member
192.168.31.12  hostname: env12  role: Node / etcd member
192.168.31.13  hostname: env13  role: Node / etcd member
# Operating system version
CentOS Linux release 7.4.1708 (Core)
# Stop and disable firewalld and SELinux on every node
systemctl stop firewalld; systemctl disable firewalld
setenforce 0; sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/sysconfig/selinux

1. Environment Initialization

Perform the initialization identically on all three nodes.

# Add a domestic (Aliyun) Docker repository
cd /etc/yum.repos.d/
wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
# Install Docker
yum install -y docker-ce
# Start Docker and enable it at boot
systemctl start docker; systemctl enable docker
# Prepare the deployment directories
mkdir -p /opt/kubernetes/{cfg,bin,ssl,log}
# Baidu Netdisk download for the binaries
Link: https://pan.baidu.com/s/1PrunUVHfwL97jjc4PqL_Bw
Extraction code: 4oeg
# After unpacking, copy the files into place
Copy the files under k8s-v1.10.3/bin/, etcd-v3.3.18-linux-amd64 and flannel-v0.10.0-linux-amd64 into /opt/kubernetes/bin
Copy the cni-plugins-amd64-v0.8.4 directory into /opt/kubernetes/bin and rename it to cni
# Add the binaries to PATH
vim .bash_profile
PATH=$PATH:$HOME/bin:/opt/kubernetes/bin
source .bash_profile

2. Creating the CA Certificate

Since Kubernetes 1.8, the cluster components use TLS certificates to encrypt their communication, and every cluster needs its own independent CA. The CA can be generated with easyrsa, openssl, or cfssl. This guide uses cfssl, currently the most widely used option and relatively simple to configure: everything certificate-related is described in JSON. The cfssl version used here is R1.2.

2.1. Installing CFSSL

The official cfssl site is http://pkg.cfssl.org, where the latest cfssl release can be downloaded.

wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
chmod +x cfssl_linux-amd64 cfssljson_linux-amd64 cfssl-certinfo_linux-amd64
mv cfssl_linux-amd64 /usr/local/bin/cfssl
mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo

2.2. Creating the JSON Configuration File Used to Generate the CA

# create a temporary directory for generating certificates
[root@env11 ssl]# mkdir -p /opt/src/ssl
[root@env11 ssl]# cd /opt/src/ssl
[root@env11 ssl]# vim ca-config.json
{
  "signing": {
    "default": {
      "expiry": "8760h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ],
        "expiry": "8760h"
      }
    }
  }
}

2.3. Creating the JSON Configuration File for the CA Certificate Signing Request (CSR)

[root@env11 ssl]# cd /opt/src/ssl
[root@env11 ssl]# vim ca-csr.json
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}

2.4. Generating the CA Certificate (ca.pem) and Key (ca-key.pem)

[root@env11 ssl]# cfssl gencert -initca ca-csr.json | cfssljson -bare ca
[root@env11 ssl]# ls -l ca*
-rw-r--r--. 1 root root  290 Apr 14 19:00 ca-config.json
-rw-r--r--. 1 root root  208 Apr 14 19:00 ca-csr.json
-rw-------. 1 root root 1679 Apr 14 12:06 ca-key.pem
-rw-r--r--. 1 root root 1001 Apr 14 12:06 ca.csr
-rw-r--r--. 1 root root 1359 Apr 14 12:06 ca.pem

2.5. Distributing the Certificates

Copy the generated files ca.csr, ca.pem, ca-key.pem, and ca-config.json to /opt/kubernetes/ssl on all three nodes.
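
One way to push them is a small scp loop (a sketch that assumes password-less root SSH to the three nodes; adjust hosts and paths as needed):

# run from /opt/src/ssl on env11, where the CA material was generated
for node in 192.168.31.11 192.168.31.12 192.168.31.13; do
  scp ca.csr ca.pem ca-key.pem ca-config.json ${node}:/opt/kubernetes/ssl/
done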

3. Deploying the etcd Cluster

All persistent cluster state is stored in etcd as key/value pairs; like ZooKeeper, it provides distributed coordination. The Kubernetes components themselves can be considered stateless precisely because all data lives in etcd. Since etcd supports clustering, it is deployed on all three hosts here.

3.1. Creating the etcd Certificate Signing Request

[root@env11 ssl]# cd /opt/src/ssl
[root@env11 ssl]# vim etcd-csr.json
{
  "CN": "etcd",
  "hosts": [         # 此處的IP就是ETCD各個集羣的IP
    "127.0.0.1",
    "192.168.31.11",
    "192.168.31.12",
    "192.168.31.13"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}

Generate the etcd certificate and private key:

cfssl gencert -ca=/opt/src/ssl/ca.pem \
  -ca-key=/opt/src/ssl/ca-key.pem \
  -config=/opt/src/ssl/ca-config.json \
  -profile=kubernetes etcd-csr.json | cfssljson -bare etcd
[root@env11 ssl]# ls -l etcd*
-rw-r--r--. 1 root root  299 Apr 14 19:06 etcd-csr.json
-rw-------. 1 root root 1675 Apr 14 12:08 etcd-key.pem
-rw-r--r--. 1 root root 1062 Apr 14 12:08 etcd.csr
-rw-r--r--. 1 root root 1436 Apr 14 12:08 etcd.pem

3.2. Distributing the Certificates

Copy the generated etcd.pem and etcd-key.pem to /opt/kubernetes/ssl on all three nodes.

3.3. Creating the etcd Configuration File

Port 2379 is used for client (external) communication, 2380 for peer (internal) communication.

[root@env11 ~]# vim /opt/kubernetes/cfg/etcd.conf
#[member]
ETCD_NAME="etcd-node1"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
#ETCD_SNAPSHOT_COUNTER="10000"
#ETCD_HEARTBEAT_INTERVAL="100"
#ETCD_ELECTION_TIMEOUT="1000"
ETCD_LISTEN_PEER_URLS="https://192.168.31.11:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.31.11:2379,https://127.0.0.1:2379"
#ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
#ETCD_CORS=""
#[cluster]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.31.11:2380"
# if you use different ETCD_NAME (e.g. test),
# set ETCD_INITIAL_CLUSTER value for this name, i.e. "test=http://..."
ETCD_INITIAL_CLUSTER="etcd-node1=https://192.168.31.11:2380,etcd-node2=https://192.168.31.12:2380,etcd-node3=https://192.168.31.13:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="k8s-etcd-cluster"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.31.11:2379"
#[security]
CLIENT_CERT_AUTH="true"
ETCD_CA_FILE="/opt/kubernetes/ssl/ca.pem"
ETCD_CERT_FILE="/opt/kubernetes/ssl/etcd.pem"
ETCD_KEY_FILE="/opt/kubernetes/ssl/etcd-key.pem"
PEER_CLIENT_CERT_AUTH="true"
ETCD_PEER_CA_FILE="/opt/kubernetes/ssl/ca.pem"
ETCD_PEER_CERT_FILE="/opt/kubernetes/ssl/etcd.pem"
ETCD_PEER_KEY_FILE="/opt/kubernetes/ssl/etcd-key.pem"

3.4. Creating the etcd systemd Service

[root@env11 ~]# vim /etc/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
 
[Service]
WorkingDirectory=/var/lib/etcd
EnvironmentFile=-/opt/kubernetes/cfg/etcd.conf
# set GOMAXPROCS to number of processors
ExecStart=/bin/bash -c "GOMAXPROCS=$(nproc) /opt/kubernetes/bin/etcd"
Type=notify
 
[Install]
WantedBy=multi-user.target

3.5. Distributing the Configuration Files

Copy /opt/kubernetes/cfg/etcd.conf and /etc/systemd/system/etcd.service to the corresponding directories on all three machines, then reload systemd and enable the etcd service at boot.

# run on all three nodes
[root@env11 ~]# systemctl daemon-reload
[root@env11 ~]# systemctl enable etcd

The copied /opt/kubernetes/cfg/etcd.conf must be adjusted per node:

# values to change on env12
ETCD_NAME="etcd-node2"
ETCD_LISTEN_PEER_URLS="https://192.168.31.12:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.31.12:2379,https://127.0.0.1:2379"
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.31.12:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.31.12:2379"
# values to change on env13
ETCD_NAME="etcd-node3"
ETCD_LISTEN_PEER_URLS="https://192.168.31.13:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.31.13:2379,https://127.0.0.1:2379"
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.31.13:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.31.13:2379"

3.6. Creating the etcd Data Directory and Starting etcd on All Three Nodes

# run on all three nodes
[root@env11 ~]# mkdir /var/lib/etcd/
[root@env11 ~]# systemctl start etcd
[root@env11 ~]# netstat -tunlp | grep etcd
tcp        0      0 192.168.31.11:2379      0.0.0.0:*               LISTEN      6002/etcd          
tcp        0      0 127.0.0.1:2379          0.0.0.0:*               LISTEN      6002/etcd          
tcp        0      0 192.168.31.11:2380      0.0.0.0:*               LISTEN      6002/etcd

3.7. Verifying the etcd Cluster

[root@env11 ~]# etcdctl --endpoints=https://192.168.31.11:2379 \
--ca-file=/opt/kubernetes/ssl/ca.pem \
--cert-file=/opt/kubernetes/ssl/etcd.pem \
--key-file=/opt/kubernetes/ssl/etcd-key.pem cluster-health
member f35b781c28383ca is healthy: got healthy result from https://192.168.31.11:2379
member 18320ec3b6a86db4 is healthy: got healthy result from https://192.168.31.12:2379
member 5f87ed09e484b6b3 is healthy: got healthy result from https://192.168.31.13:2379
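
The member list can be printed with the same TLS flags (a quick sketch; etcdctl here speaks the v2 API bundled with etcd 3.3):

[root@env11 ~]# etcdctl --endpoints=https://192.168.31.11:2379 \
--ca-file=/opt/kubernetes/ssl/ca.pem \
--cert-file=/opt/kubernetes/ssl/etcd.pem \
--key-file=/opt/kubernetes/ssl/etcd-key.pem member list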

4. Deploying the Master Node

4.1. Creating the JSON Configuration File for the CSR

[root@env11 ~]# cd /opt/src/ssl/
[root@env11 ssl]# vim kubernetes-csr.json
{
  "CN": "kubernetes",
  "hosts": [
    "127.0.0.1",
    "192.168.31.11",     # master的IP地址
    "10.1.0.1",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}

4.2. Generating the Kubernetes Certificate and Private Key

cfssl gencert -ca=/opt/src/ssl/ca.pem \
   -ca-key=/opt/src/ssl/ca-key.pem \
   -config=/opt/src/ssl/ca-config.json \
   -profile=kubernetes kubernetes-csr.json | cfssljson -bare kubernetes

Copy the generated kubernetes-key.pem and kubernetes.pem to /opt/kubernetes/ssl on all three machines.

4.3. Creating the Client Token File Used by kube-apiserver

[root@env11 ssl]# vim /opt/kubernetes/ssl/bootstrap-token.csv
bceaefa5f8d569895071fee2f77b5d3e,kubelet-bootstrap,10001,"system:kubelet-bootstrap"
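
The token value itself is arbitrary; a fresh one can be generated as follows (a sketch, only needed if you replace the value above — the same token must then be reused in the kubelet bootstrap kubeconfig later on):

head -c 16 /dev/urandom | od -An -t x | tr -d ' '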

4.4. Creating the Basic Username/Password Authentication File

[root@env11 ssl]# vim /opt/kubernetes/ssl/basic-auth.csv
admin,admin,1
readonly,readonly,2

4.5. Deploying the kube-apiserver systemd Service

[root@env11 ssl]# vim /usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target
 
[Service]
ExecStart=/opt/kubernetes/bin/kube-apiserver \
  --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota,NodeRestriction \
  --bind-address=192.168.31.11 \
  --insecure-bind-address=127.0.0.1 \
  --authorization-mode=Node,RBAC \
  --runtime-config=rbac.authorization.k8s.io/v1 \
  --kubelet-https=true \
  --anonymous-auth=false \
  --basic-auth-file=/opt/kubernetes/ssl/basic-auth.csv \
  --enable-bootstrap-token-auth \
  --token-auth-file=/opt/kubernetes/ssl/bootstrap-token.csv \
  --service-cluster-ip-range=10.1.0.0/16 \
  --service-node-port-range=20000-40000 \
  --tls-cert-file=/opt/kubernetes/ssl/kubernetes.pem \
  --tls-private-key-file=/opt/kubernetes/ssl/kubernetes-key.pem \
  --client-ca-file=/opt/kubernetes/ssl/ca.pem \
  --service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \
  --etcd-cafile=/opt/kubernetes/ssl/ca.pem \
  --etcd-certfile=/opt/kubernetes/ssl/kubernetes.pem \
  --etcd-keyfile=/opt/kubernetes/ssl/kubernetes-key.pem \
  --etcd-servers=https://192.168.31.11:2379,https://192.168.31.12:2379,https://192.168.31.13:2379 \
  --enable-swagger-ui=true \
  --allow-privileged=true \
  --audit-log-maxage=30 \
  --audit-log-maxbackup=3 \
  --audit-log-maxsize=100 \
  --audit-log-path=/opt/kubernetes/log/api-audit.log \
  --event-ttl=1h \
  --v=2 \
  --logtostderr=false \
  --log-dir=/opt/kubernetes/log
Restart=on-failure
RestartSec=5
Type=notify
LimitNOFILE=65536
 
[Install]
WantedBy=multi-user.target

4.6. Starting the API Server and Enabling It at Boot

[root@env11 ssl]# systemctl daemon-reload
[root@env11 ssl]# systemctl enable kube-apiserver; systemctl start kube-apiserver
[root@env11 ssl]# netstat -tunlp | grep kube-apiserver
tcp        0      0 192.168.31.11:6443      0.0.0.0:*               LISTEN      6008/kube-apiserver
tcp        0      0 127.0.0.1:8080          0.0.0.0:*               LISTEN      6008/kube-apiserver

The listening ports show that the API server listens on 6443 and also on local port 8080, the latter being used by kube-scheduler and kube-controller-manager. Both listeners can be probed as shown below.
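
A quick check (a sketch; the second call assumes the admin entry from basic-auth.csv created above and skips certificate verification with -k):

curl http://127.0.0.1:8080/healthz
curl -k -u admin:admin https://192.168.31.11:6443/version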

4.7. Deploying the kube-controller-manager systemd Service

The controller manager consists of a set of controllers that watch the cluster state through the API server and keep the cluster in its desired state.

[root@env11 ssl]# vim /usr/lib/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
 
[Service]
ExecStart=/opt/kubernetes/bin/kube-controller-manager \
  --address=127.0.0.1 \
  --master=http://127.0.0.1:8080 \
  --allocate-node-cidrs=true \
  --service-cluster-ip-range=10.1.0.0/16 \
  --cluster-cidr=10.2.0.0/16 \
  --cluster-name=kubernetes \
  --cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \
  --cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem \
  --service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem \
  --root-ca-file=/opt/kubernetes/ssl/ca.pem \
  --leader-elect=true \
  --v=2 \
  --logtostderr=false \
  --log-dir=/opt/kubernetes/log
 
Restart=on-failure
RestartSec=5
 
[Install]
WantedBy=multi-user.target

4.8. Starting the Controller Manager and Enabling It at Boot

[root@env11 ssl]# systemctl daemon-reload
[root@env11 ssl]# systemctl enable  kube-controller-manager; systemctl start  kube-controller-manager
[root@env11 ssl]# netstat -tunlp | grep kube-controll 
tcp        0      0 127.0.0.1:10252         0.0.0.0:*               LISTEN      12112/kube-controll

4.9. Deploying the kube-scheduler systemd Service

The scheduler assigns Pods to nodes in the cluster: it watches the API server for Pods that have not yet been bound to a node and binds them according to its scheduling policy.

[root@env11 ssl]# vim /usr/lib/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
 
[Service]
ExecStart=/opt/kubernetes/bin/kube-scheduler \
  --address=127.0.0.1 \
  --master=http://127.0.0.1:8080 \
  --leader-elect=true \
  --v=2 \
  --logtostderr=false \
  --log-dir=/opt/kubernetes/log
 
Restart=on-failure
RestartSec=5
 
[Install]
WantedBy=multi-user.target

4.10. Starting the Scheduler and Enabling It at Boot

[root@env11 ssl]# systemctl daemon-reload
[root@env11 ssl]# systemctl enable  kube-scheduler; systemctl start  kube-scheduler
[root@env11 ssl]# netstat -tunlp | grep kube-scheduler
tcp        0      0 127.0.0.1:10251         0.0.0.0:*               LISTEN      5662/kube-scheduler

4.11. Deploying the kubectl Command-Line Tool

With the kubectl binary already in place, create the admin certificate signing request.

[root@env11 ssl]# vim admin-csr.json
{
  "CN": "admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}

Generate the admin certificate and private key:

cfssl gencert -ca=/opt/src/ssl/ca.pem \
   -ca-key=/opt/src/ssl/ca-key.pem \
   -config=/opt/src/ssl/ca-config.json \
   -profile=kubernetes admin-csr.json | cfssljson -bare admin
[root@env11 ssl]# ls -l admin*
-rw-r--r--. 1 root root  229 Apr 14 15:32 admin-csr.json
-rw-------. 1 root root 1679 Apr 14 15:33 admin-key.pem
-rw-r--r--. 1 root root 1009 Apr 14 15:33 admin.csr
-rw-r--r--. 1 root root 1399 Apr 14 15:33 admin.pem
# copy the certificates into the local ssl directory
cp admin*.pem /opt/kubernetes/ssl/

Add the cluster to the kubeconfig file:

kubectl config set-cluster kubernetes \
   --certificate-authority=/opt/kubernetes/ssl/ca.pem \
   --embed-certs=true \
   --server=https://192.168.31.11:6443

Add the user to the kubeconfig file:

kubectl config set-credentials admin \
   --client-certificate=/opt/kubernetes/ssl/admin.pem \
   --embed-certs=true \
   --client-key=/opt/kubernetes/ssl/admin-key.pem

Add a context to the kubeconfig file:

kubectl config set-context kubernetes \
   --cluster=kubernetes \
   --user=admin

Set the default context:

kubectl config use-context kubernetes

Test that cluster information can be retrieved:

[root@env11 ssl]# kubectl get cs
NAME                 STATUS    MESSAGE              ERROR
scheduler            Healthy   ok                  
etcd-1               Healthy   {"health": "true"}  
etcd-0               Healthy   {"health": "true"}  
controller-manager   Healthy   ok                   
etcd-2               Healthy   {"health": "true"}

 

5. Deploying the Node Components (all three hosts act as Nodes)

5.1. Deploying kubelet

Create the role binding. When kubelet starts it sends a TLS bootstrap request to the API server, so the bootstrap token user must be bound to the corresponding role; otherwise kubelet is not allowed to create that request.

[root@env11]# kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap

Create the kubelet bootstrapping kubeconfig file and set the cluster parameters:

cd /opt/src/ssl
kubectl config set-cluster kubernetes \
   --certificate-authority=/opt/kubernetes/ssl/ca.pem \
   --embed-certs=true \
   --server=https://192.168.31.11:6443 \
   --kubeconfig=bootstrap.kubeconfig

Set the client authentication parameters (the token must match the one in bootstrap-token.csv):

kubectl config set-credentials kubelet-bootstrap \
   --token=bceaefa5f8d569895071fee2f77b5d3e \
   --kubeconfig=bootstrap.kubeconfig

Set the context:

kubectl config set-context default \
   --cluster=kubernetes \
   --user=kubelet-bootstrap \
   --kubeconfig=bootstrap.kubeconfig

Select the default context:

kubectl config use-context default --kubeconfig=bootstrap.kubeconfig

Distribute bootstrap.kubeconfig to /opt/kubernetes/cfg on all three nodes.

5.2. Deploying kubelet with CNI Support

Configure CNI to use flannel as the backing plugin.

[root@env11 ~]# mkdir -p /etc/cni/net.d
[root@env11 ~]# vim /etc/cni/net.d/10-default.conf
{
        "name": "flannel",
        "type": "flannel",
        "delegate": {
            "bridge": "docker0",
            "isDefaultGateway": true,
            "mtu": 1400
        }
}

Distribute /etc/cni/net.d/10-default.conf to the same directory on all three nodes, and create the kubelet data directory on each node:

[root@env11 ~]# mkdir -p /var/lib/kubelet/

Create the kubelet systemd service:

[root@env11 ~]# vim /usr/lib/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service
Requires=docker.service
 
[Service]
WorkingDirectory=/var/lib/kubelet
ExecStart=/opt/kubernetes/bin/kubelet \
  --address=192.168.31.11 \
  --hostname-override=192.168.31.11 \
  --pod-infra-container-image=mirrorgooglecontainers/pause-amd64:3.0 \
  --experimental-bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \
  --kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \
  --cert-dir=/opt/kubernetes/ssl \
  --network-plugin=cni \
  --cni-conf-dir=/etc/cni/net.d \
  --cni-bin-dir=/opt/kubernetes/bin/cni \
  --cluster-dns=10.1.0.2 \
  --cluster-domain=cluster.local. \
  --hairpin-mode hairpin-veth \
  --allow-privileged=true \
  --fail-swap-on=false \
  --v=2 \
  --logtostderr=false \
  --log-dir=/opt/kubernetes/log
Restart=on-failure
RestartSec=5
 
[Install]
WantedBy=multi-user.target

Distribute /usr/lib/systemd/system/kubelet.service to the other two nodes, change the address settings to each node's own IP (see the sketch below), then start kubelet and enable it at boot.
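
On env12, for example, the IP substitution can be done with sed (a hypothetical sketch; use 192.168.31.13 on env13):

sed -i 's/192.168.31.11/192.168.31.12/g' /usr/lib/systemd/system/kubelet.service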

systemctl daemon-reload
systemctl enable kubelet; systemctl start kubelet

On env11, check the pending CSR requests:

[root@env11 ssl]# kubectl get csr
NAME                                                   AGE       REQUESTOR           CONDITION
node-csr-GefhvoMO3iYaqsn-6PokIw0iC7n-TLH3MFezRD_cZPg   21s       kubelet-bootstrap   Pending
node-csr-_QoxwxQe13lLzZ0VgSh7P604iXeve0X6EaGB-rxFj9Q   40s       kubelet-bootstrap   Pending

Approve the kubelet TLS certificate requests:

kubectl get csr|grep 'Pending' | awk 'NR>0{print $1}'| xargs kubectl certificate approve

After approval, the nodes show up as Ready on env11:

[root@env11 ssl]# kubectl get node
NAME            STATUS    ROLES     AGE       VERSION
192.168.31.12   Ready     <none>    62d       v1.10.3
192.168.31.13   Ready     <none>    62d       v1.10.3

5.3. Deploying kube-proxy

Configure kube-proxy to use LVS (IPVS); install the prerequisites on all three nodes.

yum install -y ipvsadm ipset conntrack

Create the kube-proxy certificate signing request:

[root@env11 ssl]# cd /opt/src/ssl/
[root@env11 ssl]# vim kube-proxy-csr.json
{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}

Generate the certificate:

cfssl gencert -ca=/opt/src/ssl/ca.pem \
   -ca-key=/opt/src/ssl/ca-key.pem \
   -config=/opt/src/ssl/ca-config.json \
   -profile=kubernetes  kube-proxy-csr.json | cfssljson -bare kube-proxy

Distribute kube-proxy.csr, kube-proxy.pem, and kube-proxy-key.pem to all Node hosts.

Create the kube-proxy kubeconfig file:

kubectl config set-cluster kubernetes \
   --certificate-authority=/opt/kubernetes/ssl/ca.pem \
   --embed-certs=true \
   --server=https://192.168.31.11:6443 \
   --kubeconfig=kube-proxy.kubeconfig
  
kubectl config set-credentials kube-proxy \
   --client-certificate=/opt/kubernetes/ssl/kube-proxy.pem \
   --client-key=/opt/kubernetes/ssl/kube-proxy-key.pem \
   --embed-certs=true \
   --kubeconfig=kube-proxy.kubeconfig

kubectl config set-context default \
   --cluster=kubernetes \
   --user=kube-proxy \
   --kubeconfig=kube-proxy.kubeconfig

kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig

Distribute kube-proxy.kubeconfig to /opt/kubernetes/cfg/ on all three nodes.

Create the kube-proxy systemd service:

# create the data directory
mkdir /var/lib/kube-proxy
#
[root@env11 ssl]# vim /usr/lib/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target
 
[Service]
WorkingDirectory=/var/lib/kube-proxy
ExecStart=/opt/kubernetes/bin/kube-proxy \
  --bind-address=192.168.31.11 \
  --hostname-override=192.168.31.11 \
  --kubeconfig=/opt/kubernetes/cfg/kube-proxy.kubeconfig \
  --masquerade-all \
  --feature-gates=SupportIPVSProxyMode=true \
  --proxy-mode=ipvs \
  --ipvs-min-sync-period=5s \
  --ipvs-sync-period=5s \
  --ipvs-scheduler=rr \
  --v=2 \
  --logtostderr=false \
  --log-dir=/opt/kubernetes/log
 
Restart=on-failure
RestartSec=5
LimitNOFILE=65536
 
[Install]
WantedBy=multi-user.target

Distribute /usr/lib/systemd/system/kube-proxy.service to all three nodes and change the address settings to each node's own IP.

Start kube-proxy and enable it at boot:

systemctl daemon-reload
systemctl enable kube-proxy; systemctl start kube-proxy

Checking the LVS state shows that a virtual server has been created: requests to 10.1.0.1:443 are forwarded to 192.168.31.11:6443, which is the API server port.

[root@env12 ~]# ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  10.1.0.1:443 rr persistent 10800
  -> 192.168.31.11:6443           Masq    1      4          0

The core cluster is now deployed. Because Kubernetes does not provide a Pod network itself, a third-party network plugin is required before Pods can be created.

6. Deploying the Flannel Network

6.1. Creating the Certificate Signing Request for flannel

[root@env11 ~]# cd /opt/src/ssl/
[root@env11 ssl]# vim flanneld-csr.json
{
  "CN": "flanneld",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}

Generate the certificate:

cfssl gencert -ca=/opt/src/ssl/ca.pem \
     -ca-key=/opt/src/ssl/ca-key.pem \
     -config=/opt/src/ssl/ca-config.json \
     -profile=kubernetes flanneld-csr.json | cfssljson -bare flanneld
[root@env11 ssl]# ll flannel*
-rw-r--r--. 1 root root  221 Apr 14 15:58 flanneld-csr.json
-rw-------. 1 root root 1679 Apr 14 15:58 flanneld-key.pem
-rw-r--r--. 1 root root  997 Apr 14 15:58 flanneld.csr
-rw-r--r--. 1 root root 1391 Apr 14 15:58 flanneld.pem

6.2. Distributing the Certificates and Files

Copy flanneld-key.pem and flanneld.pem to /opt/kubernetes/ssl on all three nodes.

Copy the flannel startup files flanneld, mk-docker-opts.sh, and remove-docker0.sh to /opt/kubernetes/bin on all three nodes.

6.3. Creating the flannel Configuration File

[root@env11 bin]# cat /opt/kubernetes/cfg/flannel
FLANNEL_ETCD="-etcd-endpoints=https://192.168.31.11:2379,https://192.168.31.12:2379,https://192.168.31.13:2379"
FLANNEL_ETCD_KEY="-etcd-prefix=/kubernetes/network"
FLANNEL_ETCD_CAFILE="--etcd-cafile=/opt/kubernetes/ssl/ca.pem"
FLANNEL_ETCD_CERTFILE="--etcd-certfile=/opt/kubernetes/ssl/flanneld.pem"
FLANNEL_ETCD_KEYFILE="--etcd-keyfile=/opt/kubernetes/ssl/flanneld-key.pem"

Distribute /opt/kubernetes/cfg/flannel to /opt/kubernetes/cfg on all three nodes.

6.4. Creating the flannel systemd Service

[root@env11 bin]# cat /usr/lib/systemd/system/flannel.service
[Unit]
Description=Flanneld overlay address etcd agent
After=network.target
Before=docker.service
 
[Service]
EnvironmentFile=-/opt/kubernetes/cfg/flannel
ExecStartPre=/opt/kubernetes/bin/remove-docker0.sh
ExecStart=/opt/kubernetes/bin/flanneld ${FLANNEL_ETCD} ${FLANNEL_ETCD_KEY} ${FLANNEL_ETCD_CAFILE} ${FLANNEL_ETCD_CERTFILE} ${FLANNEL_ETCD_KEYFILE}
ExecStartPost=/opt/kubernetes/bin/mk-docker-opts.sh -d /run/flannel/docker
 
Type=notify
 
[Install]
WantedBy=multi-user.target
RequiredBy=docker.service

Distribute /usr/lib/systemd/system/flannel.service to all three nodes.

6.5. Downloading the CNI Plugins

The official CNI plugins are released at https://github.com/containernetworking/plugins/releases. Their final location is /opt/kubernetes/bin/cni, which was already populated when the archive was unpacked during initialization.

6.6. Creating the etcd Key

This defines the Pod network range and stores it in etcd; flannel then reads it from etcd and allocates per-node subnets from it.

# write a key/value pair with etcdctl's mk command
/opt/kubernetes/bin/etcdctl \
--ca-file /opt/kubernetes/ssl/ca.pem \
--cert-file /opt/kubernetes/ssl/flanneld.pem \
--key-file /opt/kubernetes/ssl/flanneld-key.pem \
--no-sync -C https://192.168.31.11:2379,https://192.168.31.12:2379,https://192.168.31.13:2379 \
mk /kubernetes/network/config '{ "Network": "10.2.0.0/16", "Backend": { "Type": "vxlan", "VNI": 1 }}' >/dev/null 2>&1
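
The stored value can be read back to confirm the write (a sketch reusing the same flags):

/opt/kubernetes/bin/etcdctl \
--ca-file /opt/kubernetes/ssl/ca.pem \
--cert-file /opt/kubernetes/ssl/flanneld.pem \
--key-file /opt/kubernetes/ssl/flanneld-key.pem \
--no-sync -C https://192.168.31.11:2379,https://192.168.31.12:2379,https://192.168.31.13:2379 \
get /kubernetes/network/config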

6.7. Starting flannel and Enabling It at Boot

Do this on all three nodes. Each node then gains an extra flannel.1 interface, and each node's interface sits in a different subnet (see the check after the commands below).

systemctl daemon-reload
systemctl enable flannel; systemctl start flannel
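
The new interface can be inspected on each node (a sketch):

ip -4 addr show flannel.1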

6.8. Configuring Docker to Use Flannel

[root@env11 ~]# vim /usr/lib/systemd/system/docker.service
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
#BindsTo=containerd.service
#After=network-online.target firewalld.service containerd.service
# start docker after the flannel network is up
After=network-online.target firewalld.service flannel.service
Wants=network-online.target
Requires=flannel.service
 
[Service]
Type=notify
# the default is not to use systemd for cgroups because the delegate issues still
# exists and systemd currently does not support the cgroup feature set required
# for containers run by docker
# added: give docker0 the IP address allocated by flannel
EnvironmentFile=-/run/flannel/docker
#ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
# modified start command so the flannel-generated options take effect
ExecStart=/usr/bin/dockerd $DOCKER_OPTS
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
 
# Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229.
# Both the old, and new location are accepted by systemd 229 and up, so using the old location
# to make them work for either version of systemd.
StartLimitBurst=3
 
# Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230.
# Both the old, and new name are accepted by systemd 230 and up, so using the old name to make
# this option work for either version of systemd.
StartLimitInterval=60s
 
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
 
# Comment TasksMax if your systemd version does not supports it.
# Only systemd 226 and above support this option.
TasksMax=infinity
 
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
 
# kill only the docker process, not all processes in the cgroup
KillMode=process
 
[Install]
WantedBy=multi-user.target

Apply this on all three nodes, then restart the docker service; docker0 now carries the IP address allocated by flannel, which can be verified as shown below.
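
To confirm that Docker picked up the flannel-assigned subnet (a sketch; /run/flannel/docker is the options file written by mk-docker-opts.sh above):

cat /run/flannel/docker
ip -4 addr show docker0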

7. CoreDNS

7.1. Deploying CoreDNS

[root@env11 ~]# vim coredns.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
  labels:
      kubernetes.io/cluster-service: "true"
      addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: Reconcile
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: EnsureExists
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
  labels:
      addonmanager.kubernetes.io/mode: EnsureExists
data:
  Corefile: |
    .:53 {
        errors
        health
        kubernetes cluster.local. in-addr.arpa ip6.arpa {
            pods insecure
            upstream
            fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        proxy . /etc/resolv.conf
        cache 30
    }
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: coredns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  replicas: 2
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: coredns
  template:
    metadata:
      labels:
        k8s-app: coredns
    spec:
      serviceAccountName: coredns
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
        - key: "CriticalAddonsOnly"
          operator: "Exists"
      containers:
      - name: coredns
        image: coredns/coredns:1.0.6
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
      dnsPolicy: Default
      volumes:
        - name: config-volume
          configMap:
            name: coredns
            items:
            - key: Corefile
              path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: coredns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: coredns
  clusterIP: 10.1.0.2
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
# kubectl apply -f coredns.yaml
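
After applying the manifest, a quick sanity check that the pods come up and that cluster DNS resolves (a sketch; the busybox image tag and the test pod name are just examples):

kubectl get pods -n kube-system -l k8s-app=coredns
kubectl run dns-test --image=busybox:1.28 --rm -it --restart=Never -- nslookup kubernetes.default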

 
