Kubernetes (k8s) v1.12.4 Binary Deployment

  • Note: all of the configuration files below are available for reference at: https://github.com/xiaoqshuo/k8-ha-install/tree/Binary_deployment_12.4

1. Environment Setup

1.1.1 Software Versions

Service    Version
kubernetes v1.12.4
CentOS 7.6 CentOS Linux release 7.6.1810 (Core)
Docker v18.06
etcd v3.3.11
calico 3.1.4

1.1.2 Host Information

IP            Role        Installed Components
192.168.2.101 k8s master etcd,kube-apiserver,kube-controller-manager,kube-scheduler
192.168.2.102 k8s master etcd,kube-apiserver,kube-controller-manager,kube-scheduler
192.168.2.103 k8s master etcd,kube-apiserver,kube-controller-manager,kube-scheduler
192.168.2.111 k8s node01 docker,kubelet,kube-proxy
192.168.2.112 k8s node02 docker,kubelet,kube-proxy
192.168.2.113 k8s node03 docker,kubelet,kube-proxy

1.2 Pre-installation Preparation

1.2.1 Configure the Time Zone

ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
echo 'Asia/Shanghai' >/etc/timezone
ntpdate time.windows.com
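  • Note: ntpdate performs a one-off sync. A minimal sketch to keep the clock in sync via cron (the interval and NTP server here are arbitrary choices):
# re-sync every 30 minutes from root's crontab
(crontab -l 2>/dev/null; echo '*/30 * * * * /usr/sbin/ntpdate time.windows.com >/dev/null 2>&1') | crontab -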

1.2.2 Install Common Utilities

yum -y install vim tree wget lrzsz

1.2.3 Configure the Shell Prompt and History Format

cat  << EOF >> /etc/profile
###########################
export PS1='\[\e[32;1m\][\u@\h \W]\$ \[\e[0m\]'
export HISTTIMEFORMAT="`whoami`_%F %T :"
alias grep='grep --color=auto'
alias egrep='egrep --color=auto'
EOF

1.2.4 Configure Yum Repositories

  • Back up the existing repos
mkdir /etc/yum.repos.d/bak
cp /etc/yum.repos.d/*.repo /etc/yum.repos.d/bak/
  • CentOS-Base.repo
curl -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
  • epel.repo
wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
  • docker-ce.repo
# Remove any previously installed Docker packages
yum remove docker \
                  docker-client \
                  docker-client-latest \
                  docker-common \
                  docker-latest \
                  docker-latest-logrotate \
                  docker-logrotate \
                  docker-selinux \
                  docker-engine-selinux \
                  docker-engine

# Install prerequisites for docker-ce
yum install -y yum-utils device-mapper-persistent-data lvm2

# Add the official docker-ce repo
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo

# Or use the Aliyun docker-ce mirror
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
  • kubernetes.repo
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
  • Rebuild the yum cache and update the system
yum makecache
yum update -y

1.2.5 Set SELinux to Permissive Mode

vi /etc/selinux/config
SELINUX=permissive

setenforce 0

1.2.6 Set Kernel Parameters for iptables

cat <<EOF >  /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF

sysctl --system

1.2.7 Disable Swap

swapoff -a

# Comment out the swap entry in fstab
vi /etc/fstab
#/dev/mapper/centos-swap swap                    swap    defaults        0 0

# Confirm swap is disabled
cat /proc/swaps
Filename                Type        Size    Used    Priority

1.2.8 Configure Resource Limits

ulimit -SHn 65535
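  • ulimit -SHn only affects the current shell. A minimal sketch to persist the limit across reboots via /etc/security/limits.conf:
cat << EOF >> /etc/security/limits.conf
* soft nofile 65535
* hard nofile 65535
EOF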

1.2.9 Configure the hosts File

cat << EOF >> /etc/hosts
192.168.2.100 k8s-master-lb
192.168.2.101 k8s-master01
192.168.2.102 k8s-master02
192.168.2.103 k8s-master03
192.168.2.111 k8s-node01
192.168.2.112 k8s-node02
192.168.2.113 k8s-node03
EOF
[root@k8s-master01 ~]# more /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.2.100 k8s-master-lb
192.168.2.101 k8s-master01
192.168.2.102 k8s-master02
192.168.2.103 k8s-master03
192.168.2.111 k8s-node01
192.168.2.112 k8s-node02
192.168.2.113 k8s-node03

1.2.10 Load IPVS Modules on All Nodes

modprobe ip_vs
modprobe ip_vs_rr
modprobe ip_vs_wrr
modprobe ip_vs_sh
modprobe nf_conntrack_ipv4
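  • modprobe does not survive a reboot. A sketch to load the modules at boot, assuming systemd-modules-load (standard on CentOS 7):
cat << EOF > /etc/modules-load.d/ipvs.conf
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack_ipv4
EOF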

1.2.11 Disable the Firewall

systemctl disable firewalld
systemctl stop firewalld

1.2.12 Reboot the Hosts

reboot

1.2.13 Set Up Passwordless SSH Between All Nodes

ssh-keygen -t rsa

for i in k8s-master01 k8s-master02 k8s-master03 k8s-node01 k8s-node02 k8s-node03;do ssh-copy-id -i .ssh/id_rsa.pub $i;done
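  • A quick sanity check that passwordless login works everywhere (a sketch):
for i in k8s-master01 k8s-master02 k8s-master03 k8s-node01 k8s-node02 k8s-node03;do ssh $i hostname;done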

1.2.14 Install the Certificate Tools (k8s-master01)

mkdir /opt/ssl
cd /opt/ssl
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
chmod +x *
mv cfssl_linux-amd64 /usr/local/bin/cfssl
mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
mv cfssl-certinfo_linux-amd64 /usr/bin/cfssl-certinfo

1.2.15 Generate the CA Certificate (k8s-master01)

  • Create the CA configuration file ca-config.json
cat > ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
         "expiry": "87600h",
         "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ]
      }
    }
  }
}
EOF
  • Create the CA certificate signing request ca-csr.json
cat > ca-csr.json << EOF
{
    "CN": "Kubernetes",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Beijing",
            "ST": "Beijing",
            "O": "k8s",
            "OU": "System"
        }
    ]
}
EOF
  • Generate the CA certificate and key
cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
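  • The resulting CA can be inspected with the cfssl-certinfo tool installed above, e.g.:
cfssl-certinfo -cert ca.pem    # shows the subject, usages, and validity (87600h ≈ 10 years)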

2. Deploy the Etcd Cluster

2.1 Generate Etcd Certificates

  • Create the etcd server certificate signing request etcd-csr.json
cat > etcd-csr.json << EOF
{
    "CN": "etcd",
    "hosts": [
    "127.0.0.1",
    "192.168.2.101",
    "192.168.2.102",
    "192.168.2.103"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "BeiJing",
            "ST": "BeiJing",
            "O": "k8s",
            "OU": "System"
        }
    ]
}
EOF
  • Generate the certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes etcd-csr.json | cfssljson -bare etcd
# ls *.pem
ca-key.pem  ca.pem  etcd-key.pem  etcd.pem
mkdir /etc/kubernetes/etcd/{bin,cfg,ssl} -p
cp ca-key.pem  ca.pem  etcd-key.pem  etcd.pem /etc/kubernetes/etcd/ssl/

2.2 Deploy Etcd

  • Binary package download: https://github.com/etcd-io/etcd/releases/tag/v3.3.11
wget https://github.com/etcd-io/etcd/releases/download/v3.3.11/etcd-v3.3.11-linux-amd64.tar.gz
tar zxvf etcd-v3.3.11-linux-amd64.tar.gz
mv etcd-v3.3.11-linux-amd64/{etcd,etcdctl} /etc/kubernetes/etcd/bin

2.2.1 Create the etcd Configuration File

  • The deployment steps below are identical on all three planned etcd nodes; the only difference is that the server IPs in the etcd configuration file must be the current node's.

  • For example, on k8s-master01:

cat > /etc/kubernetes/etcd/cfg/etcd  << EOF
#[Member]
ETCD_NAME="k8s-master01"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.2.101:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.2.101:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.2.101:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.2.101:2379"
ETCD_INITIAL_CLUSTER="k8s-master01=https://192.168.2.101:2380,k8s-master02=https://192.168.2.102:2380,k8s-master03=https://192.168.2.103:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF
  • ETCD_NAME: node name
  • ETCD_DATA_DIR: data directory
  • ETCD_LISTEN_PEER_URLS: cluster peer listen address
  • ETCD_LISTEN_CLIENT_URLS: client listen address
  • ETCD_INITIAL_ADVERTISE_PEER_URLS: peer address advertised to the cluster
  • ETCD_ADVERTISE_CLIENT_URLS: client address advertised to the cluster
  • ETCD_INITIAL_CLUSTER: addresses of all cluster members
  • ETCD_INITIAL_CLUSTER_TOKEN: cluster token
  • ETCD_INITIAL_CLUSTER_STATE: state when joining; "new" for a new cluster, "existing" to join an existing one

2.2.2 Manage etcd with systemd

# cat  /usr/lib/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
EnvironmentFile=/etc/kubernetes/etcd/cfg/etcd
ExecStart=/etc/kubernetes/etcd/bin/etcd \
--name=${ETCD_NAME} \
--data-dir=${ETCD_DATA_DIR} \
--listen-peer-urls=${ETCD_LISTEN_PEER_URLS} \
--listen-client-urls=${ETCD_LISTEN_CLIENT_URLS},http://127.0.0.1:2379 \
--advertise-client-urls=${ETCD_ADVERTISE_CLIENT_URLS} \
--initial-advertise-peer-urls=${ETCD_INITIAL_ADVERTISE_PEER_URLS} \
--initial-cluster=${ETCD_INITIAL_CLUSTER} \
--initial-cluster-token=${ETCD_INITIAL_CLUSTER_TOKEN} \
--initial-cluster-state=new \
--cert-file=/etc/kubernetes/etcd/ssl/etcd.pem \
--key-file=/etc/kubernetes/etcd/ssl/etcd-key.pem \
--peer-cert-file=/etc/kubernetes/etcd/ssl/etcd.pem \
--peer-key-file=/etc/kubernetes/etcd/ssl/etcd-key.pem \
--trusted-ca-file=/etc/kubernetes/etcd/ssl/ca.pem \
--peer-trusted-ca-file=/etc/kubernetes/etcd/ssl/ca.pem
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

2.2.3 Distribute the etcd Configuration Files and Certificates

  • Note: after copying, edit each node's etcd configuration file:
    • set ETCD_NAME to that server's hostname
    • set the listen/advertise URLs to that server's IP address
USER=root
CONTROL_PLANE_IPS="k8s-master02 k8s-master03 k8s-node01 k8s-node02 k8s-node03"
for host in $CONTROL_PLANE_IPS; do
    ssh "${USER}"@$host "mkdir -p /etc/kubernetes/"
    scp -r /etc/kubernetes/etcd/ "${USER}"@$host:/etc/kubernetes/
    scp -r /usr/lib/systemd/system/etcd.service "${USER}"@$host:/usr/lib/systemd/system/etcd.service
done

2.2.4 Start etcd and Enable It at Boot

systemctl start etcd
systemctl enable etcd

2.2.5 Verify the etcd Cluster

# /etc/kubernetes/etcd/bin/etcdctl \
--ca-file=/etc/kubernetes/etcd/ssl/ca.pem --cert-file=/etc/kubernetes/etcd/ssl/etcd.pem --key-file=/etc/kubernetes/etcd/ssl/etcd-key.pem \
--endpoints="https://192.168.2.101:2379,https://192.168.2.102:2379,https://192.168.2.103:2379" \
cluster-health

# Result
member ad9328796634e0d0 is healthy: got healthy result from https://192.168.2.101:2379
member f03f45bbcae9634b is healthy: got healthy result from https://192.168.2.103:2379
member fddf9c47e41c5ec2 is healthy: got healthy result from https://192.168.2.102:2379
cluster is healthy
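  • Cluster membership can also be listed with the same TLS flags (etcdctl in v3.3 defaults to the v2 API):
/etc/kubernetes/etcd/bin/etcdctl \
--ca-file=/etc/kubernetes/etcd/ssl/ca.pem --cert-file=/etc/kubernetes/etcd/ssl/etcd.pem --key-file=/etc/kubernetes/etcd/ssl/etcd-key.pem \
--endpoints="https://192.168.2.101:2379" \
member list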

3. Deploy the Kubernetes Master Nodes

3.1 Configure k8s Master HA with HAProxy + Keepalived (run on every master, substituting the host-specific values)

  • keepalived provides the VIP through which kube-apiserver is exposed;
  • haproxy listens on the VIP and load-balances across all kube-apiserver instances, with health checking;
  • the nodes running keepalived and haproxy are called LB nodes; since keepalived runs in a one-master-multiple-backup mode, at least two LB nodes are required.
  • This document reuses the three master machines; haproxy's listen port (8443) must differ from kube-apiserver's 6443 to avoid a conflict.
  • keepalived periodically checks the local haproxy process; if it detects a failure, it triggers a re-election and the VIP floats to the newly elected master, keeping the VIP highly available.
  • All components (kubectl, apiserver, controller-manager, scheduler, etc.) access kube-apiserver through the VIP on haproxy's port 8443.

3.1.1 Install HAProxy and Keepalived

yum install -y keepalived haproxy

3.1.2 Configure HAProxy on the Masters to Proxy kube-apiserver

cp /etc/haproxy/haproxy.cfg{,.bak}
cat > /etc/haproxy/haproxy.cfg << EOF
global
    log /dev/log    local0
    log /dev/log    local1 notice
    chroot /var/lib/haproxy
    stats socket /var/run/haproxy-admin.sock mode 660 level admin
    stats timeout 30s
    user haproxy
    group haproxy
    daemon
    nbproc 1

defaults
    log     global
    timeout connect 5000
    timeout client  10m
    timeout server  10m

listen  admin_stats
    bind 0.0.0.0:10080
    mode http
    log 127.0.0.1 local0 err
    stats refresh 30s
    stats uri /status
    stats realm welcome login\ Haproxy
    stats auth admin:123456
    stats hide-version
    stats admin if TRUE

listen kube-master
    bind 0.0.0.0:8443
    mode tcp
    option tcplog
    balance roundrobin
    server 192.168.2.101 192.168.2.101:6443 check inter 2000 fall 2 rise 2 weight 1
    server 192.168.2.102 192.168.2.102:6443 check inter 2000 fall 2 rise 2 weight 1
    server 192.168.2.103 192.168.2.103:6443 check inter 2000 fall 2 rise 2 weight 1
EOF
  • haproxy serves status information on port 10080;
  • haproxy listens on port 8443 on all interfaces; this port must match the one in the ${KUBE_APISERVER} environment variable;
  • the server lines list the IPs and ports of all kube-apiserver instances;

3.1.3 Configure Keepalived on the Three Masters

cp /etc/keepalived/keepalived.conf{,.bak}
cat >  /etc/keepalived/keepalived.conf << EOF
global_defs {
    router_id lb-master-100
}

vrrp_script check-haproxy {
    script "killall -0 haproxy"
    interval 3
}

vrrp_instance VI-kube-master {
    state MASTER
    priority 120
    dont_track_primary
    interface ens160
    virtual_router_id 68
    advert_int 3
    track_script {
        check-haproxy
    }
    virtual_ipaddress {
        192.168.2.100 # VIP; api-server is reached via this IP
    }
}
EOF
  • killall -0 haproxy checks whether the haproxy process on this node is alive.
  • router_id and virtual_router_id identify this keepalived HA group; if several keepalived HA groups exist, each group must use different values;
  • on the other two (backup) nodes, change state to BACKUP and set priority to 110 and 100 respectively.
  • For example:
cp /etc/keepalived/keepalived.conf{,.bak}
cat >  /etc/keepalived/keepalived.conf << EOF
global_defs {
    router_id lb-master-100
}

vrrp_script check-haproxy {
    script "killall -0 haproxy"
    interval 3
}

vrrp_instance VI-kube-master {
    state BACKUP
    priority 110
    dont_track_primary
    interface ens160
    virtual_router_id 68
    advert_int 3
    track_script {
        check-haproxy
    }
    virtual_ipaddress {
        192.168.2.100 # VIP; api-server is reached via this IP
    }
}
EOF

3.1.4 Start the HAProxy and Keepalived Services

#haproxy
systemctl enable haproxy
systemctl start haproxy

#keepalived
systemctl enable keepalived
systemctl start keepalived

3.1.5 Check the HAProxy and Keepalived Service Status

systemctl status haproxy|grep Active
systemctl status keepalived|grep Active

  • If Active: active (running) is shown, the services are healthy.

3.1.6 Check VIP Ownership

# ip addr show | grep 192.168.2.100
    inet 192.168.2.100/32 scope global ens160
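  • Optionally, test failover (a sketch): stop haproxy on the node holding the VIP; keepalived's check script fails and the VIP should move to another master within a few advert intervals.
# on the current VIP holder
systemctl stop haproxy
# on another master, the VIP should appear shortly
ip addr show | grep 192.168.2.100
# restore the stopped service afterwards
systemctl start haproxy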

3.2 Deploy the kubectl Command-Line Tool

  • kubectl is the command-line management tool for a Kubernetes cluster; this section covers installing and configuring it.
  • kubectl reads the kube-apiserver address, certificate, and user information from ~/.kube/config by default; without that file, kubectl commands may fail.
  • ~/.kube/config only needs to be created once and can then be copied to the other masters.

3.2.1 Download kubectl

  • Binary package download: https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.12.md#server-binaries
  • Downloading kubernetes-server-linux-amd64.tar.gz alone is enough; it contains all required components.
wget https://dl.k8s.io/v1.12.4/kubernetes-server-linux-amd64.tar.gz    # if this link fails, use the next one
# wget https://storage.googleapis.com/kubernetes-release/release/v1.12.4/kubernetes-server-linux-amd64.tar.gz    # alternative
tar zxvf kubernetes-server-linux-amd64.tar.gz
cd kubernetes/server/bin
mkdir -p /etc/kubernetes/server/{bin,cfg,ssl}
cp kube-apiserver kube-scheduler kube-controller-manager kubectl /etc/kubernetes/server/bin/

3.2.2 Create the admin Certificate Request

  • admin-csr.json
cat > admin-csr.json <<EOF
{
  "CN": "admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}
EOF
  • O is system:masters; when kube-apiserver receives this certificate it sets the request's Group to system:masters;
  • the predefined ClusterRoleBinding cluster-admin binds Group system:masters to Role cluster-admin, which grants permission to all APIs;
  • the certificate is only used by kubectl as a client certificate, so the hosts field is empty;
  • Generate the certificate and private key
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin
  • Copy the certificates
cp ca-key.pem  ca.pem admin.pem admin-key.pem /etc/kubernetes/server/ssl/

3.2.3 Create the ~/.kube/config File

kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/server/ssl/ca.pem \
  --embed-certs=true \
  --server=https://192.168.2.100:8443 \
  --kubeconfig=kubectl.kubeconfig

# Set client credentials
kubectl config set-credentials admin \
  --client-certificate=/etc/kubernetes/server/ssl/admin.pem \
  --client-key=/etc/kubernetes/server/ssl/admin-key.pem \
  --embed-certs=true \
  --kubeconfig=kubectl.kubeconfig

# Set the context
kubectl config set-context kubernetes \
  --cluster=kubernetes \
  --user=admin \
  --kubeconfig=kubectl.kubeconfig
  
# Use this context by default
kubectl config use-context kubernetes --kubeconfig=kubectl.kubeconfig

3.2.4 Distribute the ~/.kube/config File

cp kubectl.kubeconfig ~/.kube/config
for i in k8s-master02 k8s-master03;do scp -r ~/.kube/ $i:~/;done

3.3 Deploy the kube-apiserver Component

3.3.1 Generate the apiserver Certificate

  • apiserver-csr.json
cat > apiserver-csr.json <<EOF
{
    "CN": "kubernetes",
    "hosts": [
      "127.0.0.1",
      "192.168.2.101",
      "192.168.2.102",
      "192.168.2.103",
      "192.168.2.100",
      "10.254.0.1",
      "kubernetes",
      "kubernetes.default",
      "kubernetes.default.svc",
      "kubernetes.default.svc.cluster",
      "kubernetes.default.svc.cluster.local"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "BeiJing",
            "L": "BeiJing",
            "O": "k8s",
            "OU": "System"
        }
    ]
}
EOF
  • the hosts field lists the IPs and domain names authorized to use this certificate; here it includes the VIP, the apiserver node IPs, and the kubernetes service IP and domain names;
  • a domain name must not end in "." (e.g. kubernetes.default.svc.cluster.local. is invalid), otherwise parsing fails with: x509: cannot parse dnsName "kubernetes.default.svc.cluster.local.";
  • if you use a domain other than cluster.local, e.g. bqding.com, change the last two names in the list to kubernetes.default.svc.bqding and kubernetes.default.svc.bqding.com

  • Generate the certificate and private key

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes apiserver-csr.json | cfssljson -bare apiserver
  • Copy the certificates
cp apiserver*.pem /etc/kubernetes/server/ssl/

3.3.2 Create the Encryption Configuration File

cat > /etc/kubernetes/server/cfg/encryption-config.yaml <<EOF
kind: EncryptionConfig
apiVersion: v1
resources:
  - resources:
      - secrets
    providers:
      - aescbc:
          keys:
            - name: key1
              secret: $(head -c 32 /dev/urandom | base64)
      - identity: {}
EOF
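  • Once kube-apiserver is running (section 3.3.7), encryption at rest can be verified by creating a Secret and reading it straight from etcd; the stored value should carry the k8s:enc:aescbc prefix rather than plaintext. A sketch (the secret name is arbitrary; note the etcd v3 API flags):
kubectl create secret generic enc-test --from-literal=foo=bar
ETCDCTL_API=3 /etc/kubernetes/etcd/bin/etcdctl \
  --cacert=/etc/kubernetes/etcd/ssl/ca.pem --cert=/etc/kubernetes/etcd/ssl/etcd.pem --key=/etc/kubernetes/etcd/ssl/etcd-key.pem \
  --endpoints=https://192.168.2.101:2379 \
  get /registry/secrets/default/enc-test | head    # value should start with k8s:enc:aescbc:v1:key1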

3.3.3 kube-apiserver Configuration File

# cat /etc/kubernetes/server/cfg/kube-apiserver
KUBE_APISERVER_OPTS=" --enable-admission-plugins=Initializers,NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \
  --anonymous-auth=false \
  --experimental-encryption-provider-config=/etc/kubernetes/server/cfg/encryption-config.yaml \
  --advertise-address=192.168.2.101 \
  --bind-address=192.168.2.101 \
  --insecure-port=0 \
  --authorization-mode=Node,RBAC \
  --runtime-config=api/all \
  --enable-bootstrap-token-auth \
  --service-cluster-ip-range=10.254.0.0/16 \
  --service-node-port-range=30000-32700 \
  --tls-cert-file=/etc/kubernetes/server/ssl/apiserver.pem \
  --tls-private-key-file=/etc/kubernetes/server/ssl/apiserver-key.pem \
  --client-ca-file=/etc/kubernetes/server/ssl/ca.pem \
  --kubelet-client-certificate=/etc/kubernetes/server/ssl/apiserver.pem \
  --kubelet-client-key=/etc/kubernetes/server/ssl/apiserver-key.pem \
  --service-account-key-file=/etc/kubernetes/server/ssl/ca-key.pem \
  --etcd-cafile=/etc/kubernetes/etcd/ssl/ca.pem \
  --etcd-certfile=/etc/kubernetes/etcd/ssl/etcd.pem \
  --etcd-keyfile=/etc/kubernetes/etcd/ssl/etcd-key.pem \
  --etcd-servers=https://192.168.2.101:2379,https://192.168.2.102:2379,https://192.168.2.103:2379 \
  --enable-swagger-ui=true \
  --allow-privileged=true \
  --apiserver-count=3 \
  --audit-log-maxage=30 \
  --audit-log-maxbackup=3 \
  --audit-log-maxsize=100 \
  --audit-log-path=/var/log/kube-apiserver-audit.log \
  --event-ttl=1h \
  --alsologtostderr=true \
  --logtostderr=false \
  --log-dir=/var/log/kubernetes \
  --v=2"
  • --experimental-encryption-provider-config: enables encryption of secrets at rest;
  • --authorization-mode=Node,RBAC: enables the Node and RBAC authorization modes, rejecting unauthorized requests;
  • --enable-admission-plugins: enables admission plugins such as ServiceAccount and NodeRestriction;
  • --service-account-key-file: public key file for signing ServiceAccount tokens; paired with kube-controller-manager's --service-account-private-key-file;
  • --tls-*-file: the certificate, private key, and CA file used by apiserver; --client-ca-file verifies the certificates presented by clients (kube-controller-manager, kube-scheduler, kubelet, kube-proxy, etc.);
  • --kubelet-client-certificate / --kubelet-client-key: if specified, apiserver accesses the kubelet API over https; RBAC rules must be defined for the certificate's user (the apiserver certificate above maps to user kubernetes), otherwise kubelet API access is rejected as unauthorized;
  • --bind-address: must not be 127.0.0.1; use the host's IP address, otherwise the secure port 6443 is unreachable from outside;
  • --insecure-port=0: disables the insecure port (8080);
  • --service-cluster-ip-range: the Service cluster IP range;
  • --service-node-port-range: the NodePort port range;
  • --runtime-config=api/all=true: enables APIs of all versions, e.g. autoscaling/v2alpha1;
  • --enable-bootstrap-token-auth: enables token authentication for kubelet bootstrap;
  • --apiserver-count=3: the number of kube-apiserver instances running in the cluster;

3.3.4 Manage kube-apiserver with systemd

# cat /usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
EnvironmentFile=-/etc/kubernetes/server/cfg/kube-apiserver
ExecStart=/etc/kubernetes/server/bin/kube-apiserver $KUBE_APISERVER_OPTS
Restart=on-failure
RestartSec=5
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

3.3.5 Create the Log Directory

mkdir -p /var/log/kubernetes

3.3.6 Distribute Configuration Files and Certificates

  • After copying, change the IP addresses in the other masters' configuration files to each node's own.
USER=root
for host in k8s-master02 k8s-master03;do 
    ssh "${USER}"@$host "mkdir -p /var/log/kubernetes" 
    scp -r /etc/kubernetes/server/ "${USER}"@$host:/etc/kubernetes/ 
    scp /usr/lib/systemd/system/kube-apiserver.service "${USER}"@$host:/usr/lib/systemd/system/kube-apiserver.service 
done

3.3.7 Start the kube-apiserver Service

systemctl daemon-reload
systemctl enable kube-apiserver
systemctl start kube-apiserver

3.3.8 Check the kube-apiserver Service

# netstat -ptln | grep kube-apiserve
tcp        0      0 192.168.2.101:6443      0.0.0.0:*               LISTEN      15786/kube-apiserve
  • Cluster status
# kubectl cluster-info
Kubernetes master is running at https://192.168.2.100:8443

To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.

3.3.9 Grant the kubernetes Certificate Access to the kubelet API

kubectl create clusterrolebinding kube-apiserver:kubelet-apis --clusterrole=system:kubelet-api-admin --user kubernetes

3.4 Deploy the kube-controller-manager Component

  • The cluster contains three such nodes; after startup, one leader is elected through the leader-election mechanism while the other nodes block. When the leader becomes unavailable, the remaining nodes elect a new one, keeping the service available.
    To secure communication, this document first generates an x509 certificate and private key; kube-controller-manager uses the certificate in two situations:
    • when communicating with kube-apiserver's secure port;
    • when serving prometheus-format metrics on its secure port (https, 10252);

3.4.1 Create the kube-controller-manager Certificate Request

cat > kube-controller-manager-csr.json << EOF
{
    "CN": "system:kube-controller-manager",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "hosts": [
      "127.0.0.1",
      "192.168.2.101",
      "192.168.2.102",
      "192.168.2.103"
    ],
    "names": [
      {
        "C": "CN",
        "ST": "BeiJing",
        "L": "BeiJing",
        "O": "system:kube-controller-manager",
        "OU": "System"
      }
    ]
}
EOF
  • the hosts list contains all kube-controller-manager node IPs;
  • CN and O are both system:kube-controller-manager; the built-in ClusterRoleBinding system:kube-controller-manager grants the permissions kube-controller-manager needs.
  • Generate the certificate and private key
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager
  • Copy the certificates
cp kube-controller-manager*.pem /etc/kubernetes/server/ssl/

3.4.2 Create the kube-controller-manager.kubeconfig File

kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/server/ssl/ca.pem \
  --embed-certs=true \
  --server=https://192.168.2.100:8443 \
  --kubeconfig=kube-controller-manager.kubeconfig

kubectl config set-credentials system:kube-controller-manager \
  --client-certificate=/etc/kubernetes/server/ssl/kube-controller-manager.pem \
  --client-key=/etc/kubernetes/server/ssl/kube-controller-manager-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-controller-manager.kubeconfig

kubectl config set-context system:kube-controller-manager \
  --cluster=kubernetes \
  --user=system:kube-controller-manager \
  --kubeconfig=kube-controller-manager.kubeconfig

kubectl config use-context system:kube-controller-manager --kubeconfig=kube-controller-manager.kubeconfig

3.4.3 kube-controller-manager Configuration File

# cat /etc/kubernetes/server/cfg/kube-controller-manager
KUBE_CONTROLLER_MANAGER_OPTS="--port=0 \
  --secure-port=10252 \
  --bind-address=127.0.0.1 \
  --kubeconfig=/etc/kubernetes/server/cfg/kube-controller-manager.kubeconfig \
  --authentication-kubeconfig=/etc/kubernetes/server/cfg/kube-controller-manager.kubeconfig \
  --service-cluster-ip-range=10.254.0.0/16 \
  --cluster-name=kubernetes \
  --cluster-signing-cert-file=/etc/kubernetes/server/ssl/ca.pem \
  --cluster-signing-key-file=/etc/kubernetes/server/ssl/ca-key.pem \
  --experimental-cluster-signing-duration=8760h \
  --root-ca-file=/etc/kubernetes/server/ssl/ca.pem \
  --service-account-private-key-file=/etc/kubernetes/server/ssl/ca-key.pem \
  --leader-elect=true \
  --feature-gates=RotateKubeletServerCertificate=true \
  --controllers=*,bootstrapsigner,tokencleaner \
  --horizontal-pod-autoscaler-use-rest-clients=true \
  --horizontal-pod-autoscaler-sync-period=10s \
  --tls-cert-file=/etc/kubernetes/server/ssl/kube-controller-manager.pem \
  --tls-private-key-file=/etc/kubernetes/server/ssl/kube-controller-manager-key.pem \
  --use-service-account-credentials=true \
  --alsologtostderr=true \
  --logtostderr=false \
  --log-dir=/var/log/kubernetes \
  --v=2"
  • --port=0: disables the insecure http /metrics endpoint; --address then has no effect and --bind-address takes effect;
  • --secure-port=10252 / --bind-address: serve https /metrics requests on port 10252 at the given address;
  • --kubeconfig: path to the kubeconfig file kube-controller-manager uses to connect to and authenticate against kube-apiserver;
  • --cluster-signing-*-file: used to sign the certificates created by TLS Bootstrap;
  • --experimental-cluster-signing-duration: validity period of TLS Bootstrap certificates;
  • --root-ca-file: CA certificate placed into container ServiceAccounts, used to verify kube-apiserver's certificate;
  • --service-account-private-key-file: private key for signing ServiceAccount tokens; must pair with the public key given to kube-apiserver via --service-account-key-file;
  • --service-cluster-ip-range: the Service cluster IP range; must match the same parameter on kube-apiserver;
  • --leader-elect=true: enables leader election; the elected leader does the work while the other instances block;
  • --feature-gates=RotateKubeletServerCertificate=true: enables automatic rotation of kubelet server certificates;
  • --controllers=*,bootstrapsigner,tokencleaner: the controllers to enable; tokencleaner automatically cleans up expired bootstrap tokens;
  • --horizontal-pod-autoscaler-*: custom-metrics parameters; supports autoscaling/v2alpha1;
  • --tls-cert-file / --tls-private-key-file: server certificate and key used when serving metrics over https;
  • --use-service-account-credentials=true: run each controller with its own ServiceAccount credentials;

3.4.4 Manage kube-controller-manager with systemd

# cat /usr/lib/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/etc/kubernetes/server/cfg/kube-controller-manager
ExecStart=/etc/kubernetes/server/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target

3.4.5 Distribute Configuration Files and Certificates

USER=root
for host in k8s-master02 k8s-master03;do 
    ssh "${USER}"@$host "mkdir -p /var/log/kubernetes" 
    scp /etc/kubernetes/server/ssl/kube-controller-manager*.pem "${USER}"@$host:/etc/kubernetes/server/ssl/
    scp /usr/lib/systemd/system/kube-controller-manager.service "${USER}"@$host:/usr/lib/systemd/system/kube-controller-manager.service
    scp /etc/kubernetes/server/cfg/kube-controller-manager "${USER}"@$host:/etc/kubernetes/server/cfg/kube-controller-manager  
    scp /etc/kubernetes/server/cfg/kube-controller-manager.kubeconfig "${USER}"@$host:/etc/kubernetes/server/cfg/kube-controller-manager.kubeconfig  
done

3.4.6 Start the kube-controller-manager Service

systemctl daemon-reload
systemctl enable kube-controller-manager
systemctl start kube-controller-manager

3.4.7 Check the kube-controller-manager Service

# netstat -lnpt|grep kube-controlle
tcp        0      0 127.0.0.1:10252         0.0.0.0:*               LISTEN      3090/kube-controlle

3.4.8 Check the Current kube-controller-manager Leader

# kubectl get endpoints kube-controller-manager --namespace=kube-system  -o yaml

apiVersion: v1
kind: Endpoints
metadata:
  annotations:
    control-plane.alpha.kubernetes.io/leader: '{"holderIdentity":"k8s-master01_28c03ae9-18a9-11e9-a6d8-000c2927a0d0","leaseDurationSeconds":15,"acquireTime":"2019-01-15T09:37:38Z","renewTime":"2019-01-15T09:42:06Z","leaderTransitions":1}'
  creationTimestamp: 2019-01-15T09:37:14Z
  name: kube-controller-manager
  namespace: kube-system
  resourceVersion: "2413"
  selfLink: /api/v1/namespaces/kube-system/endpoints/kube-controller-manager
  uid: 24132473-18a9-11e9-936a-000c2927a0d0

3.5 Deploy the kube-scheduler Component

  • The cluster contains three such nodes; after startup, one leader is elected while the others block. When the leader becomes unavailable, the remaining nodes elect a new one, keeping the service available.
  • To secure communication, this document first generates an x509 certificate and private key; kube-scheduler uses the certificate in two situations:
    • when communicating with kube-apiserver's secure port;
    • when serving prometheus-format metrics on a secure port (https, 10251);

3.5.1 Create the kube-scheduler Certificate Request

# cat > kube-scheduler-csr.json << EOF
{
    "CN": "system:kube-scheduler",
    "hosts": [
      "127.0.0.1",
      "192.168.2.101",
      "192.168.2.102",
      "192.168.2.103"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
      {
        "C": "CN",
        "ST": "BeiJing",
        "L": "BeiJing",
        "O": "system:kube-scheduler",
        "OU": "System"
      }
    ]
}
EOF
  • the hosts list contains all kube-scheduler node IPs;
  • CN and O are both system:kube-scheduler; the built-in ClusterRoleBinding system:kube-scheduler grants the permissions kube-scheduler needs.
  • Generate the certificate and private key
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes  kube-scheduler-csr.json | cfssljson -bare  kube-scheduler
  • Copy the certificates
cp kube-scheduler*.pem /etc/kubernetes/server/ssl/

3.5.2 Create the kube-scheduler.kubeconfig File

kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/server/ssl/ca.pem \
  --embed-certs=true \
  --server=https://192.168.2.100:8443 \
  --kubeconfig=kube-scheduler.kubeconfig

kubectl config set-credentials system:kube-scheduler \
  --client-certificate=/etc/kubernetes/server/ssl/kube-scheduler.pem \
  --client-key=/etc/kubernetes/server/ssl/kube-scheduler-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-scheduler.kubeconfig

kubectl config set-context system:kube-scheduler \
  --cluster=kubernetes \
  --user=system:kube-scheduler \
  --kubeconfig=kube-scheduler.kubeconfig

kubectl config use-context system:kube-scheduler --kubeconfig=kube-scheduler.kubeconfig

3.5.3 kube-scheduler Configuration File

# cat /etc/kubernetes/server/cfg/kube-scheduler
KUBE_SCHEDULER_OPTS=" --address=127.0.0.1 \
  --kubeconfig=/etc/kubernetes/server/cfg/kube-scheduler.kubeconfig \
  --leader-elect=true \
  --alsologtostderr=true \
  --logtostderr=false \
  --log-dir=/var/log/kubernetes \
  --v=2"
  • --address: accept http /metrics requests on 127.0.0.1:10251; kube-scheduler does not yet support serving https;
  • --kubeconfig: path to the kubeconfig file kube-scheduler uses to connect to and authenticate against kube-apiserver;
  • --leader-elect=true: enables leader election; the elected leader does the work while the other instances block;

3.5.4 Manage kube-scheduler with systemd

# cat /usr/lib/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/etc/kubernetes/server/cfg/kube-scheduler
ExecStart=/etc/kubernetes/server/bin/kube-scheduler $KUBE_SCHEDULER_OPTS
Restart=on-failure
RestartSec=5


[Install]
WantedBy=multi-user.target

3.5.5 Distribute Configuration Files and Certificates

USER=root
for host in k8s-master02 k8s-master03;do 
    ssh "${USER}"@$host "mkdir -p /var/log/kubernetes" 
    scp /etc/kubernetes/server/ssl/kube-scheduler*.pem "${USER}"@$host:/etc/kubernetes/server/ssl/
    scp /usr/lib/systemd/system/kube-scheduler.service "${USER}"@$host:/usr/lib/systemd/system/kube-scheduler.service
    scp /etc/kubernetes/server/cfg/kube-scheduler "${USER}"@$host:/etc/kubernetes/server/cfg/kube-scheduler  
    scp /etc/kubernetes/server/cfg/kube-scheduler.kubeconfig "${USER}"@$host:/etc/kubernetes/server/cfg/kube-scheduler.kubeconfig  
done

3.5.6 Start the kube-scheduler Service

systemctl daemon-reload
systemctl enable kube-scheduler
systemctl start kube-scheduler

3.5.7 Check the kube-scheduler Service

# netstat -lnpt|grep kube-scheduler
tcp        0      0 127.0.0.1:10251         0.0.0.0:*               LISTEN      3155/kube-scheduler

3.5.8 Check the Current kube-scheduler Leader

# kubectl get endpoints kube-scheduler --namespace=kube-system  -o yaml
apiVersion: v1
kind: Endpoints
metadata:
  annotations:
    control-plane.alpha.kubernetes.io/leader: '{"holderIdentity":"k8s-master01_eb23817d-18a9-11e9-8445-000c2927a0d0","leaseDurationSeconds":15,"acquireTime":"2019-01-15T09:43:05Z","renewTime":"2019-01-15T09:44:32Z","leaderTransitions":1}'
  creationTimestamp: 2019-01-15T09:37:03Z
  name: kube-scheduler
  namespace: kube-system
  resourceVersion: "2594"
  selfLink: /api/v1/namespaces/kube-system/endpoints/kube-scheduler
  uid: 1d51b563-18a9-11e9-bfed-000c296ab1b4

3.6 Verify Functionality on All Master Nodes

# kubectl get componentstatuses
NAME                 STATUS      MESSAGE                                                                                                                                  ERROR
controller-manager   Unhealthy   Get http://127.0.0.1:10252/healthz: net/http: HTTP/1.x transport connection broken: malformed HTTP response "\x15\x03\x01\x00\x02\x02"
scheduler            Healthy     ok
etcd-2               Healthy     {"health":"true"}
etcd-0               Healthy     {"health":"true"}
etcd-1               Healthy     {"health":"true"}
  • controller-manager shows Unhealthy because kubectl get componentstatuses probes http://127.0.0.1:10252/healthz, while this deployment serves only https on 10252 (--port=0); the "\x15\x03\x01" bytes are a TLS response to the plain-http probe. The component itself is running.

4. Deploy the Kubernetes Node Components

  • Install dependencies
yum install -y epel-release wget conntrack ipvsadm ipset jq iptables curl sysstat libseccomp && /usr/sbin/modprobe ip_vs

4.1 Install Docker on the Nodes

  • List all available docker-ce versions
yum list docker-ce --showduplicates | sort -r
  • Install a pinned docker-ce version (recommended)
yum install -y docker-ce-18.06.1.ce-3.el7
  • Enable and start docker
systemctl enable docker && systemctl start docker
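  • Optionally configure the docker daemon before first use (a sketch; the mirror URL and log limits are example values, and docker's default cgroupfs driver already matches the kubelet configuration below):
mkdir -p /etc/docker
cat << EOF > /etc/docker/daemon.json
{
  "registry-mirrors": ["https://registry.docker-cn.com"],
  "log-driver": "json-file",
  "log-opts": {"max-size": "100m", "max-file": "3"}
}
EOF
systemctl restart docker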

4.2 Deploy the kubelet Component

  • kubelet runs on every worker node: it receives requests from kube-apiserver, manages Pod containers, and executes interactive commands such as exec, run, and logs.
  • On startup, kubelet automatically registers node information with kube-apiserver; its built-in cadvisor collects and reports the node's resource usage.
  • For security, this document only opens the secure (https) port, authenticates and authorizes requests, and rejects unauthorized access (e.g. from apiserver or heapster).

4.2.1 Download the kubelet Binaries

wget https://dl.k8s.io/v1.12.4/kubernetes-server-linux-amd64.tar.gz
tar -xzvf kubernetes-server-linux-amd64.tar.gz
cd kubernetes/server/bin/
cp kubelet kube-proxy /etc/kubernetes/server/bin/

4.2.2 Create the kubelet Bootstrap kubeconfig File (run on k8s-master01)

# Create a bootstrap token
export BOOTSTRAP_TOKEN=$(kubeadm token create \
  --description kubelet-bootstrap-token \
  --groups system:bootstrappers:k8s-master01 \
  --kubeconfig ~/.kube/config)

# Set cluster parameters
kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/server/ssl/ca.pem \
  --embed-certs=true \
  --server=https://192.168.2.100:8443 \
  --kubeconfig=kubelet-bootstrap-k8s-master01.kubeconfig

# Set client credentials
kubectl config set-credentials kubelet-bootstrap \
  --token=${BOOTSTRAP_TOKEN} \
  --kubeconfig=kubelet-bootstrap-k8s-master01.kubeconfig

# Set the context
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kubelet-bootstrap \
  --kubeconfig=kubelet-bootstrap-k8s-master01.kubeconfig

# Use this context by default
kubectl config use-context default --kubeconfig=kubelet-bootstrap-k8s-master01.kubeconfig
  • Create the bootstrap kubeconfig three times, replacing k8s-master01 with k8s-master02 and k8s-master03; a loop version is sketched below.
  • The kubeconfig embeds a token rather than a certificate; the client certificate is created later by kube-controller-manager.
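  • A loop version of the above (a sketch; same commands, one token and one kubeconfig per master, written where section 4.2.6 expects them):
cd /etc/kubernetes/server/cfg
for node in k8s-master01 k8s-master02 k8s-master03; do
  TOKEN=$(kubeadm token create --description kubelet-bootstrap-token \
    --groups system:bootstrappers:${node} --kubeconfig ~/.kube/config)
  kubectl config set-cluster kubernetes --certificate-authority=/etc/kubernetes/server/ssl/ca.pem \
    --embed-certs=true --server=https://192.168.2.100:8443 \
    --kubeconfig=kubelet-bootstrap-${node}.kubeconfig
  kubectl config set-credentials kubelet-bootstrap --token=${TOKEN} \
    --kubeconfig=kubelet-bootstrap-${node}.kubeconfig
  kubectl config set-context default --cluster=kubernetes --user=kubelet-bootstrap \
    --kubeconfig=kubelet-bootstrap-${node}.kubeconfig
  kubectl config use-context default --kubeconfig=kubelet-bootstrap-${node}.kubeconfig
done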

4.2.3 View the Tokens Created by kubeadm for Each Node

# kubeadm token list --kubeconfig ~/.kube/config
TOKEN                     TTL       EXPIRES                     USAGES                   DESCRIPTION               EXTRA GROUPS
cpwqfo.x1vxl10wzq1e3eid   23h       2019-01-17T10:00:48+08:00   authentication,signing   kubelet-bootstrap-token   system:bootstrappers:k8s-master02
hfn1ki.7550z7bywogn1hjm   23h       2019-01-17T10:00:32+08:00   authentication,signing   kubelet-bootstrap-token   system:bootstrappers:k8s-master03
sexqfs.8vb2su8o8iinp1jh   23h       2019-01-17T09:57:36+08:00   authentication,signing   kubelet-bootstrap-token   system:bootstrappers:k8s-master01
  • The tokens are valid for one day; after expiry they can no longer be used and are cleaned up by kube-controller-manager's tokencleaner (if that controller is enabled);
  • when kube-apiserver receives a kubelet bootstrap token, it sets the request's user to system:bootstrap:<token-id> and the group to system:bootstrappers;

  • View the Secret associated with each token

# kubectl get secrets  -n kube-system
NAME                                             TYPE                                  DATA   AGE
attachdetach-controller-token-tprrl              kubernetes.io/service-account-token   3      16h
bootstrap-signer-token-k9xbg                     kubernetes.io/service-account-token   3      16h
bootstrap-token-cpwqfo                           bootstrap.kubernetes.io/token         7      4m4s
bootstrap-token-hfn1ki                           bootstrap.kubernetes.io/token         7      4m20s
bootstrap-token-sexqfs                           bootstrap.kubernetes.io/token         7      7m16s
certificate-controller-token-8pm9l               kubernetes.io/service-account-token   3      16h
clusterrole-aggregation-controller-token-l6z4j   kubernetes.io/service-account-token   3      16h
cronjob-controller-token-ntrcn                   kubernetes.io/service-account-token   3      16h
daemon-set-controller-token-hpsgr                kubernetes.io/service-account-token   3      16h
default-token-jh6zz                              kubernetes.io/service-account-token   3      16h
deployment-controller-token-l6s7n                kubernetes.io/service-account-token   3      16h
disruption-controller-token-zdb4r                kubernetes.io/service-account-token   3      16h
endpoint-controller-token-8k7lw                  kubernetes.io/service-account-token   3      16h
expand-controller-token-fwrbt                    kubernetes.io/service-account-token   3      16h
generic-garbage-collector-token-v6ll5            kubernetes.io/service-account-token   3      16h
horizontal-pod-autoscaler-token-9f5t5            kubernetes.io/service-account-token   3      16h
job-controller-token-vcjvp                       kubernetes.io/service-account-token   3      16h
namespace-controller-token-zx28b                 kubernetes.io/service-account-token   3      16h
node-controller-token-d9nl5                      kubernetes.io/service-account-token   3      16h
persistent-volume-binder-token-7lcfq             kubernetes.io/service-account-token   3      16h
pod-garbage-collector-token-gx445                kubernetes.io/service-account-token   3      16h
pv-protection-controller-token-lv2n4             kubernetes.io/service-account-token   3      16h
pvc-protection-controller-token-cpvk7            kubernetes.io/service-account-token   3      16h
replicaset-controller-token-52xhf                kubernetes.io/service-account-token   3      16h
replication-controller-token-qbs4f               kubernetes.io/service-account-token   3      16h
resourcequota-controller-token-gphkl             kubernetes.io/service-account-token   3      16h
service-account-controller-token-vk9mn           kubernetes.io/service-account-token   3      16h
service-controller-token-mntf7                   kubernetes.io/service-account-token   3      16h
statefulset-controller-token-ljnbs               kubernetes.io/service-account-token   3      16h
token-cleaner-token-v65g8                        kubernetes.io/service-account-token   3      16h
ttl-controller-token-w5cpc                       kubernetes.io/service-account-token   3      16h

4.2.4 Create the kubelet Parameter Configuration File

  • Since v1.10, some kubelet parameters must be set in a configuration file; kubelet --help notes:
DEPRECATED: This parameter should be set via the config file specified by the Kubelet's --config flag
  • Create the kubelet configuration template
cat > kubelet.config.json <<EOF
{
  "kind": "KubeletConfiguration",
  "apiVersion": "kubelet.config.k8s.io/v1beta1",
  "authentication": {
    "x509": {
      "clientCAFile": "/etc/kubernetes/server/ssl/ca.pem"
    },
    "webhook": {
      "enabled": true,
      "cacheTTL": "2m0s"
    },
    "anonymous": {
      "enabled": false
    }
  },
  "authorization": {
    "mode": "Webhook",
    "webhook": {
      "cacheAuthorizedTTL": "5m0s",
      "cacheUnauthorizedTTL": "30s"
    }
  },
  "address": "NodeIP",
  "port": 10250,
  "readOnlyPort": 0,
  "cgroupDriver": "cgroupfs",
  "hairpinMode": "promiscuous-bridge",
  "serializeImagePulls": false,
  "featureGates": {
    "RotateKubeletClientCertificate": true,
    "RotateKubeletServerCertificate": true
  },
  "clusterDomain": "cluster.local.",
  "clusterDNS": ["10.254.0.2"]
}
EOF
  • address: the kubelet API listen address; must not be 127.0.0.1, otherwise kube-apiserver, heapster, etc. cannot call the kubelet API;
  • readOnlyPort=0: disables the read-only port (default 10255), equivalent to leaving it unset;
  • authentication.anonymous.enabled: set to false to disallow anonymous access to port 10250;
  • authentication.x509.clientCAFile: the CA certificate that signs client certificates; enables x509 client-certificate authentication;
  • authentication.webhook.enabled=true: enables https bearer token authentication;
  • requests that pass neither x509 nor webhook authentication (from kube-apiserver or other clients) are rejected with Unauthorized;
  • authorization.mode=Webhook: kubelet uses the SubjectAccessReview API to ask kube-apiserver whether a given user/group may operate on a resource (RBAC);
  • featureGates.RotateKubeletClientCertificate / featureGates.RotateKubeletServerCertificate: rotate certificates automatically; their validity depends on kube-controller-manager's --experimental-cluster-signing-duration;
  • kubelet must run as root;

4.2.5 Create the kubelet systemd Unit File

# cat /usr/lib/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service
Requires=docker.service

[Service]
WorkingDirectory=/var/lib/kubelet
ExecStart=/etc/kubernetes/server/bin/kubelet \
  --bootstrap-kubeconfig=/etc/kubernetes/server/cfg/kubelet-bootstrap.kubeconfig \
  --cert-dir=/etc/kubernetes/server/ssl \
  --kubeconfig=/etc/kubernetes/server/cfg/kubelet.kubeconfig \
  --config=/etc/kubernetes/server/cfg/kubelet.config.json \
  --network-plugin=cni \
  --hostname-override=NodeIP \
  --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:3.1 \
  --allow-privileged=true \
  --alsologtostderr=true \
  --logtostderr=false \
  --log-dir=/var/log/kubernetes \
  --v=2
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
  • if --hostname-override is set here, kube-proxy must set the same value, otherwise the Node will not be found;
  • --bootstrap-kubeconfig: points to the bootstrap kubeconfig file; kubelet uses its username and token to send a TLS bootstrapping request to kube-apiserver;
  • after K8S approves the kubelet's CSR, the certificate and private key are written to the --cert-dir directory and the --kubeconfig file is generated;

4.2.6 Distribute Configuration Files and Certificates

USER=root
for host in k8s-node01 k8s-node02 k8s-node03;do 
    ssh "${USER}"@$host "mkdir -p /etc/kubernetes/server/{bin,cfg,ssl}" 
    scp /etc/kubernetes/server/bin/kubelet "${USER}"@$host:/etc/kubernetes/server/bin/kubelet
    scp /etc/kubernetes/server/bin/kube-proxy "${USER}"@$host:/etc/kubernetes/server/bin/kube-proxy
    scp /usr/lib/systemd/system/kubelet.service "${USER}"@$host:/usr/lib/systemd/system/kubelet.service
    scp /etc/kubernetes/server/cfg/kubelet.config.json "${USER}"@$host:/etc/kubernetes/server/cfg/kubelet.config.json
    scp /etc/kubernetes/server/ssl/ca*.pem "${USER}"@$host:/etc/kubernetes/server/ssl/ 
done
scp /etc/kubernetes/server/cfg/kubelet-bootstrap-k8s-master01.kubeconfig k8s-node01:/etc/kubernetes/server/cfg/kubelet-bootstrap.kubeconfig
scp /etc/kubernetes/server/cfg/kubelet-bootstrap-k8s-master02.kubeconfig k8s-node02:/etc/kubernetes/server/cfg/kubelet-bootstrap.kubeconfig
scp /etc/kubernetes/server/cfg/kubelet-bootstrap-k8s-master03.kubeconfig k8s-node03:/etc/kubernetes/server/cfg/kubelet-bootstrap.kubeconfig
  • Replace NodeIP in the configuration files with each node's own IP address, for example as sketched below
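  • For example (a sketch, assuming the files were copied by the loop above):
NODES=(k8s-node01 k8s-node02 k8s-node03)
IPS=(192.168.2.111 192.168.2.112 192.168.2.113)
for i in 0 1 2; do
  ssh "${NODES[$i]}" "sed -i 's/NodeIP/${IPS[$i]}/g' \
    /etc/kubernetes/server/cfg/kubelet.config.json /usr/lib/systemd/system/kubelet.service"
done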

4.2.7 Bootstrap Token Auth and Granting Permissions

  • On startup, kubelet checks whether the file configured by --kubeconfig exists; if not, it uses --bootstrap-kubeconfig to send a certificate signing request (CSR) to kube-apiserver.
  • When kube-apiserver receives the CSR, it authenticates the token (created earlier with kubeadm); on success it sets the request's user to system:bootstrap:<token-id> and the group to system:bootstrappers. This process is called Bootstrap Token Auth.
  • By default this user and group have no permission to create CSRs, so kubelet fails to start with an error like:
# sudo journalctl -u kubelet -a |grep -A 2 'certificatesigningrequests'
Jan 16 10:57:58 k8s-node01 kubelet[13154]: F0116 10:57:58.720659   13154 server.go:262] failed to run Kubelet: cannot create certificate signing request: certificatesigningrequests.certificates.k8s.io is forbidden: User "system:bootstrap:sexqfs" cannot create resource "certificatesigningrequests" in API group "certificates.k8s.io" at the cluster scope
Jan 16 10:57:58 k8s-node01 kubelet[13154]: goroutine 1 [running]:
Jan 16 10:57:58 k8s-node01 kubelet[13154]: k8s.io/kubernetes/vendor/github.com/golang/glog.stacks(0xc420b42500, 0xc4208c6000, 0x137, 0x36f)
  • The fix is to create a clusterrolebinding that binds group system:bootstrappers to clusterrole system:node-bootstrapper:
# kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --group=system:bootstrappers
clusterrolebinding.rbac.authorization.k8s.io/kubelet-bootstrap created

4.2.8 Start the kubelet Service

systemctl daemon-reload 
systemctl enable kubelet 
systemctl restart kubelet
  • swap must be disabled, otherwise kubelet fails to start;
  • the working and log directories must be created first;
  • after startup, kubelet sends a CSR via --bootstrap-kubeconfig; once the CSR is approved, kube-controller-manager creates a TLS client certificate and private key for the kubelet and writes the --kubeconfig file.

  • Note: kube-controller-manager must be configured with --cluster-signing-cert-file and --cluster-signing-key-file, otherwise no certificate and key are created for TLS Bootstrap.

  • At this point the CSRs of the three worker nodes are all Pending;
  • the kubelet process is running but its listening ports are not yet up; the steps below are required!

4.2.9 Approve kubelet CSR Requests

  • CSRs can be approved manually or automatically. The automatic way is recommended: since v1.8, certificates generated from approved CSRs can be rotated automatically.

4.2.9.1 Approve CSRs Manually

  • List the CSRs
# kubectl get csr
NAME                                                   AGE     REQUESTOR                 CONDITION
node-csr-_66QdtyS-i4S8DVmFcT8O3TMqvj6I5tKbXIuzEIjHbY   3m46s   system:bootstrap:sexqfs   Pending
node-csr-c9EwBERPn8pjoCkYvX7jV-GansnNO4V2kPT3msYFVu4   3m46s   system:bootstrap:cpwqfo   Pending
node-csr-tPZAgKp8z-3nZMe4rPR2WEscJB-ox61VMQtijy6BO_M   3m46s   system:bootstrap:hfn1ki   Pending
  • Approve a CSR
# kubectl certificate approve node-csr-_66QdtyS-i4S8DVmFcT8O3TMqvj6I5tKbXIuzEIjHbY
certificatesigningrequest.certificates.k8s.io/node-csr-_66QdtyS-i4S8DVmFcT8O3TMqvj6I5tKbXIuzEIjHbY approved
  • Check the approval result
# kubectl get csr
NAME                                                   AGE     REQUESTOR                 CONDITION
node-csr-_66QdtyS-i4S8DVmFcT8O3TMqvj6I5tKbXIuzEIjHbY   4m34s   system:bootstrap:sexqfs   Approved,Issued
node-csr-c9EwBERPn8pjoCkYvX7jV-GansnNO4V2kPT3msYFVu4   4m34s   system:bootstrap:cpwqfo   Pending
node-csr-tPZAgKp8z-3nZMe4rPR2WEscJB-ox61VMQtijy6BO_M   4m34s   system:bootstrap:hfn1ki   Pending
# kubectl describe csr node-csr-_66QdtyS-i4S8DVmFcT8O3TMqvj6I5tKbXIuzEIjHbY
Name:               node-csr-_66QdtyS-i4S8DVmFcT8O3TMqvj6I5tKbXIuzEIjHbY
Labels:             <none>
Annotations:        <none>
CreationTimestamp:  Wed, 16 Jan 2019 10:59:33 +0800
Requesting User:    system:bootstrap:sexqfs
Status:             Approved,Issued
Subject:
         Common Name:    system:node:192.168.2.111
         Serial Number:
         Organization:   system:nodes
Events:  <none>
  • Requesting User: the user that submitted the CSR; kube-apiserver authenticates and authorizes it;
  • Subject: the certificate information being requested;
  • the certificate's CN is system:node:192.168.2.111 and its Organization is system:nodes; kube-apiserver's Node authorization mode grants this certificate the corresponding permissions;
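  • The remaining Pending CSRs can be approved in one shot (a sketch):
kubectl get csr -o name | xargs kubectl certificate approve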

4.2.9.2 Approve CSRs Automatically

  • Create three ClusterRoleBindings, used respectively to auto-approve client certificates and to renew client and server certificates
# cat > csr-crb.yaml <<EOF
 # Approve all CSRs for the group "system:bootstrappers"
 kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: auto-approve-csrs-for-group
 subjects:
 - kind: Group
   name: system:bootstrappers
   apiGroup: rbac.authorization.k8s.io
 roleRef:
   kind: ClusterRole
   name: system:certificates.k8s.io:certificatesigningrequests:nodeclient
   apiGroup: rbac.authorization.k8s.io
---
 # To let a node of the group "system:nodes" renew its own credentials
 kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: node-client-cert-renewal
 subjects:
 - kind: Group
   name: system:nodes
   apiGroup: rbac.authorization.k8s.io
 roleRef:
   kind: ClusterRole
   name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
   apiGroup: rbac.authorization.k8s.io
---
# A ClusterRole which instructs the CSR approver to approve a node requesting a
# serving cert matching its client cert.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: approve-node-server-renewal-csr
rules:
- apiGroups: ["certificates.k8s.io"]
  resources: ["certificatesigningrequests/selfnodeserver"]
  verbs: ["create"]
---
 # To let a node of the group "system:nodes" renew its own server credentials
 kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: node-server-cert-renewal
 subjects:
 - kind: Group
   name: system:nodes
   apiGroup: rbac.authorization.k8s.io
 roleRef:
   kind: ClusterRole
   name: approve-node-server-renewal-csr
   apiGroup: rbac.authorization.k8s.io
EOF
  • auto-approve-csrs-for-group: auto-approves a node's first CSR; note that the group requesting the first CSR is system:bootstrappers;
  • node-client-cert-renewal: auto-approves renewal of a node's expiring client certificates; the generated certificates belong to group system:nodes;
  • node-server-cert-renewal: auto-approves renewal of a node's expiring server certificates; the generated certificates belong to group system:nodes;

  • Apply the configuration

# kubectl apply -f csr-crb.yaml

4.2.10 Check kubelet Status

  • After a short wait (1-10 minutes), the CSRs of all three nodes are approved automatically
# kubectl get csr
NAME                                                   AGE   REQUESTOR                 CONDITION
node-csr-_66QdtyS-i4S8DVmFcT8O3TMqvj6I5tKbXIuzEIjHbY   21m   system:bootstrap:sexqfs   Approved,Issued
node-csr-c9EwBERPn8pjoCkYvX7jV-GansnNO4V2kPT3msYFVu4   21m   system:bootstrap:cpwqfo   Approved,Issued
node-csr-tPZAgKp8z-3nZMe4rPR2WEscJB-ox61VMQtijy6BO_M   21m   system:bootstrap:hfn1ki   Approved,Issued
  • All nodes are Ready
# kubectl get node
NAME            STATUS   ROLES    AGE     VERSION
192.168.2.111   Ready    <none>   17m     v1.12.4
192.168.2.112   Ready    <none>   7m45s   v1.12.4
192.168.2.113   Ready    <none>   7m44s   v1.12.4
  • kube-controller-manager has generated a kubeconfig file and key pair for each node
# tree /etc/kubernetes/server/
/etc/kubernetes/server/
├── bin
│   ├── kubectl
│   ├── kubelet
│   └── kube-proxy
├── cfg
│   ├── kubelet-bootstrap.kubeconfig
│   ├── kubelet.config.json
│   └── kubelet.kubeconfig
└── ssl
    ├── ca-key.pem
    ├── ca.pem
    ├── kubelet-client-2019-01-16-11-03-54.pem
    ├── kubelet-client-current.pem -> /etc/kubernetes/server/ssl/kubelet-client-2019-01-16-11-03-54.pem
    ├── kubelet.crt
    └── kubelet.key
  • The kubelet server certificate is rotated periodically

4.2.11 APIs Exposed by kubelet

  • After startup, kubelet listens on several ports to receive requests from kube-apiserver and other components
# netstat -lnpt|grep kubelet
tcp        0      0 127.0.0.1:10248         0.0.0.0:*               LISTEN      13537/kubelet
tcp        0      0 192.168.2.111:10250     0.0.0.0:*               LISTEN      13537/kubelet
tcp        0      0 127.0.0.1:39767         0.0.0.0:*               LISTEN      13537/kubelet
  • 4194: cadvisor http service (random port);
  • 10248: healthz http service;
  • 10250: https API service; note the read-only port 10255 is not opened;

  • kubelet accepts https requests on port 10250:
    • /pods, /runningpods
    • /metrics, /metrics/cadvisor, /metrics/probes
    • /spec
    • /stats, /stats/container
    • /logs
    • /run/, /exec/, /attach/, /portForward/, /containerLogs/ and other management endpoints;
    • details: https://github.com/kubernetes/kubernetes/blob/master/pkg/kubelet/server/server.go#L434:3
  • Since anonymous authentication is disabled and webhook authorization is enabled, every request to the 10250 https API must be authenticated and authorized.

  • For example, when running kubectl exec -it nginx-ds-5rmws -- sh, kube-apiserver sends the following request to kubelet:

POST /exec/default/nginx-ds-5rmws/my-nginx?command=sh&input=1&output=1&tty=1
  • The predefined ClusterRole system:kubelet-api-admin grants access to all kubelet APIs:
# kubectl describe clusterrole system:kubelet-api-admin
Name:         system:kubelet-api-admin
Labels:       kubernetes.io/bootstrapping=rbac-defaults
Annotations:  rbac.authorization.kubernetes.io/autoupdate: true
PolicyRule:
  Resources      Non-Resource URLs  Resource Names  Verbs
  ---------      -----------------  --------------  -----
  nodes/log      []                 []              [*]
  nodes/metrics  []                 []              [*]
  nodes/proxy    []                 []              [*]
  nodes/spec     []                 []              [*]
  nodes/stats    []                 []              [*]
  nodes          []                 []              [get list watch proxy]

4.2.12 kubelet API Authentication and Authorization

  • kubelet's configuration file kubelet.config.json sets the following authentication parameters:
    • authentication.anonymous.enabled: false — anonymous access to port 10250 is not allowed;
    • authentication.x509.clientCAFile: the CA that signs client certificates; enables https client-certificate authentication;
    • authentication.webhook.enabled=true: enables https bearer token authentication;
  • and the following authorization parameter:
    • authorization.mode=Webhook: enables RBAC authorization;
  • on receiving a request, kubelet verifies the certificate signature against clientCAFile, or checks whether the bearer token is valid. If both fail, the request is rejected with Unauthorized:
# curl -s --cacert /etc/kubernetes/server/ssl/ca.pem https://192.168.2.111:10250/metrics
# curl -s --cacert /etc/kubernetes/server/ssl/ca.pem -H "Authorization: Bearer 123456"  https://192.168.2.111:10250/metrics
  • Certificate authentication and authorization
# curl -s --cacert /etc/kubernetes/server/ssl/ca.pem --cert /etc/kubernetes/server/ssl/kube-controller-manager.pem --key /etc/kubernetes/server/ssl/kube-controller-manager-key.pem https://192.168.2.111:10250/metrics


#  curl -s --cacert /etc/kubernetes/server/ssl/ca.pem --cert /etc/kubernetes/server/ssl/admin.pem --key /etc/kubernetes/server/ssl/admin-key.pem https://192.168.2.111:10250/metrics|head
  • Bearer token authentication and authorization
  • Create a ServiceAccount and bind it to ClusterRole system:kubelet-api-admin so that it may call the kubelet API
kubectl create sa kubelet-api-test
kubectl create clusterrolebinding kubelet-api-test --clusterrole=system:kubelet-api-admin --serviceaccount=default:kubelet-api-test
SECRET=$(kubectl get secrets | grep kubelet-api-test | awk '{print $1}')
TOKEN=$(kubectl describe secret ${SECRET} | grep -E '^token' | awk '{print $2}')
echo ${TOKEN}

curl -s --cacert /etc/kubernetes/server/ssl/ca.pem -H "Authorization: Bearer ${TOKEN}" https://192.168.2.111:10250/metrics|head

4.3 Deploy the kube-proxy Component

  • kube-proxy runs on all worker nodes; it watches for changes to services and endpoints in apiserver and creates routing rules to load-balance services.
  • This document deploys kube-proxy in ipvs mode.

4.3.1 Generate the kube-proxy Certificate

  • Certificate signing request
cat << EOF | tee kube-proxy-csr.json
{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "Beijing",
      "ST": "Beijing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
  • CN: sets the certificate's User to system:kube-proxy;
  • the predefined RoleBinding system:node-proxier binds User system:kube-proxy to Role system:node-proxier, which grants permission to call kube-apiserver's Proxy-related APIs;
  • the certificate is only used by kube-proxy as a client certificate, so the hosts field is empty;

  • Generate the certificate

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
  • Copy the certificates
mkdir /etc/kubernetes/server/{bin,cfg,ssl} -p
cp kube-proxy-key.pem  kube-proxy.pem /etc/kubernetes/server/ssl/

4.3.2 Create and Distribute the kubeconfig File

kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/server/ssl/ca.pem \
  --embed-certs=true \
  --server=https://192.168.2.100:8443 \
  --kubeconfig=kube-proxy.kubeconfig

kubectl config set-credentials kube-proxy \
  --client-certificate=/etc/kubernetes/server/ssl/kube-proxy.pem \
  --client-key=/etc/kubernetes/server/ssl/kube-proxy-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-proxy.kubeconfig

kubectl config set-context default \
  --cluster=kubernetes \
  --user=kube-proxy \
  --kubeconfig=kube-proxy.kubeconfig

kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
  • --embed-certs=true: embeds the certificate contents (ca.pem and kube-proxy.pem) into the generated kube-proxy.kubeconfig; without it, only the certificate file paths are written

4.3.3 Create the kube-proxy systemd Unit File

  • Since v1.10, some kube-proxy parameters can be set in a configuration file, which can be generated with the --write-config-to option; see the kubeproxyconfig type definitions: https://github.com/kubernetes/kubernetes/blob/master/pkg/proxy/apis/kubeproxyconfig/types.go
# cat /usr/lib/systemd/system/kube-proxy.service 
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
WorkingDirectory=/var/lib/kube-proxy
ExecStart=/etc/kubernetes/server/bin/kube-proxy \
  --bind-address=192.168.2.111 \
  --hostname-override=k8s-node01 \
  --cluster-cidr=172.16.0.0/16 \
  --kubeconfig=/etc/kubernetes/server/cfg/kube-proxy.kubeconfig \
  --feature-gates=SupportIPVSProxyMode=true \
  --masquerade-all \
  --proxy-mode=ipvs \
  --ipvs-min-sync-period=5s \
  --ipvs-sync-period=5s \
  --ipvs-scheduler=rr \
  --alsologtostderr=true \
  --logtostderr=false \
  --log-dir=/var/lib/kube-proxy/log \
  --v=2

Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
  • bind-address: listen address;
  • kubeconfig: the kubeconfig file used to connect to apiserver;
  • cluster-cidr: kube-proxy uses --cluster-cidr to tell in-cluster traffic from external traffic; SNAT for requests to Service IPs only happens when --cluster-cidr or --masquerade-all is set;
  • hostname-override: must match the kubelet's value, otherwise kube-proxy cannot find the Node after startup and creates no ipvs rules;
  • proxy-mode: use ipvs mode;
  • adjust the host-specific values for each node (a sketch follows the distribution loop in 4.3.4); the cluster CIDR (172.16.0.0/16) is the pod network range.

4.3.4 Distribute Configuration Files and Certificates

USER=root
for host in k8s-node01 k8s-node02 k8s-node03;do 
    ssh "${USER}"@$host "mkdir -p mkdir -p /var/lib/kube-proxy/log" 
    scp /usr/lib/systemd/system/kube-proxy.service "${USER}"@$host:/usr/lib/systemd/system/kube-proxy.service
    scp /etc/kubernetes/server/cfg/kube-proxy.kubeconfig "${USER}"@$host:/etc/kubernetes/server/cfg/kube-proxy.kubeconfig
    scp /etc/kubernetes/server/ssl/kube-proxy*.pem "${USER}"@$host:/etc/kubernetes/server/ssl/ 
done
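  • The copied unit file still contains k8s-node01's bind-address and hostname-override; adjust them on the other nodes, e.g. (a sketch):
ssh k8s-node02 "sed -i -e 's/192.168.2.111/192.168.2.112/' -e 's/k8s-node01/k8s-node02/' /usr/lib/systemd/system/kube-proxy.service"
ssh k8s-node03 "sed -i -e 's/192.168.2.111/192.168.2.113/' -e 's/k8s-node01/k8s-node03/' /usr/lib/systemd/system/kube-proxy.service"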

4.3.5 Start the kube-proxy Service

systemctl daemon-reload
systemctl enable kube-proxy
systemctl restart kube-proxy

4.3.6 Check the Startup Result

systemctl status kube-proxy|grep Active
  • Make sure the state is active (running); otherwise check the logs to find the cause:
journalctl -u kube-proxy
  • Check the listening ports
# netstat -lnpt|grep kube-proxy
tcp        0      0 127.0.0.1:10249         0.0.0.0:*               LISTEN      21237/kube-proxy
tcp6       0      0 :::10256                :::*                    LISTEN      21237/kube-proxy
  • 10249: HTTP Prometheus metrics port
  • 10256: HTTP healthz port
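  • Both ports can be probed directly to confirm they respond (output abbreviated):
curl -s http://127.0.0.1:10249/metrics | head -3
curl -s http://127.0.0.1:10256/healthz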

4.3.7 Check the IPVS forwarding rules

# ipvsadm -L -n
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  10.254.0.1:443 rr
  -> 192.168.2.101:6443           Masq    1      0          0
  -> 192.168.2.102:6443           Masq    1      0          0
  -> 192.168.2.103:6443           Masq    1      0          0
  • As shown, all requests to port 443 of the kubernetes cluster IP are forwarded to port 6443 of kube-apiserver.

5, Configure the calico network

5.1 Install calico

  • Mainly based on the official documentation: https://docs.projectcalico.org/v3.1/getting-started/kubernetes/installation/calico

5.1.1 Download calico.yaml and rbac.yaml

curl https://docs.projectcalico.org/v3.1/getting-started/kubernetes/installation/rbac.yaml -O
curl https://docs.projectcalico.org/v3.1/getting-started/kubernetes/installation/hosted/calico.yaml -O

5.1.2 Configure the calico files

  • etcd endpoints
ETCD_ENDPOINTS="https://192.168.2.101:2379,https://192.168.2.102:2379,https://192.168.2.103:2379"
sed -i "s#.*etcd_endpoints:.*#  etcd_endpoints: \"${ETCD_ENDPOINTS}\"#g" calico.yaml
sed -i "s#__ETCD_ENDPOINTS__#${ETCD_ENDPOINTS}#g" calico.yaml
  • etcd certificates
ETCD_CERT=`cat /etc/kubernetes/etcd/ssl/etcd.pem | base64 | tr -d '\n'`
ETCD_KEY=`cat /etc/kubernetes/etcd/ssl/etcd-key.pem | base64 | tr -d '\n'`
ETCD_CA=`cat /etc/kubernetes/etcd/ssl/ca.pem | base64 | tr -d '\n'`

sed -i "s#.*etcd-cert:.*#  etcd-cert: ${ETCD_CERT}#g" calico.yaml
sed -i "s#.*etcd-key:.*#  etcd-key: ${ETCD_KEY}#g" calico.yaml
sed -i "s#.*etcd-ca:.*#  etcd-ca: ${ETCD_CA}#g" calico.yaml

sed -i 's#.*etcd_ca:.*#  etcd_ca: "/calico-secrets/etcd-ca"#g' calico.yaml
sed -i 's#.*etcd_cert:.*#  etcd_cert: "/calico-secrets/etcd-cert"#g' calico.yaml
sed -i 's#.*etcd_key:.*#  etcd_key: "/calico-secrets/etcd-key"#g' calico.yaml

sed -i "s#__ETCD_KEY_FILE__#/etc/kubernetes/etcd/ssl/etcd-key.pem#g" calico.yaml
sed -i "s#__ETCD_CERT_FILE__#/etc/kubernetes/etcd/ssl/etcd.pem#g" calico.yaml
sed -i "s#__ETCD_CA_CERT_FILE__#/etc/kubernetes/etcd/ssl/ca.pem#g" calico.yaml
sed -i "s#__KUBECONFIG_FILEPATH__#/etc/cni/net.d/calico-kubeconfig#g" calico.yaml
  • Configure calico BGP mode and change the IP pool CIDR to 172.16.0.0/16
sed -i '/CALICO_IPV4POOL_IPIP/{n;s/Always/off/g}' calico.yaml
sed -i '/CALICO_IPV4POOL_CIDR/{n;s/192.168.0.0/172.16.0.0/g}' calico.yaml

5.1.3 Install calico with kubectl

kubectl apply -f calico.yaml
  • Note: calico-node needs privileged access to the operating system, so --allow-privileged=true must be added to the apiserver and kubelet
  • Note: for kubelet to use calico, add --network-plugin=cni
  • Note: configure kube-proxy with --cluster-cidr=172.16.0.0/16
  • Restart the corresponding services (a quick check is sketched below)
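  • Before restarting, a quick grep confirms the flags are in place (assuming they are set in the unit files, as in the earlier sections):
grep -- '--allow-privileged' /usr/lib/systemd/system/kube-apiserver.service
grep -E -- '--network-plugin=cni|--allow-privileged' /usr/lib/systemd/system/kubelet.service
grep -- '--cluster-cidr' /usr/lib/systemd/system/kube-proxy.service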

5.1.4 Check the status

# kubectl get pod -n kube-system -o wide
NAME                                       READY   STATUS    RESTARTS   AGE     IP              NODE            NOMINATED NODE
calico-kube-controllers-7875f976cd-gxfdj   1/1     Running   1          20m     192.168.2.113   192.168.2.113   <none>
calico-node-78gtd                          2/2     Running   2          20m     192.168.2.111   192.168.2.111   <none>
calico-node-dxw6z                          2/2     Running   2          20m     192.168.2.113   192.168.2.113   <none>
calico-node-wvrxd                          2/2     Running   2          20m     192.168.2.112   192.168.2.112   <none>

6, Deploy kubernetes DNS (run on a master)

6.1 Download the configuration file

wget https://github.com/kubernetes/kubernetes/releases/download/v1.12.4/kubernetes.tar.gz
tar -zxvf kubernetes.tar.gz
mkdir -p /etc/kubernetes/coredns
mv kubernetes/cluster/addons/dns/coredns/coredns.yaml.base /etc/kubernetes/coredns/coredns.yaml

6.2 Modify the configuration file

cd /etc/kubernetes/coredns
sed -i 's#kubernetes __PILLAR__DNS__DOMAIN__#kubernetes cluster.local.#g' coredns.yaml
sed -i 's#clusterIP: __PILLAR__DNS__SERVER__#clusterIP: 10.254.0.2#g' coredns.yaml
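  • A quick grep verifies that both placeholders were replaced:
grep -nE 'cluster.local|clusterIP' coredns.yaml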

6.3 Create CoreDNS

kubectl apply -f coredns.yaml

6.4 Check the CoreDNS service status

kubectl get pod -n kube-system -o wide
NAME                                       READY   STATUS    RESTARTS   AGE     IP              NODE            NOMINATED NODE
calico-kube-controllers-7875f976cd-gxfdj   1/1     Running   1          20m     192.168.2.113   192.168.2.113   <none>
calico-node-78gtd                          2/2     Running   2          20m     192.168.2.111   192.168.2.111   <none>
calico-node-dxw6z                          2/2     Running   2          20m     192.168.2.113   192.168.2.113   <none>
calico-node-wvrxd                          2/2     Running   2          20m     192.168.2.112   192.168.2.112   <none>
coredns-74c656b9f-9f8l8                    1/1     Running   0          3m56s   172.16.70.131   192.168.2.113   <none>
kubectl get svc --all-namespaces
NAMESPACE     NAME         TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)         AGE
default       kubernetes   ClusterIP   10.254.0.1      <none>        443/TCP         24h
kube-system   kube-dns     ClusterIP   10.254.0.2      <none>        53/UDP,53/TCP   27s

7, Verify cluster functionality

7.1 Check node status

# kubectl get nodes
NAME            STATUS   ROLES    AGE     VERSION
192.168.2.111   Ready    <none>   3h39m   v1.12.4
192.168.2.112   Ready    <none>   3h30m   v1.12.4
192.168.2.113   Ready    <none>   3h30m   v1.12.4

7.2 Create an nginx web test manifest

# cat > nginx-web.yml << EOF
apiVersion: v1
kind: Service
metadata:
  name: nginx-web
  labels:
    tier: frontend
spec:
  type: NodePort
  selector:
    tier: frontend
  ports:
  - name: http
    port: 80
    targetPort: 80
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: nginx-con
  labels:
    tier: frontend
spec:
  replicas: 3
  template:
    metadata:
      labels:
        tier: frontend
    spec:
      containers:
      - name: nginx-pod
        image: nginx
        ports:
        - containerPort: 80
EOF
  • Apply the nginx-web.yml manifest
kubectl create -f nginx-web.yml
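  • Waiting for the rollout to finish avoids racing the connectivity checks below:
kubectl rollout status deployment/nginx-con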

7.3 Verify Pod IP connectivity across nodes

# kubectl get pod -o wide
NAME                         READY   STATUS    RESTARTS   AGE   IP               NODE            NOMINATED NODE
nginx-con-594b8d6b48-47b5l   1/1     Running   0          12s   172.16.70.135    192.168.2.113   <none>
nginx-con-594b8d6b48-f2pzv   1/1     Running   0          12s   172.16.200.9     192.168.2.111   <none>
nginx-con-594b8d6b48-g99mm   1/1     Running   0          12s   172.16.141.196   192.168.2.112   <none>
  • The nginx Pod IPs are 172.16.70.135, 172.16.200.9, and 172.16.141.196; ping all three from every Node to confirm connectivity
# ping -c 3 172.16.70.135
PING 172.16.70.135 (172.16.70.135) 56(84) bytes of data.
64 bytes from 172.16.70.135: icmp_seq=1 ttl=63 time=0.346 ms
64 bytes from 172.16.70.135: icmp_seq=2 ttl=63 time=0.145 ms
64 bytes from 172.16.70.135: icmp_seq=3 ttl=63 time=0.161 ms

--- 172.16.70.135 ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 1999ms
rtt min/avg/max/mdev = 0.145/0.217/0.346/0.092 ms
# ping -c 3 172.16.200.9
PING 172.16.200.9 (172.16.200.9) 56(84) bytes of data.
64 bytes from 172.16.200.9: icmp_seq=1 ttl=63 time=0.261 ms
64 bytes from 172.16.200.9: icmp_seq=2 ttl=63 time=0.187 ms
64 bytes from 172.16.200.9: icmp_seq=3 ttl=63 time=0.221 ms

--- 172.16.200.9 ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 1999ms
rtt min/avg/max/mdev = 0.187/0.223/0.261/0.030 ms
# ping -c 3 172.16.141.196
PING 172.16.141.196 (172.16.141.196) 56(84) bytes of data.
64 bytes from 172.16.141.196: icmp_seq=1 ttl=63 time=0.379 ms
64 bytes from 172.16.141.196: icmp_seq=2 ttl=63 time=0.221 ms
64 bytes from 172.16.141.196: icmp_seq=3 ttl=63 time=0.233 ms

--- 172.16.141.196 ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 2000ms
rtt min/avg/max/mdev = 0.221/0.277/0.379/0.074 ms

7.4 Check the service cluster IP

# kubectl get svc
NAME         TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
kubernetes   ClusterIP   10.254.0.1      <none>        443/TCP        43h
nginx-web    NodePort    10.254.29.144   <none>        80:30945/TCP   11m
  • 10.254.29.144 is the cluster IP of the nginx service, which load-balances across the three Pods created above.
  • Port 80 is the cluster IP's port; 30945 is the port opened on every node, so the service can also be reached as nodeip:nodeport

7.5 Verify service reachability

  • Access the application from any other host on the LAN using nodeip:nodeport (the node IPs are private, so the client must be on the same LAN)
# curl -I 192.168.2.111:30945
HTTP/1.1 200 OK
Server: nginx/1.15.8
Date: Thu, 17 Jan 2019 03:43:21 GMT
Content-Type: text/html
Content-Length: 612
Last-Modified: Tue, 25 Dec 2018 09:56:47 GMT
Connection: keep-alive
ETag: "5c21fedf-264"
Accept-Ranges: bytes
  • On a host inside the calico network, access the application via the cluster IP
curl -I 10.254.29.144
HTTP/1.1 200 OK
Server: nginx/1.15.8
Date: Thu, 17 Jan 2019 03:44:06 GMT
Content-Type: text/html
Content-Length: 612
Last-Modified: Tue, 25 Dec 2018 09:56:47 GMT
Connection: keep-alive
ETag: "5c21fedf-264"
Accept-Ranges: bytes

7.6 Create a simple CentOS Pod to test CoreDNS

  • centos.yaml
# cat centos.yaml
apiVersion: v1
kind: Pod
metadata:
  name: centos-test
  namespace: default
spec:
  containers:
  - image: centos
    command:
      - sleep
      - "3600"
    imagePullPolicy: IfNotPresent
    name: centos-test
  restartPolicy: Always
  • Create it
kubectl create -f centos.yaml
  • Install curl and nslookup (bind-utils) inside the container
#  kubectl exec -it centos-test -- yum install bind-utils curl -y
  • Verify
# kubectl exec -it centos-test -- curl -I 192.168.2.100:30945
HTTP/1.1 200 OK
Server: nginx/1.15.8
Date: Thu, 17 Jan 2019 04:57:53 GMT
Content-Type: text/html
Content-Length: 612
Last-Modified: Tue, 25 Dec 2018 09:56:47 GMT
Connection: keep-alive
ETag: "5c21fedf-264"
Accept-Ranges: bytes
# kubectl exec -it centos-test -- curl -I  nginx-web.default.svc.cluster.local
HTTP/1.1 200 OK
Server: nginx/1.15.8
Date: Thu, 17 Jan 2019 04:58:56 GMT
Content-Type: text/html
Content-Length: 612
Last-Modified: Tue, 25 Dec 2018 09:56:47 GMT
Connection: keep-alive
ETag: "5c21fedf-264"
Accept-Ranges: bytes
# kubectl exec -it centos-test -- nslookup nginx-web.default.svc.cluster.local
Server:     10.254.0.2
Address:    10.254.0.2#53

Name:   nginx-web.default.svc.cluster.local
Address: 10.254.29.144

8, Deploy metrics-server

8.1 Generate certificates

  • front-proxy-csr.json
# cat > front-proxy-csr.json << EOF
{
  "CN": "system:front-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "Beijing",
      "ST": "Beijing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
  • Generate the certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes front-proxy-csr.json | cfssljson -bare front-proxy
  • Distribute the certificates
USER=root
CONTROL_PLANE_IPS="k8s-master01 k8s-master02 k8s-master03 k8s-node01 k8s-node02 k8s-node03"
for host in $CONTROL_PLANE_IPS; do
    scp front-proxy-key.pem front-proxy.pem "${USER}"@$host:/etc/kubernetes/server/ssl/
done
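  • Optionally confirm the certificate's CN before wiring it into the apiserver; it must match the --requestheader-allowed-names value used in 8.2:
openssl x509 -in front-proxy.pem -noout -subject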

8.2 Add the required kubernetes configuration before installing

  • Add the following startup flag to /usr/lib/systemd/system/kube-controller-manager.service
--horizontal-pod-autoscaler-use-rest-clients=true
  • Add the following startup flags to /usr/lib/systemd/system/kube-apiserver.service
--requestheader-client-ca-file=/etc/kubernetes/server/ssl/ca.pem  \
  --requestheader-allowed-names=system:front-proxy  \
  --requestheader-extra-headers-prefix=X-Remote-Extra- \
  --requestheader-group-headers=X-Remote-Group \
  --requestheader-username-headers=X-Remote-User \
  --proxy-client-cert-file=/etc/kubernetes/server/ssl/front-proxy.pem \
  --proxy-client-key-file=/etc/kubernetes/server/ssl/front-proxy-key.pem \
  --enable-aggregator-routing=true
  • Restart the services
systemctl daemon-reload
systemctl restart kube-apiserver
systemctl restart kube-controller-manager

8.3 Download the metrics-server configuration files

  • Option 1: use the official configuration files
wget https://github.com/kubernetes/kubernetes/releases/download/v1.12.4/kubernetes.tar.gz
tar zxvf kubernetes.tar.gz
cp -a kubernetes/cluster/addons/metrics-server/ /etc/kubernetes/
  • Option 2: use the configuration files from my GitHub repo
    • https://github.com/xiaoqshuo/k8-ha-install/tree/Binary_deployment_12.4/metrics-server

8.4 Modify the configuration file

# cat > metrics-server-deployment.yaml << EOF
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: metrics-server
  namespace: kube-system
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: metrics-server
  namespace: kube-system
  labels:
    k8s-app: metrics-server
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
  template:
    metadata:
      name: metrics-server
      labels:
        k8s-app: metrics-server
    spec:
      serviceAccountName: metrics-server
      volumes:
      # mount in tmp so we can safely use from-scratch images and/or read-only containers
      - name: tmp-dir
        emptyDir: {}
      containers:
      - name: metrics-server
        image: xiaoqshuo/metrics-server-amd64:v0.3.1
        imagePullPolicy: Always
        command:
        - /metrics-server
        - --kubelet-insecure-tls
        - --kubelet-preferred-address-types=InternalIP
        volumeMounts:
        - name: tmp-dir
          mountPath: /tmp
EOF

8.5 Create metrics-server

# kubectl apply -f metrics-server

8.6 Check the status

# kubectl get -n kube-system all -o wide| grep metrics
pod/metrics-server-56f4b88678-x9djk            1/1     Running   0          26m     172.16.200.12   192.168.2.111   <none>
service/metrics-server         ClusterIP   10.254.130.198   <none>        443/TCP         65m   k8s-app=metrics-server
deployment.apps/metrics-server            1         1         1            1           34m   metrics-server            xiaoqshuo/metrics-server-amd64:v0.3.1          k8s-app=metrics-server
replicaset.apps/metrics-server-56f4b88678            1         1         1       26m   metrics-server            xiaoqshuo/metrics-server-amd64:v0.3.1           k8s-app=metrics-server,pod-template-hash=56f4b88678
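  • Once the metrics API is registered, kubectl top should return data (it may take a minute or two after startup):
kubectl top nodes
kubectl top pods -n kube-system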

9, Deploy the dashboard

  • References:
    • https://github.com/kubernetes/dashboard#getting-started
    • https://github.com/kubernetes/dashboard/wiki/Creating-sample-user

9.1 Download the configuration file

wget https://raw.githubusercontent.com/kubernetes/dashboard/v1.10.1/src/deploy/recommended/kubernetes-dashboard.yaml

9.2 Modify the configuration file

  • In kubernetes-dashboard.yaml, change the image and add a NodePort
# ------------------- Dashboard Service ------------------- #

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  type: NodePort
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30000
  selector:
    k8s-app: kubernetes-dashboard
  • user-admin.yaml
# cat > user-admin.yaml << EOF
# ------------------- ServiceAccount ------------------- #

apiVersion: v1
kind: ServiceAccount
metadata:
  name: user-admin
  namespace: kube-system

---
# ------------------- ClusterRoleBinding ------------------- #

apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: user-admin
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: user-admin
  namespace: kube-system
EOF

9.3 Create the dashboard

kubectl apply -f kubernetes-dashboard.yaml 
kubectl apply -f user-admin.yaml

9.4 Check the status

# kubectl get -n kube-system all -o wide| grep dashboard
pod/kubernetes-dashboard-66468c4f76-nfdwv      1/1     Running   0          20m     172.16.195.1    192.168.2.103   <none>
service/kubernetes-dashboard   NodePort    10.254.58.73     <none>        443:30000/TCP   21m   k8s-app=kubernetes-dashboard
deployment.apps/kubernetes-dashboard      1         1         1            1           21m   kubernetes-dashboard      xiaoqshuo/kubernetes-dashboard-amd64:v1.10.1   k8s-app=kubernetes-dashboard
replicaset.apps/kubernetes-dashboard-66468c4f76      1         1         1       20m   kubernetes-dashboard      xiaoqshuo/kubernetes-dashboard-amd64:v1.10.1    k8s-app=kubernetes-dashboard,pod-template-hash=66468c4f76

9.5 Get the login token

kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep user-admin | awk '{print $1}')

9.6 UI access

  • Open https://192.168.2.100:30000/#!/login in Firefox

9.6.1 Access with the Chrome browser

  • Option 1: in the Chrome shortcut (launcher --> Properties --> Target), append --test-type --ignore-certificate-errors
  • Option 2: delete the old certificate secret and generate a new one
# Generate the certificate (the CN should be the address you use to reach the dashboard)
openssl genrsa -out dashboard.key 2048
openssl req -days 3650 -new -out dashboard.csr -key dashboard.key -subj '/CN=172.23.0.217'
openssl x509 -req -in dashboard.csr -signkey dashboard.key -out dashboard.crt
# Delete the old certificate secret
kubectl delete secret kubernetes-dashboard-certs -n kube-system
# Create the new certificate secret
kubectl create secret generic kubernetes-dashboard-certs --from-file=dashboard.key --from-file=dashboard.crt -n kube-system
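  • The dashboard only reads its certificates at startup, so recreate the pod to pick up the new secret (the label matches the deployment above):
kubectl delete pod -n kube-system -l k8s-app=kubernetes-dashboard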

  • References
    • https://zhangguanzhang.github.io/2018/09/18/kubernetes-1-11-x-bin/
    • https://www.cnblogs.com/harlanzhang/p/10116118.html
    • http://blog.51cto.com/lizhenliang/2325770
    • https://www.cnblogs.com/root0/p/9953287.html
    • http://blog.51cto.com/ylw6006/2316767
    • https://blog.csdn.net/mario_hao/article/details/80559354
    • https://www.cnblogs.com/MrVolleyball/p/9920964.html