Installing a Kubernetes cluster from binaries

All operations are performed as the root user. For high availability, an odd number of masters (at least 3) is generally recommended; here we use 3 masters.

Lab environment overview (see the reference on GitHub):

master: kube-apiserver, kube-controller-manager, kube-scheduler, flanneld

node: kubelet, kube-proxy, flannel

Service_CIDR: 10.254.0.0/16, the service network segment; unroutable before deployment, reachable inside the cluster via IP:Port afterwards

Cluster_CIDR: 172.30.0.0/16, the pod network segment; unroutable before deployment, routable afterwards (guaranteed by flanneld)

Hostname   IP address       Deployed software                 Role
k8s-m12    192.168.10.12    keepalived+haproxy+etcd+master    master
k8s-m13    192.168.10.13    keepalived+haproxy+etcd+master    master
k8s-m14    192.168.10.14    keepalived+haproxy+etcd+master    master
k8s-n15    192.168.10.15    node+docker                       worker
k8s-n16    192.168.10.16    node+docker                       worker
VIP        192.168.10.100                                     VIP

2.1. Download the installation packages

Kubernetes GitHub releases page: https://github.com/kubernetes/kubernetes/releases

Download the kubernetes-server-linux-amd64.tar.gz package from Server Binaries

Download the kubernetes-node-linux-amd64.tar.gz package from Node Binaries

Download the kubernetes-client-linux-amd64.tar.gz package from Client Binaries

For the various CA certificate types, see the reference.

Download locations for the components of each Kubernetes version:

https://github.com/kubernetes/kubernetes/tree/v1.14.3

#kubernetes
wget https://storage.googleapis.com/kubernetes-release/release/v1.14.3/kubernetes-node-linux-amd64.tar.gz
wget https://storage.googleapis.com/kubernetes-release/release/v1.14.3/kubernetes-client-linux-amd64.tar.gz
wget https://storage.googleapis.com/kubernetes-release/release/v1.14.3/kubernetes-server-linux-amd64.tar.gz
wget https://storage.googleapis.com/kubernetes-release/release/v1.14.3/kubernetes.tar.gz
#etcd
wget https://github.com/etcd-io/etcd/releases/download/v3.3.13/etcd-v3.3.13-linux-amd64.tar.gz
#flannel
wget https://github.com/coreos/flannel/releases/download/v0.11.0/flannel-v0.11.0-linux-amd64.tar.gz
#cni-plugins
wget https://github.com/containernetworking/plugins/releases/download/v0.8.1/cni-plugins-linux-amd64-v0.8.1.tgz
#docker
wget https://download.docker.com/linux/static/stable/x86_64/docker-18.09.6.tgz
#cfssl
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
#heapster
wget https://github.com/kubernetes-retired/heapster/archive/v1.5.4.tar.gz

2.2. Environment preparation

#1. On the .12 machine, generate an SSH key pair for passwordless login
ssh-keygen -t rsa
ssh-copy-id 192.168.10.13  #copy the key to each of the other nodes in turn

#2. Disable the firewall; run the following on all machines
systemctl stop firewalld
systemctl disable firewalld

#3. Disable the swap partition
swapoff -a
sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab

#4. Disable SELinux
sestatus    #check the SELinux status
setenforce 0        #temporarily disable SELinux
sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config

#5. For upgrading the kernel, see: https://www.cnblogs.com/fan-gx/p/11006762.html

#6. Raise the open-file and process limits
cat <<EOF >>/etc/security/limits.conf
* soft nofile 65536
* hard nofile 65536
* soft nproc 65536
* hard nproc 65536
* soft  memlock  unlimited
* hard memlock  unlimited
EOF

#7. Install ipvs
yum install ipvsadm ipset sysstat conntrack libseccomp -y
#Load the required kernel modules and make them load automatically at boot
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF

#Then run the script
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules
lsmod | grep -e ip_vs -e nf_conntrack_ipv4

#8. Adjust the system parameters
cat <<EOF >  /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_nonlocal_bind = 1
net.ipv4.ip_forward = 1
vm.swappiness=0
EOF
sysctl --system
#----------- The following more extensive parameter set is borrowed from another reference, shown for comparison ---------#
# cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_intvl = 30
net.ipv4.tcp_keepalive_probes = 10
net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
net.ipv6.conf.lo.disable_ipv6 = 1
net.ipv4.neigh.default.gc_stale_time = 120
net.ipv4.conf.all.rp_filter = 0
net.ipv4.conf.default.rp_filter = 0
net.ipv4.conf.default.arp_announce = 2
net.ipv4.conf.lo.arp_announce = 2
net.ipv4.conf.all.arp_announce = 2
net.ipv4.ip_forward = 1
net.ipv4.tcp_max_tw_buckets = 5000
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 1024
net.ipv4.tcp_synack_retries = 2
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.netfilter.nf_conntrack_max = 2310720
fs.inotify.max_user_watches=89100
fs.may_detach_mounts = 1
fs.file-max = 52706963
fs.nr_open = 52706963
net.bridge.bridge-nf-call-arptables = 1
vm.swappiness = 0
vm.overcommit_memory=1
vm.panic_on_oom=0
EOF

#9. In production it is recommended to reserve some memory so that SSH stays reachable when memory is exhausted (reserve 2G on a 32G machine, 3G on roughly 250G, 5G on 500G). The setting below reserves 5G
echo 'vm.min_free_kbytes=5000000' >> /etc/sysctl.conf
sysctl -p

2.3. Deploy Docker

A binary deployment is described at https://www.kubernetes.org.cn/3831.html; for convenience, Docker is installed here via yum on all nodes.

#1. Install the yum repository utilities
yum install -y yum-utils device-mapper-persistent-data lvm2

#2. Download the official docker-ce yum repository configuration
# yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
# yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

#3. Disable the docker-ce-edge repository; edge is the development channel and unstable, so use the stable release
yum-config-manager --disable docker-ce-edge
#4. Refresh the local yum cache
yum makecache fast
#5. Install the corresponding docker-ce version
yum -y install docker-ce
#6. Configure the daemon: the kubelet's cgroup driver must match Docker's cgroup-driver; the following is the officially recommended way to handle it (recent binary kubelet builds default to cgroupfs)
#Because pulling images can be slow from within China, an Aliyun registry mirror is appended at the end of the configuration.
mkdir -p /etc/docker && 
cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ],
  "registry-mirrors": ["https://uyah70su.mirror.aliyuncs.com"]
}
EOF
#7. Enable Docker at boot and start it
systemctl restart docker && systemctl enable docker && systemctl status docker

#8. Alternatively, download the packages ahead of time on your own machine; this environment installs version 18.09
yum install --downloadonly docker-ce-18.09 --downloaddir=/opt   #download docker-ce and its dependencies with yum
yum localinstall -y /opt/*.rpm       #then install from the downloaded packages

2.4. Deploy etcd

etcd is a key/value store that holds the entire cluster state; it is commonly used for service discovery, shared configuration, and concurrency control (leader election, distributed locks, and so on). Kubernetes stores all of its runtime data in etcd.

All Kubernetes components read and write resource state in etcd through the API server. If you have spare machines, etcd can run on dedicated hosts, in which case the apiserver must be configured to point at that etcd cluster.
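As a quick illustration of the key/value model (a hedged sketch rather than a deployment step: the key name /test/hello is arbitrary, and the flags assume the certificates from 2.4.1 and the running cluster from 2.4.2), the v2 etcdctl API used throughout this guide can write and read a key directly:

#write an arbitrary test key, read it back, then delete it
etcdctl --ca-file=/etc/kubernetes/ssl/ca.pem --cert-file=/etc/kubernetes/ssl/etcd.pem --key-file=/etc/kubernetes/ssl/etcd-key.pem set /test/hello world
etcdctl --ca-file=/etc/kubernetes/ssl/ca.pem --cert-file=/etc/kubernetes/ssl/etcd.pem --key-file=/etc/kubernetes/ssl/etcd-key.pem get /test/hello
etcdctl --ca-file=/etc/kubernetes/ssl/ca.pem --cert-file=/etc/kubernetes/ssl/etcd.pem --key-file=/etc/kubernetes/ssl/etcd-key.pem rm /test/hello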

2.4.1. Create the etcd certificates

If you prefer not to install the cfssl tools on the deployment host, this step can be done on another machine and the generated certificates copied to the etcd hosts afterwards. etcd can also be deployed without certificates; in that case the etcd.service and etcd.conf files must not contain any https URLs.

wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 -O /usr/local/bin/cfssl
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 -O /usr/local/bin/cfssljson
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64 -O /usr/local/bin/cfssl-certinfo

chmod +x /usr/local/bin/cfssl*

#Create the CA configuration files
mkdir /root/ssl && cd /root/ssl

cat >  ca-config.json <<EOF
{
"signing": {
"default": {
  "expiry": "8760h"
},
"profiles": {
  "kubernetes": {
    "usages": [
        "signing",
        "key encipherment",
        "server auth",
        "client auth"
    ],
    "expiry": "8760h"
  }
}
}
}
EOF

#2----------------------------------------------
cat >  ca-csr.json <<EOF
{
"CN": "kubernetes",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
  "C": "CN",
  "ST": "ShangHai",
  "L": "ShangHai",
  "O": "k8s",
  "OU": "System"
}
]
}
EOF

#3--------------------------------------------------
cat > etcd-csr.json <<EOF
{
  "CN": "etcd",
  "hosts": [
    "127.0.0.1",
    "192.168.10.12",
    "192.168.10.13",
    "192.168.10.14"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "ShangHai",
      "L": "ShangHai",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
# The IPs in the hosts field are the etcd node addresses authorized to use this certificate
#------------------------------------
cfssl gencert -initca ca-csr.json | cfssljson -bare ca

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes etcd-csr.json | cfssljson -bare etcd

#After generation there are 9 files in total:
ca-config.json
ca.csr
ca-csr.json
ca-key.pem
ca.pem
etcd.csr
etcd-csr.json
etcd-key.pem
etcd.pem

#Copy the generated etcd.pem, etcd-key.pem, and ca.pem to the etcd machines
mkdir -p /etc/kubernetes/ssl && cp *.pem /etc/kubernetes/ssl/
ssh -n 192.168.10.13 "mkdir -p /etc/kubernetes/ssl && exit"
ssh -n 192.168.10.14 "mkdir -p /etc/kubernetes/ssl && exit"

scp -r /etc/kubernetes/ssl/*.pem 192.168.10.13:/etc/kubernetes/ssl/
scp -r /etc/kubernetes/ssl/*.pem 192.168.10.14:/etc/kubernetes/ssl/

2.4.2. Deploy etcd

Upload the downloaded etcd binaries to the etcd node machines.

#Install the etcd binaries on the etcd machines
mkdir -p /var/lib/etcd

tar -zxvf etcd-v3.3.13-linux-amd64.tar.gz
cp etcd-v3.3.13-linux-amd64/etcd* /usr/local/bin
scp etcd-v3.3.13-linux-amd64/etcd* 192.168.10.13:/usr/local/bin
scp etcd-v3.3.13-linux-amd64/etcd* 192.168.10.14:/usr/local/bin

#1. Create the etcd.service file on the .12 machine
cat <<EOF >/etc/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
ExecStart=/usr/local/bin/etcd \
  --name k8s-m12 \
  --cert-file=/etc/kubernetes/ssl/etcd.pem \
  --key-file=/etc/kubernetes/ssl/etcd-key.pem \
  --trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
  --peer-cert-file=/etc/kubernetes/ssl/etcd.pem \
  --peer-key-file=/etc/kubernetes/ssl/etcd-key.pem \
  --peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
  --initial-advertise-peer-urls https://192.168.10.12:2380 \
  --listen-peer-urls https://192.168.10.12:2380 \
  --listen-client-urls https://192.168.10.12:2379,http://127.0.0.1:2379 \
  --advertise-client-urls https://192.168.10.12:2379 \
  --initial-cluster-token etcd-cluster-0 \
  --initial-cluster k8s-m12=https://192.168.10.12:2380,k8s-m13=https://192.168.10.13:2380,k8s-m14=https://192.168.10.14:2380 \
  --initial-cluster-state new \
  --data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

#2. Start the etcd service (the first member blocks until the others join; start all three, then check the status)
systemctl daemon-reload && systemctl enable etcd.service && systemctl start etcd.service && systemctl status etcd
#1. Create the etcd.service file on the .13 machine
cat <<EOF >/etc/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
ExecStart=/usr/local/bin/etcd \
  --name k8s-m13 \
  --cert-file=/etc/kubernetes/ssl/etcd.pem \
  --key-file=/etc/kubernetes/ssl/etcd-key.pem \
  --peer-cert-file=/etc/kubernetes/ssl/etcd.pem \
  --peer-key-file=/etc/kubernetes/ssl/etcd-key.pem \
  --trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
  --peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
  --initial-advertise-peer-urls https://192.168.10.13:2380 \
  --listen-peer-urls https://192.168.10.13:2380 \
  --listen-client-urls https://192.168.10.13:2379,http://127.0.0.1:2379 \
  --advertise-client-urls https://192.168.10.13:2379 \
  --initial-cluster-token etcd-cluster-0 \
  --initial-cluster k8s-m12=https://192.168.10.12:2380,k8s-m13=https://192.168.10.13:2380,k8s-m14=https://192.168.10.14:2380 \
  --initial-cluster-state new \
  --data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

#2. Start the etcd service
systemctl daemon-reload && systemctl enable etcd.service && systemctl start etcd.service && systemctl status etcd
#1. Create the etcd.service file on the .14 machine
cat <<EOF >/etc/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
ExecStart=/usr/local/bin/etcd \
  --name k8s-m14 \
  --cert-file=/etc/kubernetes/ssl/etcd.pem \
  --key-file=/etc/kubernetes/ssl/etcd-key.pem \
  --peer-cert-file=/etc/kubernetes/ssl/etcd.pem \
  --peer-key-file=/etc/kubernetes/ssl/etcd-key.pem \
  --trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
  --peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
  --initial-advertise-peer-urls https://192.168.10.14:2380 \
  --listen-peer-urls https://192.168.10.14:2380 \
  --listen-client-urls https://192.168.10.14:2379,http://127.0.0.1:2379 \
  --advertise-client-urls https://192.168.10.14:2379 \
  --initial-cluster-token etcd-cluster-0 \
  --initial-cluster k8s-m12=https://192.168.10.12:2380,k8s-m13=https://192.168.10.13:2380,k8s-m14=https://192.168.10.14:2380 \
  --initial-cluster-state new \
  --data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

#2. Start the etcd service
systemctl daemon-reload && systemctl enable etcd.service && systemctl start etcd.service && systemctl status etcd

2.4.3. Verify the cluster

#1. Check cluster health
etcdctl --ca-file=/etc/kubernetes/ssl/ca.pem --cert-file=/etc/kubernetes/ssl/etcd.pem --key-file=/etc/kubernetes/ssl/etcd-key.pem cluster-health
#Output like the following means the cluster is healthy
member 1af68d968c7e3f22 is healthy: got healthy result from https://192.168.10.12:2379
member 55204c19ed228077 is healthy: got healthy result from https://192.168.10.14:2379
member e8d9a97b17f26476 is healthy: got healthy result from https://192.168.10.13:2379
cluster is healthy

#2. List the cluster members
etcdctl --endpoints=https://192.168.10.12:2379,https://192.168.10.13:2379,https://192.168.10.14:2379  --ca-file=/etc/kubernetes/ssl/ca.pem  --cert-file=/etc/kubernetes/ssl/etcd.pem  --key-file=/etc/kubernetes/ssl/etcd-key.pem member list
#Returns results like:
1af68d968c7e3f22: name=k8s-m12 peerURLs=https://192.168.10.12:2380 clientURLs=https://192.168.10.12:2379 isLeader=false
55204c19ed228077: name=k8s-m14 peerURLs=https://192.168.10.14:2380 clientURLs=https://192.168.10.14:2379 isLeader=false
e8d9a97b17f26476: name=k8s-m13 peerURLs=https://192.168.10.13:2380 clientURLs=https://192.168.10.13:2379 isLeader=true

2.5. Deploy flannel

flannel must be installed on all nodes. Its main purpose is to let Docker containers on different hosts talk to each other; it is the network foundation of the Kubernetes cluster.
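Once the steps in 2.5.2-2.5.4 are done, the effect can be seen directly (a hedged sketch; the subnets follow the examples later in this guide and will differ on your hosts): every node holds a route to every other node's pod /24 via the flannel.1 vxlan device, so a container on one host can reach a container on another.

#one route per remote node's pod subnet, via flannel.1
ip route | grep flannel.1
#from node .15, an address in node .16's pod subnet (for example its docker0 gateway 172.30.85.1) should answer
ping -c2 172.30.85.1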

2.5.1. Create the flannel certificates

#1. Generate the TLS certificate that flanneld uses as a client certificate against etcd (it only needs to be generated once)
cd /root/ssl
cat > flanneld-csr.json <<EOF
{
  "CN": "flanneld",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "ShangHai",
      "L": "ShangHai",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

#2. Generate the certificate and private key
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes flanneld-csr.json | cfssljson -bare flanneld
#This produces the following files:
flanneld.csr
flanneld-csr.json
flanneld-key.pem
flanneld.pem

#3. Then copy the certificates to every node.
cp flanneld*.pem /etc/kubernetes/ssl
scp flanneld*.pem 192.168.10.13:/etc/kubernetes/ssl
scp flanneld*.pem 192.168.10.14:/etc/kubernetes/ssl
scp flanneld*.pem 192.168.10.15:/etc/kubernetes/ssl
scp flanneld*.pem 192.168.10.16:/etc/kubernetes/ssl

2.5.2. Deploy flannel

#1. Install flannel
tar -zvxf flannel-v0.11.0-linux-amd64.tar.gz
cp flanneld mk-docker-opts.sh /usr/local/bin
scp flanneld mk-docker-opts.sh 192.168.10.13:/usr/local/bin
scp flanneld mk-docker-opts.sh 192.168.10.14:/usr/local/bin
scp flanneld mk-docker-opts.sh 192.168.10.15:/usr/local/bin
scp flanneld mk-docker-opts.sh 192.168.10.16:/usr/local/bin

#2. Write the cluster Pod network configuration into etcd; run this once on any one etcd member
etcdctl \
--endpoints=https://192.168.10.12:2379,https://192.168.10.13:2379,https://192.168.10.14:2379 \
--ca-file=/etc/kubernetes/ssl/ca.pem \
--cert-file=/etc/kubernetes/ssl/flanneld.pem \
--key-file=/etc/kubernetes/ssl/flanneld-key.pem \
mk /kubernetes/network/config '{"Network":"172.30.0.0/16", "SubnetLen": 24, "Backend": {"Type": "vxlan"}}'
#----The command echoes the value back: the network is 172.30.0.0/16 with a 24-bit subnet mask per host
{"Network":"172.30.0.0/16", "SubnetLen": 24, "Backend": {"Type": "vxlan"}}

#2.1. List the keys in the store recursively
etcdctl \
--ca-file=/etc/kubernetes/ssl/ca.pem \
--cert-file=/etc/kubernetes/ssl/flanneld.pem \
--key-file=/etc/kubernetes/ssl/flanneld-key.pem ls -r
#2.2. Read the network configuration key
etcdctl \
--ca-file=/etc/kubernetes/ssl/ca.pem \
--cert-file=/etc/kubernetes/ssl/flanneld.pem \
--key-file=/etc/kubernetes/ssl/flanneld-key.pem get /kubernetes/network/config
#2.3. List the pod subnets that have already been allocated
etcdctl \
--ca-file=/etc/kubernetes/ssl/ca.pem \
--cert-file=/etc/kubernetes/ssl/flanneld.pem \
--key-file=/etc/kubernetes/ssl/flanneld-key.pem ls  /kubernetes/network/subnets

#3. Create the flannel.service file
cat > /etc/systemd/system/flannel.service << EOF
[Unit]
Description=Flanneld overlay address etcd agent
After=network.target
After=network-online.target
Wants=network-online.target
After=etcd.service
Before=docker.service

[Service]
Type=notify
ExecStart=/usr/local/bin/flanneld \
  -etcd-cafile=/etc/kubernetes/ssl/ca.pem \
  -etcd-certfile=/etc/kubernetes/ssl/flanneld.pem \
  -etcd-keyfile=/etc/kubernetes/ssl/flanneld-key.pem \
  -etcd-endpoints=https://192.168.10.12:2379,https://192.168.10.13:2379,https://192.168.10.14:2379 \
  -etcd-prefix=/kubernetes/network
ExecStartPost=/usr/local/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/docker
Restart=on-failure

[Install]
WantedBy=multi-user.target
RequiredBy=docker.service
EOF
#The mk-docker-opts.sh script writes the Pod subnet assigned to flanneld into /run/flannel/docker; Docker reads this file on startup to configure the docker0 bridge.
#flanneld talks to other nodes over the interface of the system default route; on machines with multiple network interfaces (e.g. private and public), use the -iface=enpxx option to choose the interface (see the sketch below).
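#A minimal sketch for a host with more than one interface (assumption: the NIC is named ens33, as in the keepalived configs in 2.6.2; substitute your own interface name) is to end the ExecStart block above with:
#  -etcd-prefix=/kubernetes/network \
#  -iface=ens33
#and then run systemctl daemon-reload before starting flannel in the next step.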

#4. Start flannel
systemctl daemon-reload && systemctl enable flannel && systemctl start flannel && systemctl status flannel

#5. Verify flannel
cat /run/flannel/docker             #/run/flannel/docker holds the subnet options flannel hands to Docker
#Output:
DOCKER_OPT_BIP="--bip=172.30.7.1/24"
DOCKER_OPT_IPMASQ="--ip-masq=true"
DOCKER_OPT_MTU="--mtu=1450"
DOCKER_NETWORK_OPTIONS=" --bip=172.30.7.1/24 --ip-masq=true --mtu=1450"

cat /run/flannel/subnet.env         #/run/flannel/subnet.env contains flannel's overall network and this node's subnet
#Output:
FLANNEL_NETWORK=172.30.0.0/16
FLANNEL_SUBNET=172.30.7.1/24
FLANNEL_MTU=1450
FLANNEL_IPMASQ=false

ip add | grep flannel       #inspect the flannel interface
4: flannel.1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UNKNOWN group default 
    inet 172.30.7.0/32 scope global flannel.1

2.5.3. Configure Docker to use flannel

#1. To make Docker use the flannel network, add the following under the [Service] section
vim /etc/systemd/system/multi-user.target.wants/docker.service
EnvironmentFile=/run/flannel/docker     #this line is new; on the line below, the $DOCKER_NETWORK_OPTIONS at the end is the addition
ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock $DOCKER_NETWORK_OPTIONS

#2. Restart Docker; the allocated pod subnet is now applied to docker0
systemctl daemon-reload && systemctl restart docker && systemctl status docker

ip add | grep docker
#the docker0 bridge IP has changed
4: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default 
    inet 172.30.7.1/24 brd 172.30.7.255 scope global docker0

2.5.4. Configure the CNI plugin for flannel

mkdir -p /opt/cni && tar -zxvf cni-plugins-linux-amd64-v0.8.1.tgz -C /opt/cni
mkdir -p /etc/cni/net.d
cat > /etc/cni/net.d/10-default.conf <<EOF
{
    "name": "flannel",
    "type": "flannel",
    "delegate": {
        "bridge": "docker0",
        "isDefaultGateway": true,
        "mtu": 1400
    }
}
EOF

#Copy the binaries and config to all the other hosts (shown here for .13; repeat for the remaining nodes)
scp /opt/cni/* 192.168.10.13:/usr/local/bin && scp /etc/cni/net.d/* 192.168.10.13:/etc/cni/net.d/

2.6. Deploy keepalived + haproxy

keepalived provides the VIP through which kube-apiserver is exposed; haproxy listens on the VIP and proxies to all kube-apiserver instances, providing health checking and load balancing.

This document reuses the three master machines, so the port haproxy listens on (8443) must differ from kube-apiserver's port 6443 to avoid a conflict.

While running, keepalived periodically checks the state of the local haproxy process; if haproxy is detected as unhealthy, a new master election is triggered and the VIP floats to the newly elected node, making the VIP highly available. All components (kubectl, apiserver, controller-manager, scheduler, and so on) access the kube-apiserver service through the VIP and haproxy's port 8443.
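Once haproxy and keepalived are running, the VIP path can be sanity-checked from any master. This is a small sketch using the addresses and credentials configured below; the last check only becomes meaningful after 2.7.2, and because anonymous auth is disabled there it returns Unauthorized, which still proves that VIP:8443 forwards to an apiserver:

#the VIP should be bound on exactly one of the three masters
ip add | grep 192.168.10.100
#haproxy stats page (admin:123456 as set in haproxy.cfg below)
curl -s -u admin:123456 http://192.168.10.100:9090/status | head
#apiserver through the VIP, once 2.7.2 is done
curl -k https://192.168.10.100:8443/healthz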

2.6.1. Install haproxy

yum install -y haproxy

#Configuration on the .12 machine
cat << EOF > /etc/haproxy/haproxy.cfg
global
    log         127.0.0.1 local2
    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon

defaults
    mode                    tcp
    log                     global
    retries                 3
    timeout connect         10s
    timeout client          1m
    timeout server          1m

listen  admin_stats
    bind 0.0.0.0:9090
    mode http
    log 127.0.0.1 local0 err
    stats refresh 30s
    stats uri /status
    stats realm welcome login\ Haproxy
    stats auth admin:123456
    stats hide-version
    stats admin if TRUE

frontend kubernetes
    bind *:8443
    mode tcp
    default_backend kubernetes-master

backend kubernetes-master
    balance roundrobin
    server k8s-m12 192.168.10.12:6443 check maxconn 2000
    server k8s-m13 192.168.10.13:6443 check maxconn 2000
    server k8s-m14 192.168.10.14:6443 check maxconn 2000
EOF

#The configuration on the .13 and .14 machines is identical

# Start haproxy
systemctl enable haproxy && systemctl start haproxy && systemctl status haproxy

2.6.2. Install keepalived

yum install -y keepalived

#Configuration on the .12 machine

cat <<EOF > /etc/keepalived/keepalived.conf
global_defs {
   router_id LVS_k8s
}

vrrp_script CheckK8sMaster {
    script "curl -k https://192.168.10.100:8443"
    interval 3
    timeout 9
    fall 2
    rise 2
}

vrrp_instance VI_1 {
    state MASTER
    interface ens33
    virtual_router_id 100
    priority 100
    advert_int 1
    mcast_src_ip 192.168.10.12
    nopreempt
    authentication {
        auth_type PASS
        auth_pass fana123
    }
    unicast_peer {
        192.168.10.13
        192.168.10.14
    }
    virtual_ipaddress {
        192.168.10.100/24
    }
    track_script {
        CheckK8sMaster
    }

}
EOF

#keepalived configuration on the .13 machine
cat <<EOF > /etc/keepalived/keepalived.conf
global_defs {
   router_id LVS_k8s
}

vrrp_script CheckK8sMaster {
    script "curl -k https://192.168.10.100:8443"
    interval 3
    timeout 9
    fall 2
    rise 2
}

vrrp_instance VI_1 {
    state BACKUP
    interface ens33
    virtual_router_id 100
    priority 90
    advert_int 1
    mcast_src_ip 192.168.10.13
    nopreempt
    authentication {
        auth_type PASS
        auth_pass fana123
    }
    unicast_peer {
        192.168.10.12
        192.168.10.14
    }
    virtual_ipaddress {
        192.168.10.100/24
    }
    track_script {
        CheckK8sMaster
    }
}
EOF

#keepalived configuration on the .14 machine
cat <<EOF > /etc/keepalived/keepalived.conf
global_defs {
   router_id LVS_k8s
}

vrrp_script CheckK8sMaster {
    script "curl -k https://192.168.10.100:8443"
    interval 3
    timeout 9
    fall 2
    rise 2
}

vrrp_instance VI_1 {
    state BACKUP
    interface ens33
    virtual_router_id 100
    priority 80
    advert_int 1
    mcast_src_ip 192.168.10.14
    nopreempt
    authentication {
        auth_type PASS
        auth_pass fana123
    }
    unicast_peer {
        192.168.10.12
        192.168.10.13
    }
    virtual_ipaddress {
        192.168.10.100/24
    }
    track_script {
        CheckK8sMaster
    }

}
EOF

#Start keepalived
systemctl restart keepalived && systemctl enable keepalived && systemctl status keepalived

#Check the VIP
ip add | grep 10.100

2.7. Deploy the masters

kube-scheduler, kube-controller-manager, and kube-apiserver are closely related. Only one kube-scheduler and one kube-controller-manager process may be active at a time, so when several copies run, a leader must be produced by election.

2.7.1. Deploy the kubectl command-line tool

kubectl is the command-line management tool for the Kubernetes cluster. By default it reads the kube-apiserver address, certificates, user name, and so on from ~/.kube/config; if this is not configured, kubectl commands may fail. ~/.kube/config only needs to be generated once and can then be copied to the other masters.
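Once the kubeconfig below has been generated, kubectl itself can show what it contains (a small aside rather than an extra deployment step; kubectl config view redacts the embedded certificates):

#show the merged configuration kubectl will use, and the active context
kubectl config view
kubectl config current-context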

#1. Unpack and copy the binaries
tar -zxvf kubernetes-server-linux-amd64.tar.gz
cd kubernetes/server/bin/
cp kube-apiserver kubeadm kube-controller-manager kubectl kube-scheduler /usr/local/bin
scp kube-apiserver kubeadm kube-controller-manager kubectl kube-scheduler 192.168.10.13:/usr/local/bin
scp kube-apiserver kubeadm kube-controller-manager kubectl kube-scheduler 192.168.10.14:/usr/local/bin

#2. Create the admin certificate signing request
cd /root/ssl
cat > admin-csr.json <<EOF
{
  "CN": "admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "ShangHai",
      "L": "ShangHai",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}
EOF

#3. Generate the certificate and private key
cfssl gencert -ca=ca.pem \
  -ca-key=ca-key.pem \
  -config=ca-config.json \
  -profile=kubernetes admin-csr.json | cfssljson -bare admin

#4. Create the ~/.kube/config file
kubectl config set-cluster kubernetes \
  --certificate-authority=ca.pem \
  --embed-certs=true \
  --server=https://192.168.10.100:8443 \
  --kubeconfig=kubectl.kubeconfig

#4.1. Set the client credentials
kubectl config set-credentials admin \
  --client-certificate=admin.pem \
  --client-key=admin-key.pem \
  --embed-certs=true \
  --kubeconfig=kubectl.kubeconfig

#4.2. Set the context
kubectl config set-context kubernetes \
  --cluster=kubernetes \
  --user=admin \
  --kubeconfig=kubectl.kubeconfig
  
#4.3. Set the default context
kubectl config use-context kubernetes --kubeconfig=kubectl.kubeconfig

#4.4. Copy the kubectl.kubeconfig file (create the target directories first)
mkdir -p ~/.kube && cp kubectl.kubeconfig ~/.kube/config
ssh -n 192.168.10.13 "mkdir -p /root/.kube" && scp kubectl.kubeconfig 192.168.10.13:/root/.kube/config
ssh -n 192.168.10.14 "mkdir -p /root/.kube" && scp kubectl.kubeconfig 192.168.10.14:/root/.kube/config

cp admin*.pem /etc/kubernetes/ssl/
scp admin*.pem 192.168.10.13:/etc/kubernetes/ssl/
scp admin*.pem 192.168.10.14:/etc/kubernetes/ssl/

2.7.2. Deploy kube-apiserver

#1. Create the certificate request; the hosts field lists the IPs and domain names authorized to use the certificate: here the VIP, the apiserver node IPs, and the kubernetes service IP and DNS names
cd /root/ssl
cat > kubernetes-csr.json <<EOF
{
  "CN": "kubernetes",
  "hosts": [
    "127.0.0.1",
    "192.168.10.12",
    "192.168.10.13",
    "192.168.10.14",
    "192.168.10.100",
    "10.254.0.1",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "ShangHai",
      "L": "ShangHai",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

#2. Generate the certificate and private key
cfssl gencert -ca=ca.pem \
  -ca-key=ca-key.pem \
  -config=ca-config.json \
  -profile=kubernetes kubernetes-csr.json | cfssljson -bare kubernetes

#3. Copy the certificates to the other master nodes
cp kubernetes*.pem /etc/kubernetes/ssl/
scp kubernetes*.pem 192.168.10.13:/etc/kubernetes/ssl/
scp kubernetes*.pem 192.168.10.14:/etc/kubernetes/ssl/

#4. Create the encryption config file
cat > encryption-config.yaml <<EOF
kind: EncryptionConfig
apiVersion: v1
resources:
  - resources:
      - secrets
    providers:
      - aescbc:
          keys:
            - name: key1
              secret: $(head -c 32 /dev/urandom | base64)
      - identity: {}
EOF

#4.1. Create the client token file used by kube-apiserver for bootstrap authentication
cat <<EOF > bootstrap-token.csv
$(head -c 32 /dev/urandom | base64),kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF

#5. Copy the encryption and token files to the other master nodes
cp encryption-config.yaml bootstrap-token.csv /etc/kubernetes/ssl
scp encryption-config.yaml bootstrap-token.csv 192.168.10.13:/etc/kubernetes/ssl
scp encryption-config.yaml bootstrap-token.csv 192.168.10.14:/etc/kubernetes/ssl

#6. Create the kube-apiserver.service file
cat > /etc/systemd/system/kube-apiserver.service << EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-apiserver \
  --enable-admission-plugins=NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \
  --anonymous-auth=false \
  --experimental-encryption-provider-config=/etc/kubernetes/ssl/encryption-config.yaml \
  --advertise-address=0.0.0.0 \
  --bind-address=0.0.0.0 \
  --insecure-bind-address=127.0.0.1 \
  --secure-port=6443 \
  --insecure-port=0 \
  --authorization-mode=Node,RBAC \
  --runtime-config=api/all \
  --enable-bootstrap-token-auth \
  --service-cluster-ip-range=10.254.0.0/16 \
  --service-node-port-range=30000-32700 \
  --tls-cert-file=/etc/kubernetes/ssl/kubernetes.pem \
  --tls-private-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
  --client-ca-file=/etc/kubernetes/ssl/ca.pem \
  --kubelet-client-certificate=/etc/kubernetes/ssl/kubernetes.pem \
  --kubelet-client-key=/etc/kubernetes/ssl/kubernetes-key.pem \
  --service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \
  --etcd-cafile=/etc/kubernetes/ssl/ca.pem \
  --etcd-certfile=/etc/kubernetes/ssl/kubernetes.pem \
  --etcd-keyfile=/etc/kubernetes/ssl/kubernetes-key.pem \
  --etcd-servers=https://192.168.10.12:2379,https://192.168.10.13:2379,https://192.168.10.14:2379 \
  --enable-swagger-ui=true \
  --allow-privileged=true \
  --apiserver-count=3 \
  --audit-log-maxage=30 \
  --audit-log-maxbackup=3 \
  --audit-log-maxsize=100 \
  --audit-log-path=/var/log/kubernetes/kube-apiserver-audit.log \
  --event-ttl=1h \
  --alsologtostderr=true \
  --logtostderr=false \
  --log-dir=/var/log/kubernetes \
  --v=2
Restart=on-failure
RestartSec=5
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

mkdir -p /var/log/kubernetes        #create the log directory, then copy the unit file to the other masters
scp /etc/systemd/system/kube-apiserver.service 192.168.10.13:/etc/systemd/system/
scp /etc/systemd/system/kube-apiserver.service 192.168.10.14:/etc/systemd/system/

#7. Start the service

systemctl daemon-reload && systemctl enable kube-apiserver && systemctl start kube-apiserver && systemctl status kube-apiserver

#8. Grant the kubernetes certificate access to the kubelet API. When kubectl exec, run, logs, and similar commands are executed, the apiserver forwards the request to the kubelet; the RBAC rule here authorizes the apiserver to call the kubelet API.
kubectl create clusterrolebinding kube-apiserver:kubelet-apis --clusterrole=system:kubelet-api-admin --user kubernetes

#8.1. The predefined ClusterRole system:kubelet-api-admin grants access to all kubelet APIs:
kubectl describe clusterrole system:kubelet-api-admin

#9. Check the apiserver and the cluster state
netstat -ptln | grep kube-apiserver
tcp        0      0 192.168.10.12:6443      0.0.0.0:*               LISTEN      13000/kube-apiserve 

kubectl cluster-info
#Output:
Kubernetes master is running at https://192.168.10.100:8443
To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.

kubectl get all --all-namespaces
#Output:
NAMESPACE   NAME                 TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
default     service/kubernetes   ClusterIP   10.254.0.1   <none>        443/TCP   12m

kubectl get componentstatuses
#Output as follows; scheduler and controller-manager are not deployed yet
NAME                 STATUS      MESSAGE                                ERROR
scheduler            Unhealthy   Get http://127.0.0.1:10251/healthz: dial tcp 127.0.0.1:10251: connect: connection refused   
controller-manager   Unhealthy   Get http://127.0.0.1:10252/healthz: dial tcp 127.0.0.1:10252: connect: connection refused   
etcd-2               Healthy     {"health":"true"}
etcd-1               Healthy     {"health":"true"} 
etcd-0               Healthy     {"health":"true"}

2.7.3. Deploy kube-controller-manager

The cluster contains 3 of these instances. After start-up a leader is produced by a competitive election and the other instances block; when the leader becomes unavailable, the remaining instances hold a new election, which keeps the service available.
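Once all three instances are running (step 7 below), the election can be observed directly. This is a hedged sketch that reuses the insecure healthz port 10252 and the endpoints object shown later in this subsection:

#each instance reports healthy locally, whether or not it is the leader
curl -s http://127.0.0.1:10252/healthz
#only one holderIdentity appears in the leader-election annotation
kubectl -n kube-system get endpoints kube-controller-manager -o yaml | grep holderIdentity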

#1. Create the certificate signing request
cd /root/ssl
cat > kube-controller-manager-csr.json << EOF
{
    "CN": "system:kube-controller-manager",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "hosts": [
      "127.0.0.1",
      "192.168.10.12",
      "192.168.10.13",
      "192.168.10.14"
    ],
    "names": [
      {
        "C": "CN",
        "ST": "ShangHai",
        "L": "ShangHai",
        "O": "system:kube-controller-manager",
        "OU": "System"
      }
    ]
}
EOF

#2. Generate the certificate
cfssl gencert -ca=ca.pem \
  -ca-key=ca-key.pem \
  -config=ca-config.json \
  -profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager

#3. Copy the certificates to the other master nodes
cp kube-controller-manager*.pem /etc/kubernetes/ssl/
scp kube-controller-manager*.pem 192.168.10.13:/etc/kubernetes/ssl/
scp kube-controller-manager*.pem 192.168.10.14:/etc/kubernetes/ssl/

#4. Create the kubeconfig file
kubectl config set-cluster kubernetes \
  --certificate-authority=ca.pem \
  --embed-certs=true \
  --server=https://192.168.10.100:8443 \
  --kubeconfig=kube-controller-manager.kubeconfig

kubectl config set-credentials system:kube-controller-manager \
  --client-certificate=kube-controller-manager.pem \
  --client-key=kube-controller-manager-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-controller-manager.kubeconfig

kubectl config set-context system:kube-controller-manager \
  --cluster=kubernetes \
  --user=system:kube-controller-manager \
  --kubeconfig=kube-controller-manager.kubeconfig

kubectl config use-context system:kube-controller-manager --kubeconfig=kube-controller-manager.kubeconfig

#5. Copy kube-controller-manager.kubeconfig to the other master nodes
cp kube-controller-manager.kubeconfig /etc/kubernetes/ssl/
scp kube-controller-manager.kubeconfig 192.168.10.13:/etc/kubernetes/ssl/
scp kube-controller-manager.kubeconfig 192.168.10.14:/etc/kubernetes/ssl/

#6. Create the kube-controller-manager.service file
cat > /etc/systemd/system/kube-controller-manager.service  << EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
ExecStart=/usr/local/bin/kube-controller-manager \
  --address=127.0.0.1 \
  --master=https://192.168.10.100:8443 \
  --kubeconfig=/etc/kubernetes/ssl/kube-controller-manager.kubeconfig \
  --allocate-node-cidrs=true \
  --authentication-kubeconfig=/etc/kubernetes/ssl/kube-controller-manager.kubeconfig \
  --service-cluster-ip-range=10.254.0.0/16 \
  --cluster-cidr=172.30.0.0/16 \
  --cluster-name=kubernetes \
  --cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \
  --cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \
  --experimental-cluster-signing-duration=8760h \
  --leader-elect=true \
  --feature-gates=RotateKubeletServerCertificate=true \
  --controllers=*,bootstrapsigner,tokencleaner \
  --horizontal-pod-autoscaler-use-rest-clients=true \
  --horizontal-pod-autoscaler-sync-period=10s \
  --tls-cert-file=/etc/kubernetes/ssl/kube-controller-manager.pem \
  --tls-private-key-file=/etc/kubernetes/ssl/kube-controller-manager-key.pem \
  --service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem \
  --root-ca-file=/etc/kubernetes/ssl/ca.pem \
  --use-service-account-credentials=true \
  --alsologtostderr=true \
  --logtostderr=false \
  --log-dir=/var/log/kubernetes \
  --v=2
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF

#7. Copy to the other master nodes, then start the service
scp /etc/systemd/system/kube-controller-manager.service 192.168.10.13:/etc/systemd/system/
scp /etc/systemd/system/kube-controller-manager.service 192.168.10.14:/etc/systemd/system/

systemctl daemon-reload && systemctl enable kube-controller-manager && systemctl start kube-controller-manager && systemctl status kube-controller-manager

#8. Check the service
netstat -lnpt|grep kube-controll
tcp        0      0 127.0.0.1:10252         0.0.0.0:*               LISTEN      14492/kube-controll 
tcp6       0      0 :::10257                :::*                    LISTEN      14492/kube-controll 

kubectl get cs
#Output:
NAME                 STATUS      MESSAGE                                               ERROR
scheduler            Unhealthy   Get http://127.0.0.1:10251/healthz: dial tcp 127.0.0.1:10251: connect: connection refused 
controller-manager   Healthy     ok                               
etcd-1               Healthy     {"health":"true"}
etcd-2               Healthy     {"health":"true"}
etcd-0               Healthy     {"health":"true"}

#Check which machine was elected leader
kubectl get endpoints kube-controller-manager --namespace=kube-system  -o yaml
#Output as follows; k8s-m12 has been elected leader
apiVersion: v1
kind: Endpoints
metadata:
  annotations:
    control-plane.alpha.kubernetes.io/leader: '{"holderIdentity":"k8s-m12_6f9b09e6-995b-11e9-b2bf-000c29959a05","leaseDurationSeconds":15,"acquireTime":"2019-06-28T04:16:00Z","renewTime":"2019-06-28T04:21:32Z","leaderTransitions":0}'
  creationTimestamp: "2019-06-28T04:16:00Z"
  name: kube-controller-manager
  namespace: kube-system
  resourceVersion: "1481"
  selfLink: /api/v1/namespaces/kube-system/endpoints/kube-controller-manager
  uid: 6f9d838f-995b-11e9-9cb7-000c29959a05

See also: controller permissions and the use-service-account-credentials parameter
See also: kubelet authentication and authorization

2.7.4. Deploy kube-scheduler

The cluster contains 3 of these instances. After start-up a leader is produced by a competitive election and the other instances block; when the leader becomes unavailable, the remaining instances hold a new election, which keeps the service available.

#1. Create the certificate signing request
cd /root/ssl
cat > kube-scheduler-csr.json << EOF
{
    "CN": "system:kube-scheduler",
    "hosts": [
      "127.0.0.1",
      "192.168.10.12",
      "192.168.10.13",
      "192.168.10.14"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
      {
        "C": "CN",
        "ST": "ShangHai",
        "L": "ShangHai",
        "O": "system:kube-scheduler",
        "OU": "System"
      }
    ]
}
EOF

#2. Generate the certificate
cfssl gencert -ca=ca.pem \
  -ca-key=ca-key.pem \
  -config=ca-config.json \
  -profile=kubernetes kube-scheduler-csr.json | cfssljson -bare kube-scheduler

#3. Create the kube-scheduler.kubeconfig file
kubectl config set-cluster kubernetes \
  --certificate-authority=ca.pem \
  --embed-certs=true \
  --server=https://192.168.10.100:8443 \
  --kubeconfig=kube-scheduler.kubeconfig

kubectl config set-credentials system:kube-scheduler \
  --client-certificate=kube-scheduler.pem \
  --client-key=kube-scheduler-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-scheduler.kubeconfig

kubectl config set-context system:kube-scheduler \
  --cluster=kubernetes \
  --user=system:kube-scheduler \
  --kubeconfig=kube-scheduler.kubeconfig

kubectl config use-context system:kube-scheduler --kubeconfig=kube-scheduler.kubeconfig

#4. Copy the kubeconfig and certificates to the other master nodes
cp kube-scheduler.kubeconfig kube-scheduler*.pem /etc/kubernetes/ssl/
scp kube-scheduler.kubeconfig kube-scheduler*.pem 192.168.10.13:/etc/kubernetes/ssl/
scp kube-scheduler.kubeconfig kube-scheduler*.pem 192.168.10.14:/etc/kubernetes/ssl/

#5. Create the kube-scheduler.service file
cat > /etc/systemd/system/kube-scheduler.service << EOF
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
ExecStart=/usr/local/bin/kube-scheduler \
  --address=127.0.0.1 \
  --master=https://192.168.10.100:8443 \
  --kubeconfig=/etc/kubernetes/ssl/kube-scheduler.kubeconfig \
  --leader-elect=true \
  --alsologtostderr=true \
  --logtostderr=false \
  --log-dir=/var/log/kubernetes \
  --v=2
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF

#6. Copy kube-scheduler.service to the other master nodes, then start the service
scp /etc/systemd/system/kube-scheduler.service 192.168.10.13:/etc/systemd/system
scp /etc/systemd/system/kube-scheduler.service 192.168.10.14:/etc/systemd/system

systemctl daemon-reload && systemctl enable kube-scheduler && systemctl start kube-scheduler && systemctl status kube-scheduler

#7. Check the service
netstat -lnpt|grep kube-sche
tcp        0      0 127.0.0.1:10251         0.0.0.0:*               LISTEN      15137/kube-schedule 
tcp6       0      0 :::10259                :::*                    LISTEN      15137/kube-schedule

kubectl get cs
#Output:
NAME                 STATUS    MESSAGE             ERROR
scheduler            Healthy   ok                  
controller-manager   Healthy   ok                  
etcd-2               Healthy   {"health":"true"}   
etcd-1               Healthy   {"health":"true"}   
etcd-0               Healthy   {"health":"true"} 

kubectl get endpoints kube-scheduler --namespace=kube-system  -o yaml
#Output as follows; k8s-m12 has been elected leader
apiVersion: v1
kind: Endpoints
metadata:
  annotations:
    control-plane.alpha.kubernetes.io/leader: '{"holderIdentity":"k8s-m12_1c3f7882-995f-11e9-a5c1-000c29959a05","leaseDurationSeconds":15,"acquireTime":"2019-06-28T04:42:19Z","renewTime":"2019-06-28T04:45:18Z","leaderTransitions":0}'
  creationTimestamp: "2019-06-28T04:42:19Z"
  name: kube-scheduler
  namespace: kube-system
  resourceVersion: "2714"
  selfLink: /api/v1/namespaces/kube-system/endpoints/kube-scheduler
  uid: 1cda2b3a-995f-11e9-ac7d-000c2928fce6

2.7.5. Verify that everything works on all master nodes

kubectl get componentstatuses
NAME                 STATUS    MESSAGE             ERROR
controller-manager   Healthy   ok                  
scheduler            Healthy   ok                  
etcd-2               Healthy   {"health":"true"}   
etcd-1               Healthy   {"health":"true"}   
etcd-0               Healthy   {"health":"true"}

2.8. Deploy the worker nodes

The worker nodes run kubelet, kube-proxy, docker, and flannel.

2.8.1. Deploy kubelet

kubelet runs on every worker node. It receives requests from kube-apiserver, manages Pod containers, and executes interactive commands such as exec, run, and logs. On start-up the kubelet automatically registers its node with kube-apiserver, and the built-in cadvisor collects and monitors the node's resource usage.
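For orientation, once a kubelet is running (step 9 below) it exposes a local healthz endpoint and registers its node. A hedged sketch; the port matches the netstat output later in this subsection and the node name is the hostname-override IP from kubelet.service:

#plain-HTTP healthz, bound to localhost only (port 10248)
curl -s http://127.0.0.1:10248/healthz
#the registered node object, including capacity reported via the built-in cadvisor
kubectl describe node 192.168.10.15 | head -n 40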

#1. Unpack the node package and copy the binaries
tar -zxvf kubernetes-node-linux-amd64.tar.gz
cd /opt/kubernetes/node/bin
cp kubectl kubelet kube-proxy /usr/local/bin
scp kubectl kubelet kube-proxy 192.168.10.16:/usr/local/bin

#2. Create the kubelet-bootstrap kubeconfig files (again on the .12 machine); do this three times, once for each of k8s-m12, k8s-m13, and k8s-m14

#2.1. Create the token
cd /root/ssl
export BOOTSTRAP_TOKEN=$(kubeadm token create \
  --description kubelet-bootstrap-token \
  --groups system:bootstrappers:k8s-m12 \
  --kubeconfig ~/.kube/config)

#2.2. Set the cluster parameters
kubectl config set-cluster kubernetes \
  --certificate-authority=ca.pem \
  --embed-certs=true \
  --server=https://192.168.10.100:8443 \
  --kubeconfig=kubelet-bootstrap-k8s-m12.kubeconfig

#2.3. Set the client credentials
kubectl config set-credentials kubelet-bootstrap \
  --token=${BOOTSTRAP_TOKEN} \
  --kubeconfig=kubelet-bootstrap-k8s-m12.kubeconfig

#2.4. Set the context
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kubelet-bootstrap \
  --kubeconfig=kubelet-bootstrap-k8s-m12.kubeconfig

#2.5. Set the default context
kubectl config use-context default --kubeconfig=kubelet-bootstrap-k8s-m12.kubeconfig

#3. List the tokens kubeadm created for each node
kubeadm token list --kubeconfig ~/.kube/config
#Output:
11rq5j.3f628cf6ura1hf2x   20h       2019-06-29T13:01:52+08:00   authentication,signing   kubelet-bootstrap-token   system:bootstrappers:k8s-m14
8zamvk.rfat8wyzh8311f89   20h       2019-06-29T12:59:26+08:00   authentication,signing   kubelet-bootstrap-token   system:bootstrappers:k8s-m12
lhxalz.busnf6izk82e0xqx   20h       2019-06-29T13:01:03+08:00   authentication,signing   kubelet-bootstrap-token   system:bootstrappers:k8s-m13

#3.1. If you need to delete a created token
kubeadm token --kubeconfig ~/.kube/config delete lhxalz.busnf6izk82e0xqx
# A created token is valid for 1 day; once it expires it can no longer be used and is cleaned up by kube-controller-manager's tokencleaner (if that controller is enabled).
# When kube-apiserver accepts a kubelet bootstrap token, it sets the request's user to system:bootstrap:<token-id> and its group to system:bootstrappers;

#3.2. View the secret associated with each token
kubectl get secrets  -n kube-system     

#4. Copy the bootstrap kubeconfig files to each node machine
scp kubelet-bootstrap-k8s-m12.kubeconfig 192.168.10.15:/etc/kubernetes/ssl/kubelet-bootstrap.kubeconfig
scp kubelet-bootstrap-k8s-m12.kubeconfig 192.168.10.16:/etc/kubernetes/ssl/kubelet-bootstrap.kubeconfig

#5. Create the kubelet configuration file
cd /root/ssl
cat > kubelet.config.json <<EOF
{
  "kind": "KubeletConfiguration",
  "apiVersion": "kubelet.config.k8s.io/v1beta1",
  "authentication": {
    "x509": {
      "clientCAFile": "/etc/kubernetes/ssl/ca.pem"
    },
    "webhook": {
      "enabled": true,
      "cacheTTL": "2m0s"
    },
    "anonymous": {
      "enabled": false
    }
  },
  "authorization": {
    "mode": "Webhook",
    "webhook": {
      "cacheAuthorizedTTL": "5m0s",
      "cacheUnauthorizedTTL": "30s"
    }
  },
  "address": "0.0.0.0",
  "port": 10250,
  "readOnlyPort": 0,
  "cgroupDriver": "cgroupfs",
  "hairpinMode": "promiscuous-bridge",
  "serializeImagePulls": false,
  "featureGates": {
    "RotateKubeletClientCertificate": true,
    "RotateKubeletServerCertificate": true
  },
  "clusterDomain": "cluster.local",
  "clusterDNS": ["10.254.0.2"]
}
EOF

#6. Copy to the other hosts; note that address can be set to each host's own IP
cp kubelet.config.json /etc/kubernetes/ssl
scp kubelet.config.json 192.168.10.15:/etc/kubernetes/ssl
scp kubelet.config.json 192.168.10.16:/etc/kubernetes/ssl

#7. Create the kubelet.service file
mkdir -p /var/log/kubernetes && mkdir -p /var/lib/kubelet   #create the directories first

cat <<EOF > /etc/systemd/system/kubelet.service 
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service
Requires=docker.service

[Service]
WorkingDirectory=/var/lib/kubelet
ExecStart=/usr/local/bin/kubelet \
  --bootstrap-kubeconfig=/etc/kubernetes/ssl/kubelet-bootstrap.kubeconfig \
  --cert-dir=/etc/kubernetes/ssl \
  --network-plugin=cni \
  --cni-conf-dir=/etc/cni/net.d \
  --cni-bin-dir=/usr/local/bin/ \
  --fail-swap-on=false \
  --kubeconfig=/etc/kubernetes/ssl/kubelet.kubeconfig \
  --config=/etc/kubernetes/ssl/kubelet.config.json \
  --hostname-override=192.168.10.15 \
  --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:3.1 \
  --allow-privileged=true \
  --alsologtostderr=true \
  --logtostderr=false \
  --cgroup-driver=systemd \
  --log-dir=/var/log/kubernetes \
  --v=2
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF

#Copy to the other hosts; remember to change hostname-override to each host's own IP

#8. Bootstrap token auth and authorization: the kubelet-bootstrap user from the bootstrap-token file must first be bound to the system:node-bootstrapper role, otherwise the kubelet has no permission to create certificate signing requests
kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --group=system:bootstrappers

#9. Start the kubelet service
systemctl daemon-reload && systemctl enable kubelet && systemctl restart kubelet && systemctl status kubelet

#10. Check the service
netstat -lantp|grep kubelet
tcp        0      0 192.168.10.15:46936     192.168.10.100:8443     ESTABLISHED 15299/kubelet

#11. kubelet TLS certificate bootstrapping: on first start the kubelet sends a certificate signing request to kube-apiserver, and the node is only joined to the cluster after the request is approved. List the pending (unapproved) CSRs
kubectl get csr
NAME                                                   AGE   REQUESTOR                 CONDITION
node-csr-ZyWLfyY4nBb1GPBCCNGf2pCjbFKGHt04q50R1_3oprU   16m   system:bootstrap:rhwf4g   Pending
node-csr-hiZbOHizDYsE_n36kfuSxWTmUzobCEnCpIXfN54Lh6Y   18m   system:bootstrap:rhwf4g   Pending

Approve kubelet CSR requests

#1. Manually approve a CSR (the automatic approach further below is recommended)
kubectl certificate approve node-csr-ZyWLfyY4nBb1GPBCCNGf2pCjbFKGHt04q50R1_3oprU #manual approval
#Output:
certificatesigningrequest.certificates.k8s.io/node-csr-ZyWLfyY4nBb1GPBCCNGf2pCjbFKGHt04q50R1_3oprU approved

#1.1. Check the approval result
kubectl describe csr node-csr-ZyWLfyY4nBb1GPBCCNGf2pCjbFKGHt04q50R1_3oprU   
#Output:
Name:               node-csr-ZyWLfyY4nBb1GPBCCNGf2pCjbFKGHt04q50R1_3oprU
Labels:             <none>
Annotations:        <none>
CreationTimestamp:  Wed, 26 Jun 2019 15:12:40 +0800
Requesting User:    system:bootstrap:rhwf4g
Status:             Approved,Issued
Subject:
         Common Name:    system:node:192.168.10.16
         Serial Number:  
         Organization:   system:nodes
Events:  <none>

#1.2. If there are many pending CSRs, approve them in bulk
kubectl get csr|grep 'Pending' | awk 'NR>0{print $1}'| xargs kubectl certificate approve
kubectl get csr|awk 'NR==3{print $1}'| xargs kubectl describe csr   #check the approval result

#2. Automatic CSR approval (recommended): create ClusterRoleBindings used to auto-approve client certificates and to renew client and server certificates
cd /root/ssl
cat > csr-crb.yaml <<EOF
# Approve all CSRs for the group "system:bootstrappers"
 kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: auto-approve-csrs-for-group
 subjects:
 - kind: Group
   name: system:bootstrappers
   apiGroup: rbac.authorization.k8s.io
 roleRef:
   kind: ClusterRole
   name: system:certificates.k8s.io:certificatesigningrequests:nodeclient
   apiGroup: rbac.authorization.k8s.io
---
 # To let a node of the group "system:bootstrappers" renew its own credentials
 kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: node-client-cert-renewal
 subjects:
 - kind: Group
   name: system:bootstrappers
   apiGroup: rbac.authorization.k8s.io
 roleRef:
   kind: ClusterRole
   name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
   apiGroup: rbac.authorization.k8s.io
---
# A ClusterRole which instructs the CSR approver to approve a node requesting a
# serving cert matching its client cert.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: approve-node-server-renewal-csr
rules:
- apiGroups: ["certificates.k8s.io"]
  resources: ["certificatesigningrequests/selfnodeserver"]
  verbs: ["create"]
---
 # To let a node of the group "system:nodes" renew its own server credentials
 kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: node-server-cert-renewal
 subjects:
 - kind: Group
   name: system:nodes
   apiGroup: rbac.authorization.k8s.io
 roleRef:
   kind: ClusterRole
   name: approve-node-server-renewal-csr
   apiGroup: rbac.authorization.k8s.io
EOF

#3. Copy to the other nodes
cp csr-crb.yaml /etc/kubernetes/ssl
scp csr-crb.yaml 192.168.10.13:/etc/kubernetes/ssl
scp csr-crb.yaml 192.168.10.14:/etc/kubernetes/ssl

#4. Apply the configuration
kubectl apply -f /etc/kubernetes/ssl/csr-crb.yaml

#5. Verify
kubectl get csr     #after a while, all CSRs should show as automatically approved
#Output:
NAME                                                   AGE   REQUESTOR                 CONDITION
node-csr-cF4D5xoTEQCkK5QCsCAmsHGItlZ2cJ43RjkGXpM4BNw   38m   system:bootstrap:8zamvk   Approved,Issued
node-csr-lUIuS1_ggYM8Q95rgsUrBawzrsAXQ4QfYcP3BbPnWl8   36m   system:bootstrap:lhxalz   Approved,Issued


kubectl get --all-namespaces -o wide nodes      #all nodes are Ready
#Output:
NAME            STATUS   ROLES    AGE     VERSION   INTERNAL-IP     EXTERNAL-IP   OS-IMAGE                KERNEL-VERSION                CONTAINER-RUNTIME
192.168.10.15   Ready    <none>   5m33s   v1.14.3   192.168.10.15   <none>        CentOS Linux 7 (Core)   4.4.103-1.el7.elrepo.x86_64   docker://18.9.6
192.168.10.16   Ready    <none>   54s     v1.14.3   192.168.10.16   <none>        CentOS Linux 7 (Core)   4.4.103-1.el7.elrepo.x86_64   docker://18.9.6

kubectl get nodes
NAME            STATUS   ROLES    AGE     VERSION
192.168.10.15   Ready    <none>   6m55s   v1.14.3
192.168.10.16   Ready    <none>   2m16s   v1.14.3

netstat -lnpt|grep kubelet
tcp        0      0 127.0.0.1:10248         0.0.0.0:*               LISTEN      20302/kubelet       
tcp        0      0 192.168.10.15:10250     0.0.0.0:*               LISTEN      20302/kubelet       
tcp        0      0 127.0.0.1:37706         0.0.0.0:*               LISTEN      20302/kubelet       
tcp        0      0 192.168.10.15:60332     192.168.10.100:8443     ESTABLISHED 20302/kubelet 
#10248: healthz HTTP service; 10250: HTTPS API service. Note: the read-only port 10255 is not enabled. Since anonymous authentication is disabled and webhook authorization is enabled, every request to the HTTPS API on port 10250 must be authenticated and authorized.

Kubelet API authentication and authorization

The kubelet configuration file kubelet.config.json sets the following authentication parameters:

  • authentication.anonymous.enabled: set to false, so anonymous access to port 10250 is not allowed;
  • authentication.x509.clientCAFile: the CA that signs client certificates, enabling HTTPS certificate authentication;
  • authentication.webhook.enabled=true: enables HTTPS bearer-token authentication;

It also sets the following authorization parameter:

  • authorization.mode=Webhook: enables RBAC authorization;
# After receiving a request, the kubelet authenticates the client certificate against clientCAFile or checks whether the bearer token is valid. If neither passes, the request is rejected with Unauthorized
curl -s --cacert /etc/kubernetes/ssl/ca.pem https://127.0.0.1:10250/metrics
curl -s --cacert /etc/kubernetes/ssl/ca.pem -H "Authorization: Bearer 123456" https://192.168.10.15:10250/metrics

#After authentication, the kubelet sends a SubjectAccessReview to kube-apiserver to check whether the user/group behind the certificate or token is authorized (RBAC) to operate on the resource;

#1. Certificate authentication and authorization

#a certificate with insufficient permissions;
curl -s --cacert /etc/kubernetes/ssl/ca.pem --cert /etc/kubernetes/ssl/kube-controller-manager.pem --key /etc/kubernetes/ssl/kube-controller-manager-key.pem https://192.168.10.15:10250/metrics

#the admin certificate created while deploying the kubectl CLI, which has the highest privileges;
curl -s --cacert /etc/kubernetes/ssl/ca.pem --cert /etc/kubernetes/ssl/admin.pem --key /etc/kubernetes/ssl/admin-key.pem https://192.168.10.15:10250/metrics|head

#2. Bearer-token authentication and authorization:

# Create a ServiceAccount and bind it to the ClusterRole system:kubelet-api-admin so that it can call the kubelet API:
kubectl create sa kubelet-api-test

kubectl create clusterrolebinding kubelet-api-test --clusterrole=system:kubelet-api-admin --serviceaccount=default:kubelet-api-test 
SECRET=$(kubectl get secrets | grep kubelet-api-test | awk '{print $1}')
TOKEN=$(kubectl describe secret ${SECRET} | grep -E '^token' | awk '{print $2}')
echo ${TOKEN}

curl -s --cacert /etc/kubernetes/ssl/ca.pem -H "Authorization: Bearer ${TOKEN}" https://192.168.10.15:10250/metrics|head

# cadvisor and metrics
# cadvisor collects per-container resource usage (CPU, memory, disk, network) on its node and exposes it on its own HTTP web page (port 4194) and on port 10250 in Prometheus metrics format.

# Open http://192.168.10.15:4194/containers/ in a browser to see the cadvisor monitoring page.
# https://192.168.10.15:10250/metrics and https://192.168.10.15:10250/metrics/cadvisor return the kubelet and cadvisor metrics respectively.

Note: kubelet.config.json sets authentication.anonymous.enabled to false, so anonymous access to the HTTPS service on 10250 is not allowed. See the reference on accessing the kube-apiserver secure port from a browser: create and import the relevant certificates, then access port 10250 as above.

#1. Install a JDK and use the keytool utility
.\keytool -import -v -trustcacerts -alias appmanagement -file "E:\ca.pem" -storepass password -keystore cacerts
#2. Then run the following on Linux
openssl pkcs12 -export -out admin.pfx -inkey admin-key.pem -in admin.pem -certfile ca.pem
#3. Import the certificate into the browser and the pages above become accessible

2.8.2. Deploy kube-proxy

kube-proxy runs on all worker nodes; it watches the apiserver for changes to Services and Endpoints and creates routing rules to load-balance service traffic.

#1. Create the certificate signing request
cd /root/ssl
cat > kube-proxy-csr.json <<EOF
{
  "CN": "system:kube-proxy",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "ShangHai",
      "L": "ShangHai",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

#2. Generate the certificate and private key
cfssl gencert -ca=ca.pem \
  -ca-key=ca-key.pem \
  -config=ca-config.json \
  -profile=kubernetes  kube-proxy-csr.json | cfssljson -bare kube-proxy

#3. Create the kubeconfig file

#3.1. Set the cluster parameters
kubectl config set-cluster kubernetes \
  --certificate-authority=ca.pem \
  --embed-certs=true \
  --server=https://192.168.10.100:8443 \
  --kubeconfig=kube-proxy.kubeconfig
#3.2. Set the client credentials
kubectl config set-credentials kube-proxy \
  --client-certificate=kube-proxy.pem \
  --client-key=kube-proxy-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-proxy.kubeconfig
#3.3. Set the context
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kube-proxy \
  --kubeconfig=kube-proxy.kubeconfig
#3.4. Set the default context
kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig

#4. Copy to the node machines
cp kube-proxy*.pem kube-proxy.kubeconfig /etc/kubernetes/ssl/
scp kube-proxy*.pem kube-proxy.kubeconfig 192.168.10.15:/etc/kubernetes/ssl/
scp kube-proxy*.pem kube-proxy.kubeconfig 192.168.10.16:/etc/kubernetes/ssl/

#5. Create the kube-proxy configuration file
cd /root/ssl
cat >kube-proxy.config.yaml <<EOF
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 192.168.10.15
clientConnection:
  kubeconfig: /etc/kubernetes/ssl/kube-proxy.kubeconfig
clusterCIDR: 172.30.0.0/16
healthzBindAddress: 192.168.10.15:10256
hostnameOverride: 192.168.10.15
kind: KubeProxyConfiguration
metricsBindAddress: 192.168.10.15:10249
mode: "ipvs"
EOF

#6. Copy to the node machines (change bindAddress, healthzBindAddress, hostnameOverride, and metricsBindAddress to each node's own IP)
cp kube-proxy.config.yaml /etc/kubernetes/ssl/
scp kube-proxy.config.yaml 192.168.10.15:/etc/kubernetes/ssl/
scp kube-proxy.config.yaml 192.168.10.16:/etc/kubernetes/ssl/

#7. Create the kube-proxy.service file, then copy it to the other nodes
cat << EOF > /etc/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
WorkingDirectory=/var/lib/kube-proxy
ExecStart=/usr/local/bin/kube-proxy \
  --config=/etc/kubernetes/ssl/kube-proxy.config.yaml \
  --alsologtostderr=true \
  --logtostderr=false \
  --log-dir=/var/log/kubernetes/kube-proxy \
  --v=2
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

#8. Start the kube-proxy service
mkdir -p /var/lib/kube-proxy && mkdir -p /var/log/kubernetes/kube-proxy
systemctl daemon-reload && systemctl enable kube-proxy && systemctl restart kube-proxy && systemctl status kube-proxy

netstat -lnpt|grep kube-proxy   #check the listening ports

ipvsadm -ln     #inspect the ipvs rules
#Output:
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  10.254.0.1:443 rr
  -> 192.168.10.12:6443           Masq    1      0          0         
  -> 192.168.10.13:6443           Masq    1      0          0         
  -> 192.168.10.14:6443           Masq    1      0          0

2.8.3. Verify cluster functionality

kubectl get nodes       #check node status

# 1. Create an nginx test manifest
cat << EOF > nginx-web.yml 
apiVersion: v1
kind: Service
metadata:
  name: nginx-web
  labels:
    tier: frontend
spec:
  type: NodePort
  selector:
    tier: frontend
  ports:
  - name: http
    port: 80
    targetPort: 80
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: nginx-con
  labels:
    tier: frontend
spec:
  replicas: 3
  template:
    metadata:
      labels:
        tier: frontend
    spec:
      containers:
      - name: nginx-pod
        image: nginx
        ports:
        - containerPort: 80
EOF

#2. Apply the manifest
kubectl create -f nginx-web.yml     
#Output shows the objects were created
service/nginx-web created
deployment.extensions/nginx-con created

#3. Check pod status
kubectl get pod -o wide
#Output:
NAME                         READY   STATUS    RESTARTS   AGE    IP            NODE            NOMINATED NODE   READINESS GATES
nginx-con-7dc84bdfb6-h6bt6   1/1     Running   0          105s   172.30.85.2   192.168.10.16   <none>           <none>
nginx-con-7dc84bdfb6-nt5qs   1/1     Running   0          105s   172.30.34.3   192.168.10.15   <none>           <none>
nginx-con-7dc84bdfb6-sfg87   1/1     Running   0          105s   172.30.34.2   192.168.10.15   <none>           <none>

#4. Test that the pod IPs respond to ping
ping -c4 172.30.34.2
PING 172.30.34.2 (172.30.34.2) 56(84) bytes of data.
64 bytes from 172.30.34.2: icmp_seq=1 ttl=63 time=0.543 ms
64 bytes from 172.30.34.2: icmp_seq=2 ttl=63 time=0.684 ms
64 bytes from 172.30.34.2: icmp_seq=3 ttl=63 time=0.886 ms
64 bytes from 172.30.34.2: icmp_seq=4 ttl=63 time=0.817 ms

#5. Check the service cluster IP
kubectl get svc     #output:
NAME         TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)        AGE
kubernetes   ClusterIP   10.254.0.1       <none>        443/TCP        37h
nginx-web    NodePort    10.254.153.104   <none>        80:31808/TCP   4m19s
# 10.254.153.104 is the nginx cluster IP, proxying the 3 pods above; 80 is the cluster IP port and 31808 is the NodePort

#6. curl node_ip:nodeport
curl -I 192.168.10.15:31808     #an HTTP 200 means the access succeeded
HTTP/1.1 200 OK
Server: nginx/1.17.0
Date: Sat, 29 Jun 2019 05:03:15 GMT
Content-Type: text/html
Content-Length: 612
Last-Modified: Tue, 21 May 2019 14:23:57 GMT
Connection: keep-alive
ETag: "5ce409fd-264"
Accept-Ranges: bytes

#7. Access the cluster IP from a host on the flannel network
ip add | grep 10.254
    inet 10.254.0.1/32 brd 10.254.0.1 scope global kube-ipvs0
    inet 10.254.153.104/32 brd 10.254.153.104 scope global kube-ipvs0

curl -I http://10.254.153.104:80    #returns the following
HTTP/1.1 200 OK
Server: nginx/1.17.0
Date: Sat, 29 Jun 2019 05:05:56 GMT
Content-Type: text/html
Content-Length: 612
Last-Modified: Tue, 21 May 2019 14:23:57 GMT
Connection: keep-alive
ETag: "5ce409fd-264"
Accept-Ranges: bytes
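
Once these checks pass, the test resources can be removed so they do not linger in the cluster (optional; assumes nginx-web.yml is still in the current directory):

# Delete the verification Service and Deployment
kubectl delete -f nginx-web.yml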

2.9、Deploy cluster add-ons

Add-ons are auxiliary components that enrich and complete the cluster's functionality.

2.9.1、Deploy the CoreDNS add-on

#1. After extracting kubernetes-server-linux-amd64.tar.gz, extract the kubernetes-src.tar.gz file inside it
mkdir -p src && tar -zxvf kubernetes-src.tar.gz -C src   #coredns lives under: cluster/addons/dns

#2. Modify the configuration file
cd src/cluster/addons/dns/coredns
cp coredns.yaml.base /etc/kubernetes/coredns.yaml

sed -i "s/__PILLAR__DNS__DOMAIN__/cluster.local/g" /etc/kubernetes/coredns.yaml
sed -i "s/__PILLAR__DNS__SERVER__/10.254.0.2/g" /etc/kubernetes/coredns.yaml

#3. Create CoreDNS
kubectl create -f /etc/kubernetes/coredns.yaml

#4. Check CoreDNS functionality
kubectl -n kube-system get all -o wide
#output similar to:
NAME                          READY   STATUS    RESTARTS   AGE
pod/coredns-8854569d4-5vshp   1/1     Running   0          58m
#
NAME               TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)                  AGE
service/kube-dns   ClusterIP   10.254.0.2   <none>        53/UDP,53/TCP,9153/TCP   81m
#
NAME                      READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/coredns   1/1     1            1           58m
#
NAME                                DESIRED   CURRENT   READY   AGE
replicaset.apps/coredns-8854569d4   1         1         1       58m
#4.1
kubectl -n kube-system describe pod coredns
#4.2
kubectl -n kube-system logs coredns-8854569d4-5vshp

#5. Verify from inside a container
kubectl run dns-test --rm -it --image=alpine /bin/sh
# Inside the container, pinging an external site works
ping www.baidu.com
PING www.baidu.com (182.61.200.6): 56 data bytes
64 bytes from 182.61.200.6: seq=0 ttl=127 time=41.546 ms
64 bytes from 182.61.200.6: seq=1 ttl=127 time=35.043 ms
64 bytes from 182.61.200.6: seq=2 ttl=127 time=38.977 ms
64 bytes from 182.61.200.6: seq=3 ttl=127 time=40.633 ms

# List pods in all namespaces
kubectl get --all-namespaces pods
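
Pinging an external site only proves outbound connectivity; to confirm that CoreDNS actually resolves in-cluster names, a quick check can be run in a throwaway pod (busybox:1.28 is assumed here because its nslookup is known to behave well):

# Resolve the kubernetes service through the cluster DNS (10.254.0.2)
kubectl run dns-check --rm -it --image=busybox:1.28 --restart=Never -- nslookup kubernetes.default.svc.cluster.local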

#6. If the image cannot be pulled, change the manifest to pull from the coredns repository on Docker Hub instead
sed -i "s/k8s.gcr.io/coredns/g" /etc/kubernetes/coredns.yaml

2.9.2、Deploy the dashboard add-on

References:
https://github.com/kubernetes/dashboard/wiki/Access-control
https://github.com/kubernetes/dashboard/issues/2558
https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/

#1. After extracting kubernetes-server-linux-amd64.tar.gz, extract the kubernetes-src.tar.gz file inside it. The dashboard files live under cluster/addons/dashboard; copy them out:

mkdir -p /etc/kubernetes/dashboard

cp -a /opt/kubernetes/src/cluster/addons/dashboard/{dashboard-configmap.yaml,dashboard-controller.yaml,dashboard-rbac.yaml,dashboard-secret.yaml,dashboard-service.yaml} /etc/kubernetes/dashboard

#2. Modify the configuration files
sed -i "s@image:.*@image: registry.cn-hangzhou.aliyuncs.com/google_containers/kubernetes-dashboard-amd64:v1.10.1@g" /etc/kubernetes/dashboard/dashboard-controller.yaml
sed -i "/spec/a\  type: NodePort" /etc/kubernetes/dashboard/dashboard-service.yaml
sed -i "/targetPort/a\    nodePort: 32700" /etc/kubernetes/dashboard/dashboard-service.yaml

#3. Apply all definition files
kubectl create -f /etc/kubernetes/dashboard

#4. Check the assigned NodePort
kubectl -n kube-system get all -o wide
#
NAME                                        READY   STATUS    RESTARTS   AGE
pod/coredns-8854569d4-5vshp                 1/1     Running   0          119m
pod/kubernetes-dashboard-7d5f7c58f5-mr8zn   1/1     Running   0          5m1s
#
NAME                           TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)                  AGE
service/kube-dns               ClusterIP   10.254.0.2     <none>        53/UDP,53/TCP,9153/TCP   142m
service/kubernetes-dashboard   NodePort    10.254.63.16   <none>        443:32700/TCP            51s
#
NAME                                   READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/coredns                1/1     1            1           119m
deployment.apps/kubernetes-dashboard   1/1     1            1           5m4s
#
NAME                                              DESIRED   CURRENT   READY   AGE
replicaset.apps/coredns-8854569d4                 1         1         1       119m
replicaset.apps/kubernetes-dashboard-7d5f7c58f5   1         1         1       5m4s

kubectl -n kube-system describe pod kubernetes-dashboard

#The NodePort maps to port 443 of the dashboard pod;
#The dashboard's --authentication-mode supports token and basic, and defaults to token. To use basic, kube-apiserver must be configured with the '--authorization-mode=ABAC' and '--basic-auth-file' parameters.

#5. View the command-line flags supported by the dashboard
kubectl exec --namespace kube-system -it kubernetes-dashboard-7d5f7c58f5-mr8zn -- /dashboard --help

#6. Access the dashboard
# For cluster security, since 1.7 the dashboard only allows access over HTTPS. When it is exposed through kube proxy, the proxy must listen on localhost or 127.0.0.1; NodePort access has no such restriction, but is only recommended for development environments. If these conditions are not met, the browser stays on the login page after a successful login and never redirects.
Reference 1: https://github.com/kubernetes/dashboard/wiki/Accessing-Dashboard---1.7.X-and-above
Reference 2: https://github.com/kubernetes/dashboard/issues/2540
# Three ways to access the dashboard:
# via NodePort;
# via kubectl proxy;
# via kube-apiserver.

#7. Access the dashboard via NodePort
# The kubernetes-dashboard service exposes a NodePort, so the dashboard can be reached at https://NodeIP:NodePort (the NodePort fronts the pod's HTTPS port 443);

#8. Access the dashboard via kubectl proxy
#Start the proxy:
kubectl proxy --address='localhost' --port=8086 --accept-hosts='^*$'
# --address must be localhost or 127.0.0.1;
# the --accept-hosts option is required, otherwise the browser gets an "Unauthorized" error when opening the dashboard page;
# Browser URL: http://127.0.0.1:8086/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy
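
As a quick smoke test (assuming the proxy above is still running on port 8086), the proxied dashboard URL can be fetched from the same host:

# An HTTP 200 response means the proxy is reaching the dashboard service
curl -s -o /dev/null -w '%{http_code}\n' http://127.0.0.1:8086/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/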

#9. Access the dashboard via kube-apiserver
# List the cluster service URLs:
kubectl cluster-info
# The dashboard must be accessed through kube-apiserver's secure (HTTPS) port; the browser must present a custom (client) certificate, otherwise kube-apiserver refuses the connection.
# For the steps to create and import such a certificate, see: A. Accessing the kube-apiserver secure port from a browser
# Browser URL: https://192.168.10.100:8443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy

#10. Create a login token and a kubeconfig file for the Dashboard
# As noted above, the Dashboard only supports token authentication by default, so when a KubeConfig file is used the token must be embedded in it; client-certificate authentication is not supported.

# Create a login token to use when accessing the dashboard
kubectl create sa dashboard-admin -n kube-system
kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin
ADMIN_SECRET=$(kubectl get secrets -n kube-system | grep dashboard-admin | awk '{print $1}')
DASHBOARD_LOGIN_TOKEN=$(kubectl describe secret -n kube-system ${ADMIN_SECRET} | grep -E '^token' | awk '{print $2}')
echo ${DASHBOARD_LOGIN_TOKEN}

#Log in to the Dashboard using the token printed above.

#Create a KubeConfig file that uses the token
cd /root/ssl
#Set the cluster parameters
kubectl config set-cluster kubernetes \
  --certificate-authority=ca.pem \
  --embed-certs=true \
  --server=https://192.168.10.100:8443 \
  --kubeconfig=dashboard.kubeconfig

#Set the client credentials, using the token created above
kubectl config set-credentials dashboard_user \
  --token=${DASHBOARD_LOGIN_TOKEN} \
  --kubeconfig=dashboard.kubeconfig

#Set the context parameters
kubectl config set-context default \
  --cluster=kubernetes \
  --user=dashboard_user \
  --kubeconfig=dashboard.kubeconfig

#Set the default context
kubectl config use-context default --kubeconfig=dashboard.kubeconfig
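
Before handing the file to a browser, it can be sanity-checked from the command line; since dashboard-admin is bound to cluster-admin above, a simple read should succeed:

# The embedded token should be accepted by the apiserver
kubectl --kubeconfig=dashboard.kubeconfig get nodes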

#Log in to the Dashboard with the generated dashboard.kubeconfig.
#Because the Heapster add-on is not installed yet, the dashboard cannot display CPU, memory and other statistics or charts for Pods and Nodes;

2.9.3、Deploy the Heapster add-on

Heapster is a collector: it aggregates the cAdvisor data from every node and exports it to a third-party backend (such as InfluxDB). Heapster obtains cAdvisor metrics by calling the kubelet HTTP API. Since the kubelet only accepts HTTPS requests on port 10250, the heapster Deployment configuration must be adjusted accordingly, and the kube-system:heapster ServiceAccount must be granted permission to call the kubelet API.

Reference (configuring heapster): https://github.com/kubernetes/heapster/blob/master/docs/source-configuration.md

Heapster download: https://github.com/kubernetes-retired/heapster/releases

#1. Extract heapster
mkdir /opt/heapster
tar -xzvf heapster-1.5.4.tar.gz -C /opt/heapster --strip-components=1   # assumes the GitHub source tarball layout; stripping the top-level heapster-1.5.4/ directory lets the deploy/ paths below resolve under /opt/heapster

#2. Modify the configuration
mkdir -p /etc/kubernetes/heapster
cp -a /opt/heapster/deploy/kube-config/influxdb/{grafana.yaml,heapster.yaml,influxdb.yaml} /etc/kubernetes/heapster

sed -i "s@image:.*@image: registry.cn-hangzhou.aliyuncs.com/google_containers/heapster-grafana-amd64:v4.4.3@g" /etc/kubernetes/heapster/grafana.yaml

sed -i "67a\  type: NodePort" /etc/kubernetes/heapster/grafana.yaml

sed -i "/targetPort/a\    nodePort: 32699" /etc/kubernetes/heapster/grafana.yaml

sed -i "s@image:.*@image: registry.cn-hangzhou.aliyuncs.com/google_containers/heapster-amd64:v1.5.3@g" /etc/kubernetes/heapster/heapster.yaml

# Since the kubelet only listens for HTTPS requests on 10250, add the corresponding source parameters;
sed -i "s@source=.*@source=kubernetes:https://kubernetes.default?kubeletHttps=true\&kubeletPort=10250@g" /etc/kubernetes/heapster/heapster.yaml

sed -i "s@image:.*@image: registry.cn-hangzhou.aliyuncs.com/google_containers/heapster-influxdb-amd64:v1.3.3@g" /etc/kubernetes/heapster/influxdb.yaml

# Bind the ServiceAccount kube-system:heapster to the ClusterRole system:kubelet-api-admin, granting it permission to call the kubelet API;
cp -a /opt/heapster/deploy/kube-config/rbac/heapster-rbac.yaml /etc/kubernetes/heapster

cat > /etc/kubernetes/heapster/heapster-rbac.yaml <<EOF
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: heapster-kubelet-api
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:kubelet-api-admin
subjects:
- kind: ServiceAccount
  name: heapster
  namespace: kube-system
EOF

#3. Apply all definition files
kubectl create -f  /etc/kubernetes/heapster
kubectl apply -f  /etc/kubernetes/heapster/heapster-rbac.yaml
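
The binding can be verified with an impersonated authorization check; kubectl auth can-i accepts a service account via --as:

# Should print "yes" once heapster-rbac.yaml has been applied
kubectl auth can-i get nodes/stats --as=system:serviceaccount:kube-system:heapster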

#4. Check the results
kubectl -n kube-system get all -o wide | grep -E 'heapster|monitoring'

kubectl -n kube-system describe pod heapster

kubectl -n kube-system describe pod monitoring

# Check the kubernetes dashboard UI: it should now correctly display CPU, memory, load and other statistics and charts for Nodes and Pods:

kubectl -n kube-system get all -o wide

kubectl -n kube-system logs heapster-7bdc95b5cc-8h7zt

#5. Access grafana through the NodePort:
kubectl get svc -n kube-system|grep -E 'monitoring|heapster'
#output similar to the following; grafana listens on NodePort 32699
heapster               ClusterIP   10.254.159.62    <none>        80/TCP                   12m     k8s-app=heapster
monitoring-grafana     NodePort    10.254.167.38    <none>        80:32699/TCP             4m29s   k8s-app=grafana
monitoring-influxdb    ClusterIP   10.254.155.141   <none>        8086/TCP                 12m     k8s-app=influxdb

kubectl get pod -n kube-system -o wide |grep -E 'monitoring|heapster' 
#output similar to the following; then browse to: http://192.168.10.16:32699/?orgId=1
heapster-7bdc95b5cc-8h7zt               1/1     Running   0          13m     172.30.34.4    192.168.10.15
monitoring-grafana-6cf5948cd4-rstxk     1/1     Running   0          5m      172.30.85.11   192.168.10.16
monitoring-influxdb-7d6c5fb944-qfd65    1/1     Running   0          13m     172.30.85.10   192.168.10.16

#6. Access via kube-apiserver: get the monitoring-grafana service URL:
kubectl cluster-info    
#the resulting browser URL: https://192.168.10.100:8443/api/v1/namespaces/kube-system/services/monitoring-grafana/proxy

#Access via kubectl proxy: start a proxy
kubectl proxy --address='192.168.10.16' --port=8086 --accept-hosts='^*$'
# Browser URL: http://192.168.10.16:8086/api/v1/namespaces/kube-system/services/monitoring-grafana/proxy/?orgId=1