Deploying Containers with k8s

I. Deploying the k8s Nodes

1. Environment Planning

  • System environment overview
System environment
Item                 Version / Value             Notes
Operating system     Ubuntu 16.04 or CentOS 7    CentOS 7 is used here
Kubernetes version   v1.14.3                     -
Docker version       19.03.1                     installed via yum
  • TLS certificates used by each component
Cluster deployment - self-signed TLS certificates
Component                 Certificates used
etcd                      ca.pem, server.pem, server-key.pem
kube-apiserver            ca.pem, server.pem, server-key.pem
flanneld                  ca.pem, server.pem, server-key.pem
kube-controller-manager   ca.pem, ca-key.pem
kubelet                   ca.pem, ca-key.pem
kube-proxy                ca.pem, kube-proxy.pem, kube-proxy-key.pem
kubectl                   ca.pem, admin.pem, admin-key.pem
  • Server IP to role mapping
Role         IP              Components
k8s-master   192.168.10.21   kube-apiserver   kube-controller-manager   kube-scheduler   docker
k8s-node01   192.168.10.22   etcd   kubelet   kube-proxy   docker
k8s-node02   192.168.10.23   etcd   kubelet   kube-proxy   docker
k8s-node03   192.168.10.24   etcd   kubelet   kube-proxy   docker

2. Deploying the etcd Cluster

2.1 Set host aliases and configure mutual SSH trust

cat >> /etc/hosts  << EOF
192.168.10.21  k8s-master
192.168.10.22  k8s-node01
192.168.10.23  k8s-node02
192.168.10.24  k8s-node03
EOF

Using the master node as the example (repeat on the other nodes as needed):
ssh-keygen
ssh-copy-id  k8s-node01
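
A quick check that the key-based trust works from the master (a sketch; it assumes the host aliases added to /etc/hosts above):
for node in k8s-node01 k8s-node02 k8s-node03; do ssh -o BatchMode=yes $node hostname; done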

2.2 Add firewall rules between the cluster nodes

Enable the firewall:
systemctl    start   firewalld
firewall-cmd --permanent --add-rich-rule="rule family=ipv4 source address=192.168.10.21 accept"  
firewall-cmd --permanent --add-rich-rule="rule family=ipv4 source address=192.168.10.22 accept"  
firewall-cmd --permanent --add-rich-rule="rule family=ipv4 source address=192.168.10.23 accept"
firewall-cmd --permanent --add-rich-rule="rule family=ipv4 source address=192.168.10.24 accept" 
firewall-cmd --permanent --add-rich-rule="rule family=ipv4 source address=192.168.1.106 accept"

systemctl    daemon-reload
systemctl    restart firewalld
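
To confirm the rules were applied after the restart (a sketch using standard firewall-cmd queries):
firewall-cmd --list-rich-rules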

2.3 Generate the certificates

  • Copy the binaries required on the k8s-master node
#Copy the binaries required on the k8s-master node
mkdir -p /app/kubernetes/{bin,cfg,ssl};
\cp  ./server/bin/{kube-apiserver,kube-scheduler,kube-controller-manager,kubectl} /app/kubernetes/bin;

#Add the binaries to PATH
echo 'export PATH=$PATH:/app/kubernetes/bin' >> /etc/profile;
source /etc/profile;

#Download the cfssl binaries used to generate the TLS certificates
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64;
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64;
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64;
chmod +x cfssl_linux-amd64 cfssljson_linux-amd64 cfssl-certinfo_linux-amd64;
mv cfssl_linux-amd64          /usr/local/bin/cfssl;
mv cfssljson_linux-amd64      /usr/local/bin/cfssljson;
mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo;
  • Script that creates all the certificates in one pass
  • cat certificate.sh
#!/bin/bash
#
cat > ca-config.json << EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
         "expiry": "87600h",
         "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ]
      }
    }
  }
}
EOF

cat > ca-csr.json  << EOF
{
    "CN": "kubernetes",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {

            "C": "CN",
            "L": "Beijing",
            "ST": "Beijing",
        "O": "k8s",
        "OU": "System"
        }
    ]
}
EOF

cat > kube-proxy-csr.json << EOF
{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

cat > admin-csr.json << EOF
{
  "CN": "admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}
EOF

cat > server-csr.json << EOF
{
    "CN": "kubernetes",
    "hosts": [
      "127.0.0.1",
      "192.168.10.21",
      "192.168.10.22",
      "192.168.10.23",
      "192.168.10.24",
      "10.10.10.1",
      "kubernetes",
      "kubernetes.default",
      "kubernetes.default.svc",
      "kubernetes.default.svc.cluster",
      "kubernetes.default.svc.cluster.local"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "BeiJing",
            "ST": "BeiJing",
        "O": "k8s",
        "OU": "System"
        }
    ]
}
EOF
cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json     | cfssljson -bare server
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json      | cfssljson -bare admin
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
\cp  *pem /app/kubernetes/ssl/
  • Run the certificate script
sh  certificate.sh
  • On the other nodes, k8s-node01, k8s-node02 and k8s-node03, create the directories
mkdir -p /app/kubernetes/{bin,cfg,ssl}
  • From the master node, copy the TLS certificates to the node servers
for i in   1 2 3   ; do scp *pem  k8s-node0$i:/app/kubernetes/ssl/; done
//SSH trust was configured earlier
//The certificates cover what both the master and the node components need
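
Optionally inspect a generated certificate before distributing it; a sketch using cfssl-certinfo (downloaded above) and the stock openssl:
cfssl-certinfo -cert /app/kubernetes/ssl/server.pem
openssl x509 -in /app/kubernetes/ssl/server.pem -noout -subject -dates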

2.4 Configure etcd

https://github.com/coreos/etcd/releases/tag/v3.3.12

  • Copy the etcd binaries to all etcd nodes
tar -xf etcd-v3.3.12-linux-amd64.tar.gz
for i in 1 2 3 ;do scp etcd-v3.3.12-linux-amd64/{etcd,etcdctl} k8s-node0$i:/app/kubernetes/bin/; done
  • Run the configuration script
  • cat etcd.sh  (adjust the etcd member name and IPs for each node)
#!/bin/bash
#
#etcd01 on k8s-node01 is used as the example; the member name and IPs are configured below
k8s_node01=192.168.10.22
k8s_node02=192.168.10.23
k8s_node03=192.168.10.24

cat > /app/kubernetes/cfg/etcd << EOF 
KUBE_ETCD_OPTS="                                   \\
--name=etcd01                                      \\
--data-dir=/var/lib/etcd/default.etcd              \\
--listen-peer-urls=https://${k8s_node01}:2380      \\
--listen-client-urls=https://${k8s_node01}:2379,http://127.0.0.1:2379 \\
--advertise-client-urls=https://${k8s_node01}:2379                    \\
--initial-advertise-peer-urls=https://${k8s_node01}:2380              \\
--initial-cluster=etcd01=https://${k8s_node01}:2380,etcd02=https://${k8s_node02}:2380,etcd03=https://${k8s_node03}:2380 \\
--initial-cluster-token=etcd-cluster               \\
--initial-cluster-state=new                        \\
--cert-file=/app/kubernetes/ssl/server.pem         \\
--key-file=/app/kubernetes/ssl/server-key.pem      \\
--peer-cert-file=/app/kubernetes/ssl/server.pem    \\
--peer-key-file=/app/kubernetes/ssl/server-key.pem \\
--trusted-ca-file=/app/kubernetes/ssl/ca.pem       \\
--peer-trusted-ca-file=/app/kubernetes/ssl/ca.pem"
EOF

#Configure the etcd systemd service
cat  > /usr/lib/systemd/system/etcd.service   << EOF
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
EnvironmentFile=-/app/kubernetes/cfg/etcd
ExecStart=/app/kubernetes/bin/etcd   \$KUBE_ETCD_OPTS
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl start  etcd
systemctl enable etcd
systemctl status etcd

The other etcd nodes are deployed the same way; just change the etcd member name and IP in the script and run it (see the sketch below).
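
As a sketch (member layout taken from the role table above), only these values change in /app/kubernetes/cfg/etcd for etcd02 on k8s-node02; --initial-cluster stays identical on all three members:
--name=etcd02
--listen-peer-urls=https://192.168.10.23:2380
--listen-client-urls=https://192.168.10.23:2379,http://127.0.0.1:2379
--advertise-client-urls=https://192.168.10.23:2379
--initial-advertise-peer-urls=https://192.168.10.23:2380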

  • Check the etcd cluster status
/app/kubernetes/bin/etcdctl --ca-file=/app/kubernetes/ssl/ca.pem     \
    --cert-file=/app/kubernetes/ssl/server.pem                       \
    --key-file=/app/kubernetes/ssl/server-key.pem                    \
    --endpoints="https://192.168.10.22:2379,https:192.168.10.23:2379,https://192.168.10.24:2379" cluster-health

member 445a7d567d5cea7f is healthy: got healthy result from https://192.168.1.230:2379
member a04dd241344fb42a is healthy: got healthy result from https://192.168.1.240:2379
member e5160a05dd6cb2ed is healthy: got healthy result from https://192.168.1.226:2379
cluster is healthy

//"cluster is healthy" means the cluster is working correctly

3. Deploying Docker on All Nodes

  • Run the configuration script to install the Docker service
  • cat docker_install.sh
cat > docker_install.sh << EOF
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
yum install -y docker-ce

#Install bash completion for docker
yum install -y bash-completion
source /usr/share/bash-completion/completions/docker
source /usr/share/bash-completion/bash_completion

systemctl start  docker
systemctl enable docker
systemctl status docker
EOF
  • Copy the script to the other nodes and run the installation (a distribution loop is sketched below). Installing Docker on the master is optional and does not affect anything; once installed, the master can communicate with containers on the nodes.
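
A possible distribution loop from the master (a sketch; it assumes the script is in the current directory and SSH trust is already configured):
for i in 1 2 3; do scp docker_install.sh k8s-node0$i:/tmp/; ssh k8s-node0$i "sh /tmp/docker_install.sh"; done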

4. Deploying the flannel Container Network

https://github.com/coreos/flannel/releases/download/v0.11.0/flannel-v0.11.0-linux-amd64.tar.gz

  • On the master node:
  • Copy the flannel binaries to all node servers
tar xf flannel-v0.11.0-linux-amd64.tar.gz
for i in 1 2 3;do scp flanneld  mk-docker-opts.sh k8s-node0$i:/app/kubernetes/bin/;done
  • Run the configuration script
  • cat flanneld.sh
#!/bin/bash 
#
k8s_node01=192.168.10.22
k8s_node02=192.168.10.23
k8s_node03=192.168.10.24

#Write the allocated subnet to etcd for flanneld; this must run first, otherwise the flannel service cannot start
/app/kubernetes/bin/etcdctl                     \
  --ca-file=/app/kubernetes/ssl/ca.pem          \
  --cert-file=/app/kubernetes/ssl/server.pem    \
  --key-file=/app/kubernetes/ssl/server-key.pem \
  --endpoints="https://${k8s_node01}:2379,https://${k8s_node02}:2379,https://${k8s_node03}:2379" \
  set /coreos.com/network/config '{"Network": "172.50.0.0/16", "Backend": {"Type": "vxlan"}}'

#Configure flannel:
cat > /app/kubernetes/cfg/flanneld << EOF 
FLANNEL_OPTIONS="                              \\
--etcd-endpoints=https://${k8s_node01}:2379,https://${k8s_node02}:2379,https://${k8s_node03}:2379 \\
-etcd-cafile=/app/kubernetes/ssl/ca.pem        \\
-etcd-certfile=/app/kubernetes/ssl/server.pem  \\
-etcd-keyfile=/app/kubernetes/ssl/server-key.pem"
EOF

#Manage flannel with systemd:
cat > /usr/lib/systemd/system/flanneld.service << EOF
[Unit]
Description=Flanneld overlay address etcd agent
After=network-online.target network.target
Before=docker.service

[Service]
Type=notify
EnvironmentFile=/app/kubernetes/cfg/flanneld
ExecStart=/app/kubernetes/bin/flanneld --ip-masq \$FLANNEL_OPTIONS
ExecStartPost=/app/kubernetes/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/subnet.env
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

#Configure Docker to start with the flannel-assigned subnet:
cat > /usr/lib/systemd/system/docker.service <<  EOF    
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service
Wants=network-online.target

[Service]
Type=notify
EnvironmentFile=/run/flannel/subnet.env
ExecStart=/usr/bin/dockerd  \$DOCKER_NETWORK_OPTIONS
ExecReload=/bin/kill -s HUP \$MAINPID
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TimeoutStartSec=0
Delegate=yes
KillMode=process
Restart=on-failure
StartLimitBurst=3
StartLimitInterval=60s

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl start   flanneld
systemctl enable  flanneld
systemctl restart docker  
systemctl status  flanneld
  • Retrieve the configured network (for verification)
/app/kubernetes/bin/etcdctl                    \
 --ca-file=/app/kubernetes/ssl/ca.pem          \
 --cert-file=/app/kubernetes/ssl/server.pem    \
 --key-file=/app/kubernetes/ssl/server-key.pem \
 --endpoints="https://192.168.10.22:2379,https://192.168.10.23:2379,https://192.168.10.24:2379" \
 get /coreos.com/network/config

The other nodes install flanneld the same way.
To test connectivity between nodes, reach another node's docker0 IP from the current node.

  • Notes:
1) Make sure etcd communication works and the cluster is healthy.
2) Add the subnet key to etcd first, otherwise flanneld fails to start.
3) Make sure docker0 and flannel.1 are in the same subnet; if they are not, reload and restart the docker service. The docker0 subnet comes from the
    environment variables in /run/flannel/subnet.env, which is generated automatically by the "write the subnet to etcd" step above and can be edited. A quick check is sketched below.
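
A quick consistency check on each node (a sketch):
cat /run/flannel/subnet.env
ip -4 addr show flannel.1
ip -4 addr show docker0
#If docker0 is not inside the flannel subnet, restart docker so it picks up DOCKER_NETWORK_OPTIONS:
systemctl daemon-reload && systemctl restart docker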

5. Generating the Token and kubeconfig Files

1) Create the TLS bootstrapping token
2) Create the kubelet kubeconfig
3) Create the kube-proxy kubeconfig

Run the following on the master node, then copy the generated files to the node servers.

  • Run the configuration script
  • cat kubeconfig.sh
#!/bin/bash
#
# Create the TLS bootstrapping token; the token string is generated randomly
export  BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
cat > token.csv << EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF

# Create the kubelet bootstrapping kubeconfig
KUBE_APISERVER="https://192.168.10.21:8080"

# Set cluster parameters
kubectl config set-cluster kubernetes \
    --certificate-authority=/app/kubernetes/ssl/ca.pem  \
    --embed-certs=true                \
    --server=${KUBE_APISERVER}        \
    --kubeconfig=bootstrap.kubeconfig

# Set client authentication parameters
kubectl config set-credentials kubelet-bootstrap \
    --token=${BOOTSTRAP_TOKEN}        \
    --kubeconfig=bootstrap.kubeconfig

# Set context parameters
kubectl config set-context default    \
    --cluster=kubernetes              \
    --user=kubelet-bootstrap          \
    --kubeconfig=bootstrap.kubeconfig

# Set the default context
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig

#--------------------------------
# Create the kube-proxy kubeconfig file

kubectl config set-cluster kubernetes \
  --certificate-authority=/app/kubernetes/ssl/ca.pem \
  --embed-certs=true                  \
  --server=${KUBE_APISERVER}          \
  --kubeconfig=kube-proxy.kubeconfig

kubectl config set-credentials kube-proxy \
  --client-certificate=./kube-proxy.pem   \
  --client-key=./kube-proxy-key.pem       \
  --embed-certs=true                      \
  --kubeconfig=kube-proxy.kubeconfig

kubectl config set-context default \
  --cluster=kubernetes             \
  --user=kube-proxy                \
  --kubeconfig=kube-proxy.kubeconfig

kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
  • Generate the configuration and token files needed by the nodes
chmod +x kubeconfig.sh  ; 
sh  kubeconfig.sh ;
//This produces three files: token.csv, bootstrap.kubeconfig and kube-proxy.kubeconfig, which are used later
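
To sanity-check the generated files before copying them to the nodes (a sketch):
kubectl config view --kubeconfig=bootstrap.kubeconfig
kubectl config view --kubeconfig=kube-proxy.kubeconfig
cat token.csv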

6. Deploying the Master Components

The Kubernetes master node runs the following components:

  • kube-apiserver
  • kube-scheduler
  • kube-controller-manager

For now these three components must be deployed on the same machine.

  • The functions of kube-scheduler, kube-controller-manager and kube-apiserver are tightly coupled;
  • Only one kube-scheduler and one kube-controller-manager process can be active at a time; if more than one is run, a leader must be chosen by election;

Steps:

  • Copy the binaries used to start the services;
  • Copy the token file and the required certificate files;
  • Configure and run the apiserver.sh script;
  • Verify the kube-apiserver service;

6.1 Configure and start kube-apiserver

  • The binary package contains all the client components

https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.12.md

  • Run the configuration script
  • cat apiserver.sh
#!/bin/bash
#
k8s_master=192.168.10.21
k8s_node01=192.168.10.22
k8s_node02=192.168.10.23
k8s_node03=192.168.10.24

ETCD_SERVER="https://${k8s_node01}:2379,https://${k8s_node02}:2379,https://${k8s_node03}:2379"

\cp  token.csv  /app/kubernetes/cfg

cat > /app/kubernetes/cfg/kube-apiserver << EOF
KUBE_APISERVER_OPTS="--logtostderr=true  \\
--v=4                                    \\
--etcd-servers=$ETCD_SERVER              \\
--bind-address=$k8s_master               \\
--secure-port=8080                       \\
--advertise-address=$k8s_master          \\
--allow-privileged=true                  \\
--service-cluster-ip-range=10.10.10.0/24 \\
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota,NodeRestriction \\
--authorization-mode=RBAC,Node           \\
--enable-bootstrap-token-auth            \\
--token-auth-file=/app/kubernetes/cfg/token.csv           \\
--service-node-port-range=30000-50000                     \\
--tls-cert-file=/app/kubernetes/ssl/server.pem            \\
--tls-private-key-file=/app/kubernetes/ssl/server-key.pem \\
--client-ca-file=/app/kubernetes/ssl/ca.pem               \\
--service-account-key-file=/app/kubernetes/ssl/ca-key.pem \\
--etcd-cafile=/app/kubernetes/ssl/ca.pem                  \\
--etcd-certfile=/app/kubernetes/ssl/server.pem            \\
--etcd-keyfile=/app/kubernetes/ssl/server-key.pem"
EOF

cat > /usr/lib/systemd/system/kube-apiserver.service << EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/app/kubernetes/cfg/kube-apiserver
ExecStart=/app/kubernetes/bin/kube-apiserver \$KUBE_APISERVER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl start  kube-apiserver
systemctl enable kube-apiserver
systemctl status kube-apiserver
  • A healthy cluster looks like the following; note that kubectl get cs can only be run on the master node
[root@localhost kubernetes]# kubectl  get cs 
NAME                 STATUS    MESSAGE             ERROR
controller-manager   Healthy   ok                  
scheduler            Healthy   ok                  
etcd-1               Healthy   {"health":"true"}   
etcd-0               Healthy   {"health":"true"}   
etcd-2               Healthy   {"health":"true"}

6.2 Configure and start kube-controller-manager

  • Run the configuration script
  • cat controller-manager.sh
#!/bin/bash
cat  > /app/kubernetes/cfg/kube-controller-manager << EOF
KUBE_CONTROLLER_MANAGER_OPTS="                   \\
--logtostderr=true                               \\
--v=4                                            \\
--master=127.0.0.1:8080                          \\
--leader-elect=true                              \\
--address=127.0.0.1                              \\
--service-cluster-ip-range=10.10.10.0/24         \\
--cluster-name=kubernetes                        \\
--cluster-signing-cert-file=/app/kubernetes/ssl/ca.pem     \\
--cluster-signing-key-file=/app/kubernetes/ssl/ca-key.pem  \\
--root-ca-file=/app/kubernetes/ssl/ca.pem                  \\
--service-account-private-key-file=/app/kubernetes/ssl/ca-key.pem"
EOF

cat > /usr/lib/systemd/system/kube-controller-manager.service << EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/app/kubernetes/cfg/kube-controller-manager
ExecStart=/app/kubernetes/bin/kube-controller-manager \$KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

systemctl  daemon-reload
systemctl  start   kube-controller-manager
systemctl  enable  kube-controller-manager
systemctl  status  kube-controller-manager

6.3 Configure and start kube-scheduler

  • Run the configuration script
  • cat scheduler.sh
#!/bin/bash
#
cat > /app/kubernetes/cfg/kube-scheduler << EOF
KUBE_SCHEDULER_OPTS="   \\
--logtostderr=true      \\
--v=4                   \\
--master=127.0.0.1:8080 \\
--leader-elect"
EOF

cat > /usr/lib/systemd/system/kube-scheduler.service  << EOF
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/app/kubernetes/cfg/kube-scheduler
ExecStart=/app/kubernetes/bin/kube-scheduler \$KUBE_SCHEDULER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl start  kube-scheduler
systemctl enable kube-scheduler
systemctl status kube-scheduler

7. Deploying the Node Components

A Kubernetes node runs the following components:

  • flanneld: installed earlier in the flannel network section above.
  • kubelet: installed directly from the binary
  • kube-proxy: installed directly from the binary

Steps:

  • Confirm that the flannel network plugin installed earlier is up and running normally
  • Install and configure docker, then start it
  • Install and configure kubelet and kube-proxy, then start them
  • Verify

On the master node:
Copy the configuration files required by the kubelet and kube-proxy services to the nodes

for i in 1 2 3 ; do scp bootstrap.kubeconfig kube-proxy.kubeconfig  k8s-node0$i:/app/kubernetes/cfg/; done
for i in 1 2 3 ; do scp ./server/bin/{kubelet,kube-proxy}           k8s-node0$i:/app/kubernetes/bin/; done  
The certificates were already copied over at the beginning.

Bind the kubelet-bootstrap user to the system cluster role:

kubectl create clusterrolebinding kubelet-bootstrap \
  --clusterrole=system:node-bootstrapper            \
  --user=kubelet-bootstrap
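
To confirm the binding was created (a sketch):
kubectl describe clusterrolebinding kubelet-bootstrap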

7.1 Configure and start kubelet

  • On the k8s-node01 node:
  • Run the configuration script
  • cat kubelet.sh
#!/bin/bash
#node01 is used as the example
k8s_node01=192.168.10.22
k8s_node02=192.168.10.23
k8s_node03=192.168.10.24

#Configure kubelet
cat > /app/kubernetes/cfg/kubelet << EOF
KUBELET_OPTS="--logtostderr=true              \\
  --v=4                                       \\
  --hostname-override=${k8s_node01}           \\
  --address=${k8s_node01}                     \\
  --kubeconfig=/app/kubernetes/cfg/kubelet.kubeconfig                          \\
  --experimental-bootstrap-kubeconfig=/app/kubernetes/cfg/bootstrap.kubeconfig \\
  --allow-privileged=true                     \\
  --cert-dir=/app/kubernetes/ssl              \\
  --cluster-dns=10.10.10.2                    \\
  --cluster-domain=cluster.local              \\
  --fail-swap-on=false                        \\
  --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0"
EOF

#Configure the kubelet systemd service
cat > /usr/lib/systemd/system/kubelet.service << EOF
[Unit]
Description=Kubernetes Kubelet
After=docker.service
Requires=docker.service

[Service]
EnvironmentFile=/app/kubernetes/cfg/kubelet
ExecStart=/app/kubernetes/bin/kubelet \$KUBELET_OPTS
Restart=on-failure
KillMode=process

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl start  kubelet
systemctl enable kubelet
systemctl status kubelet

The other nodes (k8s-node02, k8s-node03) are deployed the same way; just change the node IP (see the sketch below).
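
A minimal sketch of the per-node change, run on k8s-node02 after the config above has been generated there:
sed -i 's/192.168.10.22/192.168.10.23/g' /app/kubernetes/cfg/kubelet
systemctl restart kubelet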

  • Note: the /app/kubernetes/cfg/kubelet.kubeconfig file looks like the following:
//No manual configuration is needed; this is for reference only. After the kubelet starts normally it is generated and loaded automatically:

cat /app/kubernetes/cfg/kubelet.kubeconfig

apiVersion: v1
clusters:
- cluster:
    certificate-authority-data:  …………(long base64 string omitted)…………
    server: https://192.168.10.21:8080
  name: default-cluster
contexts:
- context:
    cluster: default-cluster
    namespace: default
    user: default-auth
  name: default-context
current-context: default-context
kind: Config
preferences: {}
users:
- name: default-auth
  user:
    client-certificate: /app/kubernetes/ssl/kubelet-client-current.pem
    client-key: /app/kubernetes/ssl/kubelet-client-current.pem
  • Check the unapproved CSR requests
$ kubectl get csr
NAME                                                   AGE   REQUESTOR           CONDITION
node-csr-Cm3XIZb_R6fEV1bbT9N2ufxuDAXkf05-8mnUjWbh6eo   67s   kubelet-bootstrap   Pending
node-csr-Jp_oHiFFO4ZTRKcKaIzKXiyKIIAZ2c4e09ne8I-VU90   65s   kubelet-bootstrap   Pending
node-csr-bTrFC53MHuzspJQUlyYTsESLpQe4TlFnlUtmyiMASjY   67s   kubelet-bootstrap   Pending
$ kubectl get nodes
No resources found.
  • Approve the CSR requests and verify
# kubectl certificate approve [NAME1 NAME2 NAME3]                 //several CSR names can be approved at once
certificatesigningrequest "node-csr-Cm3XIZb_R6fEV1bbT9N2ufxuDAXkf05-8mnUjWbh6eo" approved

# kubectl get csr
NAME                                                   AGE   REQUESTOR           CONDITION
node-csr-Cm3XIZb_R6fEV1bbT9N2ufxuDAXkf05-8mnUjWbh6eo   92s   kubelet-bootstrap   Approved,Issued
node-csr-Jp_oHiFFO4ZTRKcKaIzKXiyKIIAZ2c4e09ne8I-VU90   90s   kubelet-bootstrap   Approved,Issued
node-csr-bTrFC53MHuzspJQUlyYTsESLpQe4TlFnlUtmyiMASjY   92s   kubelet-bootstrap   Approved,Issued
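
When many nodes are pending, all CSRs can be approved in one pass (a sketch; it simply feeds every CSR name back into the approve command):
kubectl get csr -o name | xargs kubectl certificate approve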

7.2 Configure and start kube-proxy

  • On the k8s-node01 node:
  • Run the configuration script
  • cat kube-proxy.sh
#!/bin/bash
#node01 is used as the example
k8s_node01=192.168.10.22
k8s_node02=192.168.10.23
k8s_node03=192.168.10.24

#Configure kube-proxy
cat > /app/kubernetes/cfg/kube-proxy << EOF
KUBE_PROXY_OPTS="                   \\
--logtostderr=true                  \\
--v=4                               \\
--hostname-override=${k8s_node01}   \\
--cluster-cidr=10.10.10.0/24        \\
--kubeconfig=/app/kubernetes/cfg/kube-proxy.kubeconfig"
EOF

#Manage kube-proxy with systemd
cat  > /usr/lib/systemd/system/kube-proxy.service << EOF 
[Unit]
Description=Kubernetes Proxy
After=network.target

[Service]
EnvironmentFile=-/app/kubernetes/cfg/kube-proxy
ExecStart=/app/kubernetes/bin/kube-proxy \$KUBE_PROXY_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target  
EOF

systemctl daemon-reload
systemctl enable kube-proxy
systemctl start  kube-proxy
systemctl status kube-proxy
  • If kube-proxy logs an error like the following
Sep 20 09:35:16 k8s-node01 kube-proxy[25072]: E0920 09:35:16.077775   25072 reflector.go:126]
k8s.io/client-go/informers/factory.go:133: Failed to list *v1.Service: services is forbidden: User "system:anonymous
" cannot list resource "services" in API group "" at the cluster scope
  • Fix: bind the cluster-admin role to the anonymous user
  • Otherwise the mapped port cannot be reached from outside because of insufficient permissions
kubectl create clusterrolebinding system:anonymous   --clusterrole=cluster-admin   --user=system:anonymous
  • The other nodes (k8s-node02, k8s-node03) are deployed the same way; just change the corresponding node IP

8. Checking the k8s Cluster Status

# kubectl get node
NAME            STATUS   ROLES    AGE   VERSION
192.168.10.22   Ready    <none>   15s   v1.14.3
192.168.10.23   Ready    <none>   17s   v1.14.3
192.168.10.24   Ready    <none>   17s   v1.14.3

Note: when STATUS changes from 'NotReady' to 'Ready', the nodes have been added to the cluster successfully.

[root@k8s-master ~]# 
[root@k8s-master ~]# kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
controller-manager   Healthy   ok                  
scheduler            Healthy   ok                  
etcd-1               Healthy   {"health":"true"}   
etcd-2               Healthy   {"health":"true"}   
etcd-0               Healthy   {"health":"true"}

9. Running an nginx Test Container

  • Create an nginx web service to test whether the cluster works
1) Run the test container from the command line:
kubectl run nginx --image=nginx --replicas=3

2) Or start the test container from a YAML file:
cat > nginx.yaml << EOF
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: nginx
spec:
  replicas: 3
  template:
    metadata:
      labels:
        run: nginx
    spec:
      containers:
      - name: nginx
        image: nginx
        ports:
        - containerPort: 80
EOF

Run the container:
kubectl   create   -f  nginx.yaml 

Expose an external port for access:
kubectl expose deployment nginx --port=88 --target-port=80 --type=NodePort
  • Check the Pods and Service:
[root@k8s-master ~]# kubectl get pods
NAME                     READY   STATUS    RESTARTS   AGE
nginx-7db9fccd9b-5njsv   1/1     Running   0          45m
nginx-7db9fccd9b-tjz6z   1/1     Running   0          45m
nginx-7db9fccd9b-xtkdx   1/1     Running   0          45m
[root@k8s-master ~]# 
[root@k8s-master ~]# 
[root@k8s-master ~]# kubectl get svc
NAME         TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)        AGE
kubernetes   ClusterIP   10.10.10.1    <none>        443/TCP        47m
nginx        NodePort    10.10.10.73   <none>        88:49406/TCP   3m38s   
[root@k8s-master ~]#
  • Access the node addresses:
192.168.10.22:49406
192.168.10.23:49406
192.168.10.24:49406

The result looks like this:
[root@k8s-master ~]# curl -I  192.168.10.23:49406
HTTP/1.1 200 OK
Server: nginx/1.17.3
Date: Fri, 20 Sep 2019 01:58:34 GMT
Content-Type: text/html
Content-Length: 612
Last-Modified: Tue, 13 Aug 2019 08:50:00 GMT
Connection: keep-alive
ETag: "5d5279b8-264"
Accept-Ranges: bytes

Note: the proxied port can be seen listening on the node servers.

10. Deploying the kube-dns Component

  • On the management node, configure and edit the kube-dns.yaml file
  • The clusterIP just has to match the kubelet --cluster-dns startup flag; reserve one address in the service CIDR as the DNS address

  • Configuration after the changes

cat  > kube-dns.yaml <<  EOF
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "KubeDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 10.10.10.2
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  strategy:
    rollingUpdate:
      maxSurge: 10%
      maxUnavailable: 0
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"
      volumes:
      - name: kube-dns-config
        configMap:
          name: kube-dns
          optional: true
      containers:
      - name: kubedns
        image: netonline/k8s-dns-kube-dns-amd64:1.14.8
        resources:
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        livenessProbe:
          httpGet:
            path: /healthcheck/kubedns
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /readiness
            port: 8081
            scheme: HTTP
          initialDelaySeconds: 3
          timeoutSeconds: 5
        args:
        - --domain=cluster.local.
        - --dns-port=10053
        - --config-dir=/kube-dns-config
        - --v=2
        env:
        - name: PROMETHEUS_PORT
          value: "10055"
        ports:
        - containerPort: 10053
          name: dns-local
          protocol: UDP
        - containerPort: 10053
          name: dns-tcp-local
          protocol: TCP
        - containerPort: 10055
          name: metrics
          protocol: TCP
        volumeMounts:
        - name: kube-dns-config
          mountPath: /kube-dns-config
      - name: dnsmasq
        image: netonline/k8s-dns-dnsmasq-nanny-amd64:1.14.8
        livenessProbe:
          httpGet:
            path: /healthcheck/dnsmasq
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        args:
        - -v=2
        - -logtostderr
        - -configDir=/etc/k8s/dns/dnsmasq-nanny
        - -restartDnsmasq=true
        - --
        - -k
        - --cache-size=1000
        - --no-negcache
        - --log-facility=-
        - --server=/cluster.local./127.0.0.1#10053
        - --server=/in-addr.arpa/127.0.0.1#10053
        - --server=/ip6.arpa/127.0.0.1#10053
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        # see: https://github.com/kubernetes/kubernetes/issues/29055 for details
        resources:
          requests:
            cpu: 150m
            memory: 20Mi
        volumeMounts:
        - name: kube-dns-config
          mountPath: /etc/k8s/dns/dnsmasq-nanny
      - name: sidecar
        image: netonline/k8s-dns-sidecar-amd64:1.14.8
        livenessProbe:
          httpGet:
            path: /metrics
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        args:
        - --v=2
        - --logtostderr
        - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local.,5,SRV
        - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local.,5,SRV
        ports:
        - containerPort: 10054
          name: metrics
          protocol: TCP
        resources:
          requests:
            memory: 20Mi
            cpu: 10m
      dnsPolicy: Default  # Don't use cluster DNS.
      serviceAccountName: kube-dns  
EOF
  • Configuration notes
The kube-dns ServiceAccount needs no changes. The cluster's predefined ClusterRoleBinding system:kube-dns already binds the ServiceAccount kube-dns in the
kube-system namespace (where system services are usually deployed) to the predefined ClusterRole system:kube-dns, and that ClusterRole has the API permissions kube-dns needs against kube-apiserver.
  • Check the relevant objects
# kubectl get clusterrolebinding system:kube-dns -o yaml
………… (output truncated)
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:kube-dns
subjects:
- kind: ServiceAccount
  name: kube-dns
  namespace: kube-system

# kubectl get clusterrole system:kube-dns -o yaml
………… (output truncated)
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  verbs:
  - list
  - watch
  • Start kube-dns
kubectl create -f kube-dns.yaml
  • If kube-dns misbehaves and you want to delete it and reinstall
kubectl   get deployment --all-namespaces   //inspect
kubectl   delete  -f  kube-dns.yaml         //remove

11. Verifying the kube-dns Service

  • kube-dns Deployment & Service & Pod
  • All 3 containers of the kube-dns Pod are Ready, and the Service and Deployment also started normally
kubectl get pod        -n kube-system -o wide
kubectl get service    -n kube-system -o wide
kubectl get deployment -n kube-system -o wide
  • kube-dns lookups
  • The following operations check whether DNS resolution works
  • The default busybox image has issues; just pick a working one
# cat  > busybox.yaml  << EOF
apiVersion: v1
kind: Pod
metadata:
  name: busybox
  namespace: default
spec:
  containers:
  - name: busybox
    #image: zhangguanzhang/centos
    image: busybox:1.28
    command:
      - sleep
      - "3600"
    imagePullPolicy: IfNotPresent
  restartPolicy: Always
EOF

Run a pod that contains the lookup tools
# kubectl create  -f busybox.yaml

Find the kube-dns pod IP
# kubectl  get pod -n kube-system -o wide
NAME                                    READY   STATUS   AGE    IP            NODE            NOMINATED NODE   READINESS GATES
kube-dns-67fb7c784c-998xh               3/3     Running  174m   172.50.36.2   192.168.10.23   <none>           <none>
kubernetes-dashboard-8646f64494-5nzvs   1/1     Running  32d    172.50.32.3   192.168.10.24   <none>           <none>

$IP is the IP of the DNS pod found above.
Note: the command should print resolution output; if there is no output, something else is wrong.
# kubectl exec -ti busybox -- nslookup kubernetes.default
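
A further check against a regular Service (a sketch; it assumes the nginx Service created in section 9 still exists):
# kubectl exec -ti busybox -- nslookup nginx.default.svc.cluster.local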
  • kube-dns has three sets of logs
kubectl logs --namespace=kube-system $(kubectl get pods --namespace=kube-system -l k8s-app=kube-dns -o name | head -1) -c kubedns

kubectl logs --namespace=kube-system $(kubectl get pods --namespace=kube-system -l k8s-app=kube-dns -o name | head -1) -c dnsmasq

kubectl logs --namespace=kube-system $(kubectl get pods --namespace=kube-system -l k8s-app=kube-dns -o name | head -1) -c sidecar

12. Deploying the Dashboard Component

  • Run on the management node
  • The images used are listed below
netonline/kubernetes-dashboard-amd64:v1.8.3    //used in the configuration below
registry.cn-hangzhou.aliyuncs.com/google_containers/kubernetes-dashboard-amd64:v1.7.1
  • Configuration file templates
# ConfigMap
wget https://raw.githubusercontent.com/kubernetes/kubernetes/master/cluster/addons/dashboard/dashboard-configmap.yaml

# Secret
wget https://raw.githubusercontent.com/kubernetes/kubernetes/master/cluster/addons/dashboard/dashboard-secret.yaml

# RBAC
wget https://raw.githubusercontent.com/kubernetes/kubernetes/master/cluster/addons/dashboard/dashboard-rbac.yaml

# dashboard-controller
wget https://raw.githubusercontent.com/kubernetes/kubernetes/master/cluster/addons/dashboard/dashboard-controller.yaml

# Service
wget https://raw.githubusercontent.com/kubernetes/kubernetes/master/cluster/addons/dashboard/dashboard-service.yaml
  • The modified YAML files used in this exercise:

https://github.com/Netonline2016/kubernetes/tree/master/addons/dashboard

12.1 Modify dashboard-configmap.yaml

No changes for now; for this verification dashboard-controller does not use the ConfigMap either.

12.2 Modify dashboard-rbac.yaml

  • The default dashboard-rbac.yaml defines a Role named "kubernetes-dashboard-minimal" and a RoleBinding of the same name that grants it to the ServiceAccount "kubernetes-dashboard";
  • The permissions of that default Role are too limited to be convenient for verification;
  • Redefine the RBAC: only a new ClusterRoleBinding kubernetes-dashboard is needed, granting it the cluster's built-in full-permission ClusterRole cluster-admin; use this kind of grant with caution in production;

  • Configuration after the changes

cat > dashboard-rbac.yaml << EOF
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding        ## modified or added
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole             ## modified or added
  name: cluster-admin           ## modified or added
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard  ## 修改或新增部分
namespace: kube-system
EOF

12.3 Modify dashboard-secret.yaml

dashboard-secret.yaml is used without changes.

12.4 Modify dashboard-controller.yaml

  • dashboard-controller.yaml defines the ServiceAccount resource (authorization) and the Deployment (the service Pod)
  • Change the image used by this file
sed -i 's|k8s.gcr.io/kubernetes-dashboard-amd64:v1.8.3|netonline/kubernetes-dashboard-amd64:v1.8.3|g' dashboard-controller.yaml
  • Configuration file after the changes
cat > dashboard-controller.yaml << EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
    addonmanager.kubernetes.io/mode: Reconcile
  name: kubernetes-dashboard
  namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      priorityClassName: system-cluster-critical
      containers:
      - name: kubernetes-dashboard
        image: netonline/kubernetes-dashboard-amd64:v1.8.3   ## modified or added
        resources:
          limits:
            cpu: 100m
            memory: 300Mi
          requests:
            cpu: 50m
            memory: 100Mi
        ports:
        - containerPort: 8443
          protocol: TCP
        args:
          # PLATFORM-SPECIFIC ARGS HERE
          - --auto-generate-certificates
        volumeMounts:
        - name: kubernetes-dashboard-certs
          mountPath: /certs
        - name: tmp-volume
          mountPath: /tmp
        livenessProbe:
          httpGet:
            scheme: HTTPS
            path: /
            port: 8443
          initialDelaySeconds: 30
          timeoutSeconds: 30
      volumes:
      - name: kubernetes-dashboard-certs
        secret:
          secretName: kubernetes-dashboard-certs
      - name: tmp-volume
        emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"
EOF

12.5 Modify dashboard-service.yaml

  • Set the service type to NodePort so the dashboard can be reached directly through a node address for verification (not recommended in production); "nodePort: 38443" pins the port, otherwise one is chosen at random from the service node-port range

  • Configuration file after the changes

cat  > dashboard-service.yaml << EOF 
apiVersion: v1
kind: Service
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  selector:
    k8s-app: kubernetes-dashboard
  type: NodePort
  ports:
  - port: 443
    targetPort: 8443
    nodePort: 38443
EOF

12.6 Start kubernetes-dashboard and verify the service

  • Start the services defined by the four YAML files: rbac, secret, controller and service;
  • or simply: kubectl create -f .
kubectl create -f dashboard-rbac.yaml 
kubectl create -f dashboard-secret.yaml 
kubectl create -f dashboard-controller.yaml 
kubectl create -f dashboard-service.yaml
  • Check the related services
  • Check the Service, Deployment and Pod
# kubectl get svc -n kube-system
NAME                   TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)         AGE
kube-dns               ClusterIP   10.10.10.2    <none>        53/UDP,53/TCP   23h
kubernetes-dashboard   NodePort    10.10.10.30   <none>        443:38443/TCP   92m

# kubectl get deployment -n kube-system
NAME                   READY   UP-TO-DATE   AVAILABLE   AGE
kube-dns               1/1     1            1           23h
kubernetes-dashboard   1/1     1            1           95m

# kubectl get pod -n kube-system
NAME                                    READY   STATUS    RESTARTS   AGE
kube-dns-5995c87955-dt76f               3/3     Running   0          23h
kubernetes-dashboard-8646f64494-6ttr4   1/1     Running   0          96m

List the cluster services:
kubectl cluster-info

12.7 Access the dashboard

  • Find the nodePort used to access the UI
# kubectl   get svc -n kube-system
NAME                   TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)         AGE
kube-dns               ClusterIP   10.10.10.2    <none>        53/UDP,53/TCP   23h
kubernetes-dashboard   NodePort    10.10.10.97   <none>        443:38443/TCP   22h
  • Find the node server the dashboard pod is running on
#  kubectl get pod -n kube-system -o wide
NAME                                  READY STATUS  RESTARTS AGE IP          NODE          NOMINATED NODE READINESS GATES
kube-dns-5995c87955-j5z7n             3/3   Running 6        26h 172.50.94.2 192.168.10.23 <none>         <none>
kubernetes-dashboard-8646f64494-5nzvs 1/1   Running 2        25h 172.50.29.2 192.168.10.24 <none>         <none>
  • Access the UI address
//Chrome and IE are strict about the self-signed certificate and fail to open the page; use Firefox and add a security exception for the URL

https://192.168.10.24:38443
  • Create an administrator user
Create a service account named admin-user in the kube-system namespace and bind the cluster-admin role to the admin-user account so that it has administrator permissions.
The cluster-admin ClusterRole already exists by default once the cluster is up, so we only need to create the binding.

# cat > dashboard-adminuser.yaml << EOF 
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kube-system
EOF

# kubectl create -f dashboard-adminuser.yaml
  • Get the administrator token
# kubectl get secret -n kube-system  
NAME                               TYPE                                  DATA   AGE
admin-user-token-g9tfb             kubernetes.io/service-account-token   3      164m
default-token-wgwk7                kubernetes.io/service-account-token   3      27h
kube-dns-token-x7skk               kubernetes.io/service-account-token   3      26h
kubernetes-dashboard-certs         Opaque                                0      25h
kubernetes-dashboard-key-holder    Opaque                                2      25h
kubernetes-dashboard-token-qrhhr   kubernetes.io/service-account-token   3      25h

# kubectl describe secret  -n kube-system admin-user-token-g9tfb
Name:         admin-user-token-g9tfb
…………
ca.crt:     1359 bytes
namespace:  11 bytes
token:
eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLWc5dGZiIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJkYjE0Y2UxNi1kZTk4LTExZTktOGM3OC0wMDBjMjk2MGY2MWMiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.QcLgpvc_gX8ZID7EpIcw5wwJtHb6S2e8DvdB5j-69uAWDFe46KJvRBYdDVCkAEHm0GcZDO1oQbNb-bQi0FdVdgG9G_bVFxo_1-LygBb5Uudqa6antjISmd9Gx675raw-Lwa2BLt4Y4_zEPKGR3cu9Ri6MYJG6ecGp5Q4ev5Ne8adK711dSWne_WLO22nFkdT-yqhWYecppnGSqrUNsBsDGI83IuZzxMrAH-nm7qAdnWDY7SOBzpeEpn9NDiIlh6kIz1c6n7pvQDILb4we9RF2IB5g-vi3lklk4lJnKo2WSmGEeRn7dQ-vYmCQ82OSUTCWtWTNKAgVJeQfGvSXOkbOA

Note: this token is the credential for logging in to the UI; just paste it in, and watch out for whitespace in the token value. A one-liner to extract it is sketched below.
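
A one-liner to pull the token out directly (a sketch; the secret name is looked up from the admin-user ServiceAccount instead of being typed by hand):
# kubectl -n kube-system describe secret $(kubectl -n kube-system get sa admin-user -o jsonpath='{.secrets[0].name}') | awk '/^token:/{print $2}'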
