Run on all nodes: the 3 masters (which also form the three-node etcd cluster) and the 2 worker nodes.
# run the matching command on each host
hostnamectl --static set-hostname ops-k8s-master01
hostnamectl --static set-hostname ops-k8s-master02
hostnamectl --static set-hostname ops-k8s-master03
hostnamectl --static set-hostname ops-k8s-node01
hostnamectl --static set-hostname ops-k8s-node02
Configure the host mappings on this machine
cat <<EOF>>/etc/hosts
10.0.0.10 ops-k8s-master01 ops-k8s-master01.local.com
10.0.0.11 ops-k8s-master02 ops-k8s-master02.local.com
10.0.0.12 ops-k8s-master03 ops-k8s-master03.local.com
10.0.0.13 ops-k8s-node01 ops-k8s-node01.local.com
10.0.0.14 ops-k8s-node02 ops-k8s-node02.local.com
10.0.0.15 ops-k8s-harbor01 harbor01.local.com
10.0.0.16 ops-k8s-harbor02 harbor02.local.com
EOF
Distribute the hosts file to the other cluster nodes
for i in ops-k8s-master02 ops-k8s-master03 ops-k8s-node01 ops-k8s-node02 ops-k8s-harbor01 ops-k8s-harbor02;do scp /etc/hosts $i:/etc/;done
ssh-keygen   # press Enter at every prompt
for i in ops-k8s-master01 ops-k8s-master02 ops-k8s-master03 ops-k8s-node01 ops-k8s-node02;do ssh-copy-id $i;done
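If you prefer to skip the prompts entirely, a hedged non-interactive sketch of the same two steps (empty passphrase, default key path):

ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa
for i in ops-k8s-master01 ops-k8s-master02 ops-k8s-master03 ops-k8s-node01 ops-k8s-node02;do ssh-copy-id $i;done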
Stop the firewall, disable swap, disable SELinux, tune the kernel, install dependency packages, and configure NTP (a reboot is recommended once this is done)
for i in ops-k8s-master01 ops-k8s-master02 ops-k8s-master03 ops-k8s-node01 ops-k8s-node02;do ssh -n $i "mkdir -p /opt/scripts/shell && exit";done
cat>/opt/scripts/shell/init_k8s_env.sh<<EOF
#!/bin/bash
#by wzs at 20180419
#auto install k8s
#1.stop firewall
systemctl stop firewalld
systemctl disable firewalld
#2.stop swap
swapoff -a
sed -i 's/.*swap.*/#&/' /etc/fstab
#3.stop selinux
setenforce 0
sed -i "s/^SELINUX=enforcing/SELINUX=disabled/g" /etc/sysconfig/selinux
sed -i "s/^SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
sed -i "s/^SELINUX=permissive/SELINUX=disabled/g" /etc/sysconfig/selinux
sed -i "s/^SELINUX=permissive/SELINUX=disabled/g" /etc/selinux/config
#4.install base packages
yum install -y net-tools vim lrzsz tree screen lsof tcpdump wget nmap dos2unix nc traceroute telnet nfs-utils mailx pciutils ftp ksh lvm2 gcc gcc-c++ dmidecode kde-l10n-Chinese*
#5.set ntpdate
systemctl enable ntpdate.service
echo '*/30 * * * * /usr/sbin/ntpdate time7.aliyun.com >/dev/null 2>&1' > /tmp/crontab2.tmp
crontab /tmp/crontab2.tmp
systemctl start ntpdate.service
#6.set security limit
echo "* soft nofile 65536" >> /etc/security/limits.conf
echo "* hard nofile 65536" >> /etc/security/limits.conf
echo "* soft nproc 65536" >> /etc/security/limits.conf
echo "* hard nproc 65536" >> /etc/security/limits.conf
echo "* soft memlock unlimited" >> /etc/security/limits.conf
echo "* hard memlock unlimited" >> /etc/security/limits.conf
EOF
for i in ops-k8s-master02 ops-k8s-master03 ops-k8s-node01 ops-k8s-node02;do scp /opt/scripts/shell/init_k8s_env.sh $i:/opt/scripts/shell/;done
for i in ops-k8s-master01 ops-k8s-master02 ops-k8s-master03 ops-k8s-node01 ops-k8s-node02;do ssh -n $i "/bin/bash /opt/scripts/shell/init_k8s_env.sh && exit";done
cd /etc/yum.repos.d/
wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum install -y docker-ce
systemctl enable docker
systemctl start docker
systemctl status docker
Additional notes:
1. Remove old Docker versions
yum list installed | grep docker
yum -y remove docker*
## remove containers and images
rm -rf /var/lib/docker
# run on the other nodes
for i in ops-k8s-master02 ops-k8s-master03 ops-k8s-node01 ops-k8s-node02;do ssh -n $i "yum -y remove docker* && rm -rf /var/lib/docker && exit";done
2. Install the new version
cat>/opt/scripts/shell/install_docker.sh<<'EOF'
#!/bin/sh
###############################################################################
# Confirm Env
###############################################################################
date
echo "## Install Preconfirm"
echo "## Uname"
uname -r
echo
echo "## OS bit"
getconf LONG_BIT
echo
###############################################################################
# INSTALL yum-utils
###############################################################################
date
echo "## Install begins : yum-utils"
yum install -y yum-utils >/dev/null 2>&1
if [ $? -ne 0 ]; then
    echo "Install failed..."
    exit 1
fi
echo "## Install ends : yum-utils"
echo
###############################################################################
# Setting yum-config-manager
###############################################################################
echo "## Setting begins : yum-config-manager"
yum-config-manager \
    --add-repo \
    https://download.docker.com/linux/centos/docker-ce.repo >/dev/null 2>&1
if [ $? -ne 0 ]; then
    echo "Install failed..."
    exit 1
fi
echo "## Setting ends : yum-config-manager"
echo
###############################################################################
# Update Package Cache
###############################################################################
echo "## Setting begins : Update package cache"
yum makecache fast >/dev/null 2>&1
if [ $? -ne 0 ]; then
    echo "Install failed..."
    exit 1
fi
echo "## Setting ends : Update package cache"
echo
###############################################################################
# INSTALL docker-ce
###############################################################################
date
echo "## Install begins : docker-ce"
yum install -y docker-ce
if [ $? -ne 0 ]; then
    echo "Install failed..."
    exit 1
fi
echo "## Install ends : docker-ce"
date
echo
###############################################################################
# Stop Firewalld
###############################################################################
echo "## Setting begins : stop firewall"
systemctl stop firewalld
if [ $? -ne 0 ]; then
    echo "Install failed..."
    exit 1
fi
systemctl disable firewalld
if [ $? -ne 0 ]; then
    echo "Install failed..."
    exit 1
fi
echo "## Setting ends : stop firewall"
echo
###############################################################################
# Clear Iptable rules
###############################################################################
echo "## Setting begins : clear iptable rules"
iptables -F
if [ $? -ne 0 ]; then
    echo "Install failed..."
    exit 1
fi
echo "## Setting ends : clear iptable rules"
echo
###############################################################################
# Enable docker
###############################################################################
echo "## Setting begins : systemctl enable docker"
systemctl enable docker
if [ $? -ne 0 ]; then
    echo "Install failed..."
    exit 1
fi
echo "## Setting ends : systemctl enable docker"
echo
###############################################################################
# Start docker
###############################################################################
echo "## Setting begins : systemctl restart docker"
systemctl restart docker
if [ $? -ne 0 ]; then
    echo "Install failed..."
    exit 1
fi
echo "## Setting ends : systemctl restart docker"
echo
###############################################################################
# Confirm docker version
###############################################################################
echo "## docker info"
docker info
echo
echo "## docker version"
docker version
EOF
3. Distribute the script to the other nodes and run the installation
for i in ops-k8s-master02 ops-k8s-master03 ops-k8s-node01 ops-k8s-node02;do scp /opt/scripts/shell/install_docker.sh $i:/opt/scripts/shell/;done
for i in ops-k8s-master01 ops-k8s-master02 ops-k8s-master03 ops-k8s-node01 ops-k8s-node02;do ssh -n $i "/bin/bash /opt/scripts/shell/install_docker.sh && exit";done
for i in ops-k8s-master01 ops-k8s-master02 ops-k8s-master03 ops-k8s-node01 ops-k8s-node02;do ssh -n $i "mkdir -p /opt/kubernetes/{cfg,bin,ssl,log,yaml} && exit";done
Directory layout
kubernetes/
├── bin    # binaries; added to PATH below
├── cfg    # configuration files
├── log    # log files
├── ssl    # cluster certificates
└── yaml   # YAML manifests

5 directories, 0 files
Download: https://pan.baidu.com/disk/home?#/all?vmode=list&path=%2Fsoftware%2Fsalt-kubernetes
cd /usr/local/src
# upload the package here, then:
unzip -d /usr/local/src k8s-v1.10.1-manual.zip
Run on all cluster nodes
echo "PATH=$PATH:/opt/kubernetes/bin">>/root/.bash_profile source /root/.bash_profile
cd /usr/local/src
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
chmod +x cfssl*
mv cfssl-certinfo_linux-amd64 /opt/kubernetes/bin/cfssl-certinfo
mv cfssljson_linux-amd64 /opt/kubernetes/bin/cfssljson
mv cfssl_linux-amd64 /opt/kubernetes/bin/cfssl
for i in ops-k8s-master02 ops-k8s-master03 ops-k8s-node01 ops-k8s-node02;do scp -r /opt/kubernetes/bin/cfssl* $i:/opt/kubernetes/bin/;done
# create a directory for managing certificates
cd /usr/local/src
mkdir ssl && cd ssl
cfssl print-defaults config > config.json
cfssl print-defaults csr > csr.json
cat >ca-config.json<<EOF
{
  "signing": {
    "default": {
      "expiry": "175200h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ],
        "expiry": "175200h"
      }
    }
  }
}
EOF
cat >ca-csr.json<<EOF
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
ls -l ca*
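Optionally, sanity-check the freshly generated CA with the cfssl-certinfo binary installed earlier; the CN and expiry should match ca-csr.json and ca-config.json:

cfssl-certinfo -cert ca.pem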
for i in ops-k8s-master01 ops-k8s-master02 ops-k8s-master03 ops-k8s-node01 ops-k8s-node02;do scp -r ca.csr ca.pem ca-key.pem ca-config.json $i:/opt/kubernetes/ssl/;done
etcd download page: https://github.com/coreos/etcd/releases/
cd /usr/local/src/
wget https://github.com/coreos/etcd/releases/download/v3.2.18/etcd-v3.2.18-linux-amd64.tar.gz
tar xf etcd-v3.2.18-linux-amd64.tar.gz
cd etcd-v3.2.18-linux-amd64
for i in ops-k8s-master01 ops-k8s-master02 ops-k8s-master03;do scp etcd etcdctl $i:/opt/kubernetes/bin/;done
cd /usr/local/src/ssl
cat>etcd-csr.json<<EOF
{
  "CN": "etcd",
  "hosts": [
    "127.0.0.1",
    "10.0.0.10",
    "10.0.0.11",
    "10.0.0.12"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [{
    "C": "CN",
    "ST": "BeiJing",
    "L": "BeiJing",
    "O": "k8s",
    "OU": "System"
  }]
}
EOF
cfssl gencert -ca=/opt/kubernetes/ssl/ca.pem \
  -ca-key=/opt/kubernetes/ssl/ca-key.pem \
  -config=/opt/kubernetes/ssl/ca-config.json \
  -profile=kubernetes etcd-csr.json | cfssljson -bare etcd
# the following certificate files are generated
ls -l etcd*
Then distribute the certificates to the other etcd cluster nodes
for i in ops-k8s-master01 ops-k8s-master02 ops-k8s-master03;do scp etcd*.pem $i:/opt/kubernetes/ssl/;done
cat>/opt/kubernetes/cfg/etcd.conf<<EOF
#[member]
ETCD_NAME="ops-k8s-master01"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
#ETCD_SNAPSHOT_COUNTER="10000"
#ETCD_HEARTBEAT_INTERVAL="100"
#ETCD_ELECTION_TIMEOUT="1000"
ETCD_LISTEN_PEER_URLS="https://10.0.0.10:2380"
ETCD_LISTEN_CLIENT_URLS="https://10.0.0.10:2379,https://127.0.0.1:2379"
#ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
#ETCD_CORS=""
#[cluster]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://10.0.0.10:2380"
# if you use different ETCD_NAME (e.g. test),
# set ETCD_INITIAL_CLUSTER value for this name, i.e. "test=http://..."
ETCD_INITIAL_CLUSTER="ops-k8s-master01=https://10.0.0.10:2380,ops-k8s-master02=https://10.0.0.11:2380,ops-k8s-master03=https://10.0.0.12:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="k8s-etcd-cluster"
ETCD_ADVERTISE_CLIENT_URLS="https://10.0.0.10:2379"
#[security]
CLIENT_CERT_AUTH="true"
ETCD_CA_FILE="/opt/kubernetes/ssl/ca.pem"
ETCD_CERT_FILE="/opt/kubernetes/ssl/etcd.pem"
ETCD_KEY_FILE="/opt/kubernetes/ssl/etcd-key.pem"
PEER_CLIENT_CERT_AUTH="true"
ETCD_PEER_CA_FILE="/opt/kubernetes/ssl/ca.pem"
ETCD_PEER_CERT_FILE="/opt/kubernetes/ssl/etcd.pem"
ETCD_PEER_KEY_FILE="/opt/kubernetes/ssl/etcd-key.pem"
EOF
cat>/etc/systemd/system/etcd.service<<'EOF'
[Unit]
Description=Etcd Server
After=network.target

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd
EnvironmentFile=-/opt/kubernetes/cfg/etcd.conf
# set GOMAXPROCS to number of processors
ExecStart=/bin/bash -c "GOMAXPROCS=$(nproc) /opt/kubernetes/bin/etcd"

[Install]
WantedBy=multi-user.target
EOF
for i in ops-k8s-master02 ops-k8s-master03;do scp /opt/kubernetes/cfg/etcd.conf $i:/opt/kubernetes/cfg/;done
for i in ops-k8s-master02 ops-k8s-master03;do scp /etc/systemd/system/etcd.service $i:/etc/systemd/system/;done
Note: on each node, update the IP addresses and node name in /opt/kubernetes/cfg/etcd.conf; see the sketch below.
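A hedged sed sketch for ops-k8s-master02 (the line-anchored patterns only touch the member-specific URLs, deliberately leaving ETCD_INITIAL_CLUSTER intact); repeat with ops-k8s-master03 and 10.0.0.12:

ssh -n ops-k8s-master02 "sed -i \
  -e 's/^ETCD_NAME=.*/ETCD_NAME=\"ops-k8s-master02\"/' \
  -e '/^ETCD_LISTEN_PEER_URLS/s/10.0.0.10/10.0.0.11/' \
  -e '/^ETCD_LISTEN_CLIENT_URLS/s/10.0.0.10/10.0.0.11/' \
  -e '/^ETCD_INITIAL_ADVERTISE_PEER_URLS/s/10.0.0.10/10.0.0.11/' \
  -e '/^ETCD_ADVERTISE_CLIENT_URLS/s/10.0.0.10/10.0.0.11/' \
  /opt/kubernetes/cfg/etcd.conf"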
for i in ops-k8s-master01 ops-k8s-master02 ops-k8s-master03;do ssh -n $i "mkdir -p /var/lib/etcd && exit";done
systemctl daemon-reload
systemctl enable etcd
systemctl start etcd
systemctl status etcd
Note: repeat the steps above on every etcd node until the etcd service is running on all machines.
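A hedged loop that does this from master01; --no-block keeps the first member's start from hanging while it waits for the others to form a quorum:

for i in ops-k8s-master01 ops-k8s-master02 ops-k8s-master03;do
  ssh -n $i "systemctl daemon-reload && systemctl enable etcd && systemctl --no-block start etcd"
done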
etcdctl --endpoints=https://10.0.0.10:2379 \
  --ca-file=/opt/kubernetes/ssl/ca.pem \
  --cert-file=/opt/kubernetes/ssl/etcd.pem \
  --key-file=/opt/kubernetes/ssl/etcd-key.pem cluster-health
# expected output
member 69c08d868bbff6f1 is healthy: got healthy result from https://10.0.0.12:2379
member a87115828af54fe6 is healthy: got healthy result from https://10.0.0.10:2379
member f96d77d9089bd1e3 is healthy: got healthy result from https://10.0.0.11:2379
cluster is healthy
## if the output matches the above, the cluster is fine
For a clustered setup, the IP must be replaced with the VIP address.
for i in ops-k8s-master01 ops-k8s-master02 ops-k8s-master03;do ssh -n $i "yum install -y keepalived && cp /etc/keepalived/keepalived.conf{,.bak} && exit";done
Note:
1. If the NIC name bound here differs from your environment, change it accordingly.
2. Adjust the remaining keepalived settings (state, priority, IPs) for the master and backup nodes.
# on ops-k8s-master01 (MASTER, 10.0.0.10):
cat <<EOF > /etc/keepalived/keepalived.conf
global_defs {
   router_id LVS_k8s
}
vrrp_script CheckK8sMaster {
    script "curl -k https://10.0.0.7:6443"
    interval 3
    timeout 9
    fall 2
    rise 2
}
vrrp_instance VI_1 {
    state MASTER
    interface ens192
    virtual_router_id 61
    priority 100
    advert_int 1
    mcast_src_ip 10.0.0.10
    nopreempt
    authentication {
        auth_type PASS
        auth_pass sqP05dQgMSlzrxHj
    }
    unicast_peer {
        10.0.0.11
        10.0.0.12
    }
    virtual_ipaddress {
        10.0.0.7/24
    }
    track_script {
        CheckK8sMaster
    }
}
EOF
# on ops-k8s-master02 (BACKUP, 10.0.0.11):
cat <<EOF > /etc/keepalived/keepalived.conf
global_defs {
   router_id LVS_k8s
}
vrrp_script CheckK8sMaster {
    script "curl -k https://10.0.0.7:6443"
    interval 3
    timeout 9
    fall 2
    rise 2
}
vrrp_instance VI_1 {
    state BACKUP
    interface ens192
    virtual_router_id 61
    priority 90
    advert_int 1
    mcast_src_ip 10.0.0.11
    nopreempt
    authentication {
        auth_type PASS
        auth_pass sqP05dQgMSlzrxHj
    }
    unicast_peer {
        10.0.0.10
        10.0.0.12
    }
    virtual_ipaddress {
        10.0.0.7/24
    }
    track_script {
        CheckK8sMaster
    }
}
EOF
# on ops-k8s-master03 (BACKUP, 10.0.0.12):
cat <<EOF > /etc/keepalived/keepalived.conf
global_defs {
   router_id LVS_k8s
}
vrrp_script CheckK8sMaster {
    script "curl -k https://10.0.0.7:6443"
    interval 3
    timeout 9
    fall 2
    rise 2
}
vrrp_instance VI_1 {
    state BACKUP
    interface ens192
    virtual_router_id 61
    priority 80
    advert_int 1
    mcast_src_ip 10.0.0.12
    nopreempt
    authentication {
        auth_type PASS
        auth_pass sqP05dQgMSlzrxHj
    }
    unicast_peer {
        10.0.0.10
        10.0.0.11
    }
    virtual_ipaddress {
        10.0.0.7/24
    }
    track_script {
        CheckK8sMaster
    }
}
EOF
systemctl enable keepalived
systemctl start keepalived
systemctl status keepalived
1. On the master node, check that the VIP is present:
   ip a|grep 10.0.0.7
2. Kill the master node and check whether the VIP moves to backup01:
   on the master run:  systemctl stop keepalived
   on backup01 check:  ip a|grep 10.0.0.7
3. Kill master and backup01 and check whether the VIP moves to backup02:
   on master and backup01 run:  systemctl stop keepalived
   on backup02 check:           ip a|grep 10.0.0.7
[root@k8s-master ~]# cd /usr/local/src/
[root@k8s-master src]# wget https://dl.k8s.io/v1.10.1/kubernetes.tar.gz
[root@k8s-master src]# wget https://dl.k8s.io/v1.10.1/kubernetes-server-linux-amd64.tar.gz
[root@k8s-master src]# wget https://dl.k8s.io/v1.10.1/kubernetes-client-linux-amd64.tar.gz
[root@k8s-master src]# wget https://dl.k8s.io/v1.10.1/kubernetes-node-linux-amd64.tar.gz
[root@k8s-master ~]# cd /usr/local/src/
[root@k8s-master src]# wget https://github.com/kubernetes/kubernetes/releases/download/v1.10.3/kubernetes.tar.gz
[root@k8s-master src]# tar -zxvf kubernetes.tar.gz
[root@k8s-master src]# ll
total 2664
drwxr-xr-x 9 root root     156 May 21 18:16 kubernetes
-rw-r--r-- 1 root root 2726918 May 21 19:15 kubernetes.tar.gz
[root@k8s-master src]# cd kubernetes/cluster/
[root@k8s-master cluster]# ./get-kube-binaries.sh
cd /usr/local/src/
# upload the packages (rz): kubernetes-server-linux-amd64.tar.gz kubernetes.tar.gz
tar xf kubernetes-server-linux-amd64.tar.gz
tar xf kubernetes.tar.gz
cd kubernetes
## copy to the other master nodes
for i in ops-k8s-master01 ops-k8s-master02 ops-k8s-master03;do scp /usr/local/src/kubernetes/server/bin/{kube-apiserver,kube-controller-manager,kube-scheduler} $i:/opt/kubernetes/bin/;done
cd /usr/local/src/ssl/
cat>kubernetes-csr.json<<EOF
{
  "CN": "kubernetes",
  "hosts": [
    "127.0.0.1",
    "10.1.0.1",
    "10.0.0.10",
    "10.0.0.11",
    "10.0.0.12",
    "10.0.0.7",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [{
    "C": "CN",
    "ST": "BeiJing",
    "L": "BeiJing",
    "O": "k8s",
    "OU": "System"
  }]
}
EOF
cfssl gencert -ca=/opt/kubernetes/ssl/ca.pem \
  -ca-key=/opt/kubernetes/ssl/ca-key.pem \
  -config=/opt/kubernetes/ssl/ca-config.json \
  -profile=kubernetes kubernetes-csr.json | cfssljson -bare kubernetes
# distribute the certificates to the other master nodes
for i in ops-k8s-master01 ops-k8s-master02 ops-k8s-master03;do scp kubernetes*.pem $i:/opt/kubernetes/ssl/;done
# head -c 16 /dev/urandom | od -An -t x | tr -d ' '
a39e5244495964d9f66a5b8e689546ae
cat>/opt/kubernetes/ssl/bootstrap-token.csv<<EOF
a39e5244495964d9f66a5b8e689546ae,kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF
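The literal token above must reappear later when bootstrap.kubeconfig is generated. A hedged sketch that generates a fresh token and writes the csv in one step; if you use it, substitute the new value everywhere the old token appears:

BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
echo "${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,\"system:kubelet-bootstrap\"" > /opt/kubernetes/ssl/bootstrap-token.csv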
for i in ops-k8s-master02 ops-k8s-master03;do scp /opt/kubernetes/ssl/bootstrap-token.csv $i:/opt/kubernetes/ssl/;done
cat>/opt/kubernetes/ssl/basic-auth.csv<<EOF
admin,admin,1
readonly,readonly,2
EOF
for i in ops-k8s-master02 ops-k8s-master03;do scp /opt/kubernetes/ssl/basic-auth.csv $i:/opt/kubernetes/ssl/;done
The etcd endpoints below may also be written as the VIP address.
cat>/usr/lib/systemd/system/kube-apiserver.service<<EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
ExecStart=/opt/kubernetes/bin/kube-apiserver \
  --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota,NodeRestriction \
  --bind-address=10.0.0.10 \
  --insecure-bind-address=127.0.0.1 \
  --authorization-mode=Node,RBAC \
  --runtime-config=rbac.authorization.k8s.io/v1 \
  --kubelet-https=true \
  --anonymous-auth=false \
  --basic-auth-file=/opt/kubernetes/ssl/basic-auth.csv \
  --enable-bootstrap-token-auth \
  --token-auth-file=/opt/kubernetes/ssl/bootstrap-token.csv \
  --service-cluster-ip-range=10.1.0.0/16 \
  --service-node-port-range=20000-40000 \
  --tls-cert-file=/opt/kubernetes/ssl/kubernetes.pem \
  --tls-private-key-file=/opt/kubernetes/ssl/kubernetes-key.pem \
  --client-ca-file=/opt/kubernetes/ssl/ca.pem \
  --service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \
  --etcd-cafile=/opt/kubernetes/ssl/ca.pem \
  --etcd-certfile=/opt/kubernetes/ssl/kubernetes.pem \
  --etcd-keyfile=/opt/kubernetes/ssl/kubernetes-key.pem \
  --etcd-servers=https://10.0.0.10:2379,https://10.0.0.11:2379,https://10.0.0.12:2379 \
  --enable-swagger-ui=true \
  --allow-privileged=true \
  --audit-log-maxage=30 \
  --audit-log-maxbackup=3 \
  --audit-log-maxsize=100 \
  --audit-log-path=/opt/kubernetes/log/api-audit.log \
  --event-ttl=1h \
  --v=2 \
  --logtostderr=false \
  --log-dir=/opt/kubernetes/log
Restart=on-failure
RestartSec=5
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
for i in ops-k8s-master02 ops-k8s-master03;do scp /usr/lib/systemd/system/kube-apiserver.service $i:/usr/lib/systemd/system/;done
Note: on each master, adjust --bind-address and the etcd cluster IP addresses to match that node; see the sketch below.
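A hedged sketch of the per-master edit (assumes the unit file was already copied as above):

for m in ops-k8s-master02:10.0.0.11 ops-k8s-master03:10.0.0.12;do
  host=${m%%:*} ip=${m##*:}
  ssh -n $host "sed -i 's/--bind-address=10.0.0.10/--bind-address=$ip/' /usr/lib/systemd/system/kube-apiserver.service && systemctl daemon-reload"
done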
systemctl daemon-reload
systemctl enable kube-apiserver
systemctl start kube-apiserver
systemctl status kube-apiserver
cat>/usr/lib/systemd/system/kube-controller-manager.service<<EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
ExecStart=/opt/kubernetes/bin/kube-controller-manager \
  --address=127.0.0.1 \
  --master=http://127.0.0.1:8080 \
  --allocate-node-cidrs=true \
  --service-cluster-ip-range=10.1.0.0/16 \
  --cluster-cidr=10.2.0.0/16 \
  --cluster-name=kubernetes \
  --cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \
  --cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem \
  --service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem \
  --root-ca-file=/opt/kubernetes/ssl/ca.pem \
  --leader-elect=true \
  --v=2 \
  --logtostderr=false \
  --log-dir=/opt/kubernetes/log
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF
for i in ops-k8s-master02 ops-k8s-master03;do scp /usr/lib/systemd/system/kube-controller-manager.service $i:/usr/lib/systemd/system/;done
systemctl daemon-reload
systemctl enable kube-controller-manager
systemctl start kube-controller-manager
systemctl status kube-controller-manager
cat>/usr/lib/systemd/system/kube-scheduler.service<<EOF
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
ExecStart=/opt/kubernetes/bin/kube-scheduler \
  --address=127.0.0.1 \
  --master=http://127.0.0.1:8080 \
  --leader-elect=true \
  --v=2 \
  --logtostderr=false \
  --log-dir=/opt/kubernetes/log
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF
for i in ops-k8s-master02 ops-k8s-master03;do scp /usr/lib/systemd/system/kube-scheduler.service $i:/usr/lib/systemd/system/;done
systemctl daemon-reload
systemctl enable kube-scheduler
systemctl start kube-scheduler
systemctl status kube-scheduler
cd /usr/local/src/
# upload the package (rz): kubernetes-client-linux-amd64.tar.gz
tar xf kubernetes-client-linux-amd64.tar.gz
for i in ops-k8s-master01 ops-k8s-master02 ops-k8s-master03;do scp /usr/local/src/kubernetes/client/bin/kubectl $i:/opt/kubernetes/bin/;done
cd /usr/local/src/ssl/
cat>admin-csr.json<<EOF
{
  "CN": "admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}
EOF
cfssl gencert -ca=/opt/kubernetes/ssl/ca.pem \
  -ca-key=/opt/kubernetes/ssl/ca-key.pem \
  -config=/opt/kubernetes/ssl/ca-config.json \
  -profile=kubernetes admin-csr.json | cfssljson -bare admin
ls -l admin*
# distribute the certificates to the other master nodes
for i in ops-k8s-master01 ops-k8s-master02 ops-k8s-master03;do scp admin*.pem $i:/opt/kubernetes/ssl/;done
Run the following steps on the other master nodes as well
kubectl config set-cluster kubernetes \
   --certificate-authority=/opt/kubernetes/ssl/ca.pem \
   --embed-certs=true \
   --server=https://10.0.0.7:6443

kubectl config set-credentials admin \
   --client-certificate=/opt/kubernetes/ssl/admin.pem \
   --embed-certs=true \
   --client-key=/opt/kubernetes/ssl/admin-key.pem

kubectl config set-context kubernetes \
   --cluster=kubernetes \
   --user=admin
kubectl config use-context kubernetes
# kubectl get cs
NAME                 STATUS    MESSAGE              ERROR
controller-manager   Healthy   ok
scheduler            Healthy   ok
etcd-2               Healthy   {"health": "true"}
etcd-0               Healthy   {"health": "true"}
etcd-1               Healthy   {"health": "true"}
yum install -y bash-completion
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
echo "source <(kubectl completion bash)" >> ~/.bashrc
cd /usr/local/src/
# upload the package: kubernetes-node-linux-amd64.tar.gz
tar xf kubernetes-node-linux-amd64.tar.gz
cd /usr/local/src/kubernetes/node/bin
# copy to every node that should run pods
for i in ops-k8s-master01 ops-k8s-master02 ops-k8s-master03 ops-k8s-node01 ops-k8s-node02;do scp -r /usr/local/src/kubernetes/node/bin/{kubelet,kube-proxy} $i:/opt/kubernetes/bin/;done
kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap
cd /usr/local/src/ssl
kubectl config set-cluster kubernetes \
   --certificate-authority=/opt/kubernetes/ssl/ca.pem \
   --embed-certs=true \
   --server=https://10.0.0.7:6443 \
   --kubeconfig=bootstrap.kubeconfig

kubectl config set-credentials kubelet-bootstrap \
   --token=a39e5244495964d9f66a5b8e689546ae \
   --kubeconfig=bootstrap.kubeconfig

kubectl config set-context default \
   --cluster=kubernetes \
   --user=kubelet-bootstrap \
   --kubeconfig=bootstrap.kubeconfig
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
for i in ops-k8s-master01 ops-k8s-master02 ops-k8s-master03 ops-k8s-node01 ops-k8s-node02;do scp bootstrap.kubeconfig $i:/opt/kubernetes/cfg/;done
for i in ops-k8s-master01 ops-k8s-master02 ops-k8s-master03 ops-k8s-node01 ops-k8s-node02;do ssh -n $i "mkdir -p /etc/cni/net.d/&& exit";done
cat>/etc/cni/net.d/10-default.conf<<EOF
{
    "name": "flannel",
    "type": "flannel",
    "delegate": {
        "bridge": "docker0",
        "isDefaultGateway": true,
        "mtu": 1400
    }
}
EOF
for i in ops-k8s-master02 ops-k8s-master03 ops-k8s-node01 ops-k8s-node02;do scp -r /etc/cni/net.d/10-default.conf $i:/etc/cni/net.d/;done
for i in ops-k8s-master01 ops-k8s-master02 ops-k8s-master03 ops-k8s-node01 ops-k8s-node02;do ssh -n $i "mkdir -p /var/lib/kubelet && exit";done
cat>/usr/lib/systemd/system/kubelet.service<<EOF
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service
Requires=docker.service

[Service]
WorkingDirectory=/var/lib/kubelet
ExecStart=/opt/kubernetes/bin/kubelet \
  --address=10.0.0.10 \
  --hostname-override=10.0.0.10 \
  --pod-infra-container-image=mirrorgooglecontainers/pause-amd64:3.0 \
  --experimental-bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \
  --kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \
  --cert-dir=/opt/kubernetes/ssl \
  --network-plugin=cni \
  --cni-conf-dir=/etc/cni/net.d \
  --cni-bin-dir=/opt/kubernetes/bin/cni \
  --cluster-dns=10.1.0.2 \
  --cluster-domain=cluster.local. \
  --hairpin-mode hairpin-veth \
  --allow-privileged=true \
  --fail-swap-on=false \
  --v=2 \
  --logtostderr=false \
  --log-dir=/opt/kubernetes/log
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF
for i in ops-k8s-master02 ops-k8s-master03 ops-k8s-node01 ops-k8s-node02;do scp /usr/lib/systemd/system/kubelet.service $i:/usr/lib/systemd/system/;done
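The unit file above hard-codes 10.0.0.10; on every other node --address and --hostname-override must be that node's own IP before starting. A hedged sketch using the host/IP pairs from the hosts table:

for i in ops-k8s-master02:10.0.0.11 ops-k8s-master03:10.0.0.12 ops-k8s-node01:10.0.0.13 ops-k8s-node02:10.0.0.14;do
  host=${i%%:*} ip=${i##*:}
  ssh -n $host "sed -i 's/10.0.0.10/$ip/g' /usr/lib/systemd/system/kubelet.service"
done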
systemctl daemon-reload
systemctl enable kubelet
systemctl start kubelet
systemctl status kubelet
# kubectl get csr
NAME AGE REQUESTOR CONDITION
node-csr-0_w5F1FM_la_SeGiu3Y5xELRpYUjjT2icIFk9gO9KOU 1m kubelet-bootstrap Pending
kubectl get csr|grep 'Pending' | awk 'NR>0{print $1}'| xargs kubectl certificate approve
Output like the following shows that the certificates were issued:
-rw-r--r-- 1 root root 1042 May 28 23:09 kubelet-client.crt
-rw------- 1 root root  227 May 28 23:08 kubelet-client.key
Once this completes, the nodes show a Ready status:
# kubectl get node
NAME   STATUS   ROLES   AGE   VERSION
yum install -y ipvsadm ipset conntrack
cd /usr/local/src/ssl/
cat>kube-proxy-csr.json<<EOF
{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
cfssl gencert -ca=/opt/kubernetes/ssl/ca.pem \
  -ca-key=/opt/kubernetes/ssl/ca-key.pem \
  -config=/opt/kubernetes/ssl/ca-config.json \
  -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
for i in ops-k8s-master01 ops-k8s-master02 ops-k8s-master03 ops-k8s-node01 ops-k8s-node02;do scp kube-proxy*.pem $i:/opt/kubernetes/ssl/;done
kubectl config set-cluster kubernetes \
   --certificate-authority=/opt/kubernetes/ssl/ca.pem \
   --embed-certs=true \
   --server=https://10.0.0.7:6443 \
   --kubeconfig=kube-proxy.kubeconfig

kubectl config set-credentials kube-proxy \
   --client-certificate=/opt/kubernetes/ssl/kube-proxy.pem \
   --client-key=/opt/kubernetes/ssl/kube-proxy-key.pem \
   --embed-certs=true \
   --kubeconfig=kube-proxy.kubeconfig

kubectl config set-context default \
   --cluster=kubernetes \
   --user=kube-proxy \
   --kubeconfig=kube-proxy.kubeconfig
kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
for i in ops-k8s-master01 ops-k8s-master02 ops-k8s-master03 ops-k8s-node01 ops-k8s-node02;do scp kube-proxy.kubeconfig $i:/opt/kubernetes/cfg/;done
for i in ops-k8s-master01 ops-k8s-master02 ops-k8s-master03 ops-k8s-node01 ops-k8s-node02;do ssh -n $i "mkdir -p /var/lib/kube-proxy && exit";done
cat>/usr/lib/systemd/system/kube-proxy.service<<EOF
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
WorkingDirectory=/var/lib/kube-proxy
ExecStart=/opt/kubernetes/bin/kube-proxy \
  --bind-address=10.0.0.10 \
  --hostname-override=10.0.0.10 \
  --kubeconfig=/opt/kubernetes/cfg/kube-proxy.kubeconfig \
  --masquerade-all \
  --feature-gates=SupportIPVSProxyMode=true \
  --proxy-mode=ipvs \
  --ipvs-min-sync-period=5s \
  --ipvs-sync-period=5s \
  --ipvs-scheduler=rr \
  --v=2 \
  --logtostderr=false \
  --log-dir=/opt/kubernetes/log
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
Distribute the unit file to the other nodes and change the IP addresses to match each node (see the sketch below)
for i in ops-k8s-master02 ops-k8s-master03 ops-k8s-node01 ops-k8s-node02;do scp /usr/lib/systemd/system/kube-proxy.service $i:/usr/lib/systemd/system/;done
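A hedged sketch of the per-node edit; --bind-address and --hostname-override are the only places 10.0.0.10 appears in this unit file, so a global substitution is safe here:

for i in ops-k8s-master02:10.0.0.11 ops-k8s-master03:10.0.0.12 ops-k8s-node01:10.0.0.13 ops-k8s-node02:10.0.0.14;do
  host=${i%%:*} ip=${i##*:}
  ssh -n $host "sed -i 's/10.0.0.10/$ip/g' /usr/lib/systemd/system/kube-proxy.service"
done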
Start Kubernetes Proxy and check its status
systemctl daemon-reload
systemctl enable kube-proxy
systemctl start kube-proxy
systemctl status kube-proxy
Check the LVS forwarding table and the node status
ipvsadm -L -n
If you installed the kubelet and kube-proxy services on both test machines, check the status with:
kubectl get node
flannel download page (a CoreOS project): https://github.com/coreos/flannel/releases
Inside a Kubernetes cluster, communication among node IPs, pod IPs, and cluster IPs follows routing rules defined by Kubernetes itself, not ordinary IP routing.
cd /usr/local/src/ssl
cat>flanneld-csr.json<<EOF
{
  "CN": "flanneld",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
cfssl gencert -ca=/opt/kubernetes/ssl/ca.pem \
  -ca-key=/opt/kubernetes/ssl/ca-key.pem \
  -config=/opt/kubernetes/ssl/ca-config.json \
  -profile=kubernetes flanneld-csr.json | cfssljson -bare flanneld
for i in ops-k8s-master01 ops-k8s-master02 ops-k8s-master03 ops-k8s-node01 ops-k8s-node02;do scp flanneld*.pem $i:/opt/kubernetes/ssl/;done
cd /usr/local/src
# wget https://github.com/coreos/flannel/releases/download/v0.10.0/flannel-v0.10.0-linux-amd64.tar.gz
# or upload the package (rz): flannel-v0.10.0-linux-amd64.tar.gz
tar zxf flannel-v0.10.0-linux-amd64.tar.gz
for i in ops-k8s-master01 ops-k8s-master02 ops-k8s-master03 ops-k8s-node01 ops-k8s-node02;do scp flanneld mk-docker-opts.sh $i:/opt/kubernetes/bin/;done
cd /usr/local/src/kubernetes/cluster/centos/node/bin/
for i in ops-k8s-master01 ops-k8s-master02 ops-k8s-master03 ops-k8s-node01 ops-k8s-node02;do scp remove-docker0.sh $i:/opt/kubernetes/bin/;done
Write the configuration file on this machine
cat>/opt/kubernetes/cfg/flannel<<EOF
FLANNEL_ETCD="-etcd-endpoints=https://10.0.0.10:2379,https://10.0.0.11:2379,https://10.0.0.12:2379"
FLANNEL_ETCD_KEY="-etcd-prefix=/kubernetes/network"
FLANNEL_ETCD_CAFILE="--etcd-cafile=/opt/kubernetes/ssl/ca.pem"
FLANNEL_ETCD_CERTFILE="--etcd-certfile=/opt/kubernetes/ssl/flanneld.pem"
FLANNEL_ETCD_KEYFILE="--etcd-keyfile=/opt/kubernetes/ssl/flanneld-key.pem"
EOF
Distribute to the other nodes in the cluster
for i in ops-k8s-master02 ops-k8s-master03 ops-k8s-node01 ops-k8s-node02;do scp /opt/kubernetes/cfg/flannel $i:/opt/kubernetes/cfg/;done
cat>/usr/lib/systemd/system/flannel.service<<'EOF'
[Unit]
Description=Flanneld overlay address etcd agent
After=network.target
Before=docker.service

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/flannel
ExecStartPre=/opt/kubernetes/bin/remove-docker0.sh
ExecStart=/opt/kubernetes/bin/flanneld ${FLANNEL_ETCD} ${FLANNEL_ETCD_KEY} ${FLANNEL_ETCD_CAFILE} ${FLANNEL_ETCD_CERTFILE} ${FLANNEL_ETCD_KEYFILE}
ExecStartPost=/opt/kubernetes/bin/mk-docker-opts.sh -d /run/flannel/docker
Type=notify

[Install]
WantedBy=multi-user.target
RequiredBy=docker.service
EOF
Distribute to the other nodes in the cluster
for i in ops-k8s-master02 ops-k8s-master03 ops-k8s-node01 ops-k8s-node02;do scp /usr/lib/systemd/system/flannel.service $i:/usr/lib/systemd/system/;done
CNI (Container Network Interface) is a set of standards and libraries for configuring networking in Linux containers; network plugins are developed against these standards, and a number of common plugins are already available on GitHub. CNI focuses solely on connecting containers to the network and on releasing resources when a container is destroyed. Because it only provides a framework, CNI can support many different network models and is easy to implement.
Compared with the k8s exec model, which runs an executable directly, a CNI plugin is a wrapper around an executable: it prescribes a framework for the program, and in the end an executable is still invoked, just as with an exec plugin. The difference is that an exec plugin reads its parameters from the command line, while a CNI plugin reads them from environment variables and a configuration file.
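To make that contract concrete, here is a hedged probe you can try once the plugins are unpacked in the step below. CNI_COMMAND=VERSION is the safest call (ADD/DEL would additionally require CNI_CONTAINERID, CNI_NETNS, and CNI_IFNAME), and the network config is fed on stdin:

# the plugin reads CNI_* from the environment and the JSON config from stdin
CNI_COMMAND=VERSION CNI_PATH=/opt/kubernetes/bin/cni \
  /opt/kubernetes/bin/cni/flannel < /etc/cni/net.d/10-default.conf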
https://github.com/containernetworking/plugins/releases
cd /usr/local/src/
wget https://github.com/containernetworking/plugins/releases/download/v0.7.1/cni-plugins-amd64-v0.7.1.tgz
# or upload the package (rz): cni-plugins-amd64-v0.7.1.tgz
mkdir /opt/kubernetes/bin/cni
tar zxf cni-plugins-amd64-v0.7.1.tgz -C /opt/kubernetes/bin/cni
Distribute the plugins to the other cluster nodes
for i in ops-k8s-master02 ops-k8s-master03 ops-k8s-node01 ops-k8s-node02;do scp -r /opt/kubernetes/bin/cni $i:/opt/kubernetes/bin/;done
/opt/kubernetes/bin/etcdctl --ca-file /opt/kubernetes/ssl/ca.pem \
  --cert-file /opt/kubernetes/ssl/flanneld.pem \
  --key-file /opt/kubernetes/ssl/flanneld-key.pem \
  --no-sync -C https://10.0.0.10:2379,https://10.0.0.11:2379,https://10.0.0.12:2379 \
  mk /kubernetes/network/config '{ "Network": "10.2.0.0/16", "Backend": { "Type": "vxlan", "VNI": 1 }}' >/dev/null 2>&1
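A quick read-back of the key confirms what flannel will see:

/opt/kubernetes/bin/etcdctl --ca-file /opt/kubernetes/ssl/ca.pem \
  --cert-file /opt/kubernetes/ssl/flanneld.pem \
  --key-file /opt/kubernetes/ssl/flanneld-key.pem \
  --no-sync -C https://10.0.0.10:2379,https://10.0.0.11:2379,https://10.0.0.12:2379 \
  get /kubernetes/network/config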
systemctl daemon-reload
systemctl enable flannel
chmod +x /opt/kubernetes/bin/*
systemctl start flannel
systemctl status flannel
Edit /usr/lib/systemd/system/docker.service:
[Unit]
# in the Unit section, modify After= and add Requires=
After=network-online.target firewalld.service flannel.service
Wants=network-online.target
Requires=flannel.service

[Service]
# add EnvironmentFile=-/run/flannel/docker; flannel creates this file when it starts
Type=notify
EnvironmentFile=-/run/flannel/docker
ExecStart=/usr/bin/dockerd $DOCKER_OPTS
for i in ops-k8s-master02 ops-k8s-master03 ops-k8s-node01 ops-k8s-node02;do scp -r /usr/lib/systemd/system/docker.service $i:/usr/lib/systemd/system/;done
systemctl daemon-reload
systemctl restart docker
systemctl status docker
## each cluster node should now have been assigned a different IP range
ip a
1. Create a test deployment:
   kubectl run net-test --image=alpine --replicas=2 sleep 360000
2. Check the assigned pod IPs:
   kubectl get pod -o wide
3. Test connectivity (use one of the pod IPs from the previous step):
   ping 10.2.83.2
If the pods are reachable, Flannel is configured successfully!
Note: the namespace is kube-system
mkdir -p /opt/kubernetes/yaml/coredns
Adjust the configuration to your needs (especially the resource limits)
cat>coredns.yaml<<EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: Reconcile
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: EnsureExists
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
data:
  Corefile: |
    .:53 {
        errors
        health
        kubernetes cluster.local. in-addr.arpa ip6.arpa {
            pods insecure
            upstream
            fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        proxy . /etc/resolv.conf
        cache 30
    }
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: coredns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  replicas: 2
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: coredns
  template:
    metadata:
      labels:
        k8s-app: coredns
    spec:
      serviceAccountName: coredns
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
      - key: "CriticalAddonsOnly"
        operator: "Exists"
      containers:
      - name: coredns
        image: coredns/coredns:1.0.6
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 2Gi
          requests:
            cpu: 2
            memory: 1Gi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
      dnsPolicy: Default
      volumes:
      - name: config-volume
        configMap:
          name: coredns
          items:
          - key: Corefile
            path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: coredns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: coredns
  clusterIP: 10.1.0.2
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
EOF
kubectl create -f coredns.yaml
kubectl get pod -n kube-system
# check the IPVS forwarding rules
ipvsadm -Ln
# run a test pod (--rm deletes the container as soon as it exits)
kubectl run dns-test --rm -it --image=alpine /bin/sh
# inside the container, check external connectivity
ping baidu.com
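Beyond external reachability, it is worth confirming that the cluster DNS actually serves the pod; a hedged check from inside the same test container:

# /etc/resolv.conf should name 10.1.0.2 as the nameserver
cat /etc/resolv.conf
# service names should resolve through CoreDNS
nslookup kubernetes.default.svc.cluster.local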
mkdir -p /opt/kubernetes/yaml/dashboard
cat>admin-user-sa-rbac.yaml<<EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kube-system
EOF
cat>kubernetes-dashboard.yaml<<EOF
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Configuration to deploy release version of the Dashboard UI compatible with
# Kubernetes 1.8.
#
# Example usage: kubectl create -f <this_file>

# ------------------- Dashboard Secret ------------------- #
apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-certs
  namespace: kube-system
type: Opaque
---
# ------------------- Dashboard Service Account ------------------- #
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
---
# ------------------- Dashboard Role & Role Binding ------------------- #
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubernetes-dashboard-minimal
  namespace: kube-system
rules:
  # Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret.
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["create"]
  # Allow Dashboard to create 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["create"]
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
  resources: ["secrets"]
  resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs"]
  verbs: ["get", "update", "delete"]
  # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
  resources: ["configmaps"]
  resourceNames: ["kubernetes-dashboard-settings"]
  verbs: ["get", "update"]
  # Allow Dashboard to get metrics from heapster.
- apiGroups: [""]
  resources: ["services"]
  resourceNames: ["heapster"]
  verbs: ["proxy"]
- apiGroups: [""]
  resources: ["services/proxy"]
  resourceNames: ["heapster", "http:heapster:", "https:heapster:"]
  verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: kubernetes-dashboard-minimal
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard-minimal
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kube-system
---
# ------------------- Dashboard Deployment ------------------- #
kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
      - name: kubernetes-dashboard
        #image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.8.3
        image: mirrorgooglecontainers/kubernetes-dashboard-amd64:v1.8.3
        ports:
        - containerPort: 8443
          protocol: TCP
        args:
          - --auto-generate-certificates
          # Uncomment the following line to manually specify Kubernetes API server Host
          # If not specified, Dashboard will attempt to auto discover the API server and connect
          # to it. Uncomment only if the default does not work.
          # - --apiserver-host=http://my-address:port
        volumeMounts:
        - name: kubernetes-dashboard-certs
          mountPath: /certs
          # Create on-disk volume to store exec logs
        - mountPath: /tmp
          name: tmp-volume
        livenessProbe:
          httpGet:
            scheme: HTTPS
            path: /
            port: 8443
          initialDelaySeconds: 30
          timeoutSeconds: 30
      volumes:
      - name: kubernetes-dashboard-certs
        secret:
          secretName: kubernetes-dashboard-certs
      - name: tmp-volume
        emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
---
# ------------------- Dashboard Service ------------------- #
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  ports:
  - port: 443
    targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard
  type: NodePort
EOF
cat>ui-admin-rbac.yaml<<EOF
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: ui-admin
rules:
- apiGroups:
  - ""
  resources:
  - services
  - services/proxy
  verbs:
  - '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: ui-admin-binding
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: ui-admin
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: admin
EOF
cat>ui-read-rbac.yaml<<EOF
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: ui-read
rules:
- apiGroups:
  - ""
  resources:
  - services
  - services/proxy
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: ui-read-binding
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: ui-read
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: readonly
EOF
kubectl create -f dashboard/
# fetch the login token
kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')
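The Service was declared type NodePort, so the UI is reachable on any node; a hedged way to find the assigned port:

kubectl -n kube-system get svc kubernetes-dashboard
# then browse to https://<any-node-ip>:<nodePort> and sign in with the token printed above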
Kubernetes v1.10.x HA fully manual installation tutorial: https://zhangguanzhang.github.io/2018/05/05/Kubernetes_install/