K8s Primer: Basic Cluster Deployment

1. System environment initialization

  1. Architecture design

  All nodes take part: 3 masters (which also form the three-node etcd cluster) and 2 worker nodes.

  1) K8s service invocation (see figure)

  

  2) Component descriptions

    1. API Server
  • Exposes the Kubernetes API; mainly handles REST operations and updates objects in etcd.
  • The single entry point for adding, deleting, modifying, and querying all resources.
    2. Scheduler
  • Resource scheduling; responsible for scheduling Pods onto Nodes.
    3. Controller Manager
  • All other cluster-level functions are currently carried out by the Controller Manager; it is the automation control center for resource objects.
    4. etcd
  • All persistent state is stored in etcd; the etcd component serves as a highly available, strongly consistent service-discovery and storage backend.
    5. Kubelet
  • Manages Pods as well as containers, images, volumes, etc., implementing management of the cluster's nodes.
    6. Kube-proxy
  • Provides network proxying and load balancing, implementing communication with Services.
    7. Docker Engine
  • Responsible for managing the containers on the node.
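
  To relate these components to what this guide actually installs, the sketch below (an assumption about the final layout described later in this article, meant to be run only after everything is deployed) checks which of the systemd units created in the following sections are active on the current host; masters run etcd plus the control-plane services, and every node runs docker, flannel, kubelet, and kube-proxy.

for unit in etcd kube-apiserver kube-controller-manager kube-scheduler kubelet kube-proxy flannel docker;do systemctl is-active $unit >/dev/null 2>&1 && echo "$unit: active" || echo "$unit: not running here";done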

  3) Host information table for this architecture



  2. Set hostnames and distribute the cluster host mappings

  1) Set hostnames (create according to your actual needs)

hostnamectl --static set-hostname ops-k8s-master01
hostnamectl --static set-hostname ops-k8s-master02
hostnamectl --static set-hostname ops-k8s-master03
hostnamectl --static set-hostname ops-k8s-node01
hostnamectl --static set-hostname ops-k8s-node02

  2) Create the host mappings

  Add the host mappings on the local machine

cat <<EOF>>/etc/hosts
10.0.0.10 ops-k8s-master01 ops-k8s-master01.local.com
10.0.0.11 ops-k8s-master02 ops-k8s-master02.local.com
10.0.0.12 ops-k8s-master03 ops-k8s-master03.local.com
10.0.0.13 ops-k8s-node01 ops-k8s-node01.local.com
10.0.0.14 ops-k8s-node02 ops-k8s-node02.local.com
10.0.0.15 ops-k8s-harbor01 harbor01.local.com
10.0.0.16 ops-k8s-harbor02 harbor02.local.com
EOF

  Distribute the hosts file to the other cluster nodes

for i in ops-k8s-master02 ops-k8s-master03 ops-k8s-node01 ops-k8s-node02 ops-k8s-harbor01 ops-k8s-harbor02;do scp /etc/hosts $i:/etc/;done

  3. Passwordless SSH login across the cluster

  1) Generate the key pair

ssh-keygen  # press Enter through all prompts to accept the defaults

  2) Distribute the public key (including to the local machine)

for i in ops-k8s-master01 ops-k8s-master02 ops-k8s-master03 ops-k8s-node01 ops-k8s-node02;do ssh-copy-id $i;done
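
  To confirm that key-based login works everywhere, a quick check (a sketch reusing the node names above; -o BatchMode=yes makes ssh fail instead of prompting for a password):

for i in ops-k8s-master01 ops-k8s-master02 ops-k8s-master03 ops-k8s-node01 ops-k8s-node02;do ssh -o BatchMode=yes -n $i hostname;done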

  4. K8s environment initialization

  Stop the firewall, disable swap, disable SELinux, tune the kernel, install dependency packages, and configure NTP (a reboot is recommended after this step).

  1) Initialization script

for i in ops-k8s-master01 ops-k8s-master02 ops-k8s-master03 ops-k8s-node01 ops-k8s-node02;do ssh -n $i "mkdir -p /opt/scripts/shell && exit";done
cat>/opt/scripts/shell/init_k8s_env.sh<<EOF
#!/bin/bash
#by wzs at 20180419
#auto install k8s 
#1.stop firewall
systemctl stop firewalld
systemctl disable firewalld
#2.stop swap
swapoff -a 
sed -i 's/.*swap.*/#&/' /etc/fstab
#3.stop selinux
setenforce  0 
sed -i "s/^SELINUX=enforcing/SELINUX=disabled/g" /etc/sysconfig/selinux 
sed -i "s/^SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config 
sed -i "s/^SELINUX=permissive/SELINUX=disabled/g" /etc/sysconfig/selinux 
sed -i "s/^SELINUX=permissive/SELINUX=disabled/g" /etc/selinux/config
#4.install base packages (ntpdate added here because step 5 below depends on it)
yum install -y net-tools vim lrzsz tree screen lsof tcpdump wget nmap dos2unix nc traceroute telnet nfs-utils mailx pciutils ftp ksh lvm2 gcc gcc-c++ dmidecode kde-l10n-Chinese* ntpdate
#5.set ntpdate
systemctl enable ntpdate.service
echo '*/30 * * * * /usr/sbin/ntpdate time7.aliyun.com >/dev/null 2>&1' > /tmp/crontab2.tmp
crontab /tmp/crontab2.tmp
systemctl start ntpdate.service
#6.set security limit
echo "* soft nofile 65536" >> /etc/security/limits.conf
echo "* hard nofile 65536" >> /etc/security/limits.conf
echo "* soft nproc 65536"  >> /etc/security/limits.conf
echo "* hard nproc 65536"  >> /etc/security/limits.conf
echo "* soft  memlock  unlimited"  >> /etc/security/limits.conf
echo "* hard memlock  unlimited"  >> /etc/security/limits.conf
EOF
/bin/bash /opt/scripts/shell/init_k8s_env.sh

  2) Send the initialization script to the other nodes

for i in ops-k8s-master02 ops-k8s-master03 ops-k8s-node01 ops-k8s-node02;do scp /opt/scripts/shell/init_k8s_env.sh $i:/opt/scripts/shell/;done

   3) Run the initialization script on all nodes

for i in ops-k8s-master01 ops-k8s-master02 ops-k8s-master03 ops-k8s-node01 ops-k8s-node02;do ssh -n $i "/bin/bash /opt/scripts/shell/init_k8s_env.sh && exit";done 
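
  Optionally verify the result on every node (a sketch; getenforce may still report Permissive until the recommended reboot):

for i in ops-k8s-master01 ops-k8s-master02 ops-k8s-master03 ops-k8s-node01 ops-k8s-node02;do ssh -n $i "hostname; getenforce; swapon -s; systemctl is-active firewalld";done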

  5. Install Docker

  1) Use a domestic (Aliyun) Docker repository

cd /etc/yum.repos.d/
wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

  2) Install Docker, start it, and enable it at boot

yum install -y docker-ce
systemctl enable docker
systemctl start docker
systemctl status docker

  Supplement:

    1. Uninstall old Docker versions

yum list installed | grep docker
yum -y remove docker*
##to also remove containers and images: rm -rf /var/lib/docker

#run on the other nodes
for i in ops-k8s-master02 ops-k8s-master03 ops-k8s-node01 ops-k8s-node02;do ssh -n $i "yum -y remove docker* && rm -rf /var/lib/docker && exit";done

    2. Install the new version

cat>/opt/scripts/shell/install_docker.sh<<'EOF'
#!/bin/sh
###############################################################################
#
#VARS INIT
#
###############################################################################


###############################################################################
#
#Confirm Env
#
###############################################################################
date
echo "## Install Preconfirm"
echo "## Uname"
uname -r
echo
echo "## OS bit"
getconf LONG_BIT
echo

###############################################################################
#
#INSTALL yum-utils
#
###############################################################################
date
echo "## Install begins : yum-utils"
yum install -y yum-utils >/dev/null 2>&1
if [ $? -ne 0 ]; then
  echo "Install failed..."
  exit 1
fi
echo "## Install ends   : yum-utils"
echo

###############################################################################
#
#Setting yum-config-manager
#
###############################################################################
echo "## Setting begins : yum-config-manager"
yum-config-manager \
   --add-repo \
   https://download.docker.com/linux/centos/docker-ce.repo >/dev/null 2>&1

if [ $? -ne 0 ]; then
  echo "Install failed..."
  exit 1
fi
echo "## Setting ends   : yum-config-manager"
echo

###############################################################################
#
#Update Package Cache
#
###############################################################################
echo "## Setting begins : Update package cache"
yum makecache fast >/dev/null 2>&1
if [ $? -ne 0 ]; then
  echo "Install failed..."
  exit 1
fi
echo "## Setting ends   : Update package cache"
echo

###############################################################################
#
#INSTALL Docker-engine
#
###############################################################################
date
echo "## Install begins : docker-ce"
yum install -y docker-ce
if [ $? -ne 0 ]; then
  echo "Install failed..."
  exit 1
fi
echo "## Install ends   : docker-ce"
date
echo

###############################################################################
#
#Stop Firewalld
#
###############################################################################
echo "## Setting begins : stop firewall"
systemctl stop firewalld
if [ $? -ne 0 ]; then
  echo "Install failed..."
  exit 1
fi
systemctl disable firewalld
if [ $? -ne 0 ]; then
  echo "Install failed..."
  exit 1
fi
echo "## Setting ends   : stop firewall"
echo

###############################################################################
#
#Clear Iptable rules
#
###############################################################################
echo "## Setting begins : clear iptable rules"
iptables -F
if [ $? -ne 0 ]; then
  echo "Install failed..."
  exit 1
fi
echo "## Setting ends   : clear iptable rules"
echo

###############################################################################
#
#Enable docker
#
###############################################################################
echo "## Setting begins : systemctl enable docker"
systemctl enable docker
if [ $? -ne 0 ]; then
  echo "Install failed..."
  exit 1
fi
echo "## Setting ends   : systemctl enable docker"
echo


###############################################################################
#
#start docker
#
###############################################################################
echo "## Setting begins : systemctl restart docker"
systemctl restart docker
if [ $? -ne 0 ]; then
  echo "Install failed..."
  exit 1
fi
echo "## Setting ends   : systemctl restart docker"
echo


###############################################################################
#
#confirm docker version
#
###############################################################################
echo "## docker info"
docker info
echo

echo "## docker version"
docker version
EOF
/bin/bash /opt/scripts/shell/install_docker.sh

  3) Distribute the script to the other nodes and run the installation

for i in ops-k8s-master02 ops-k8s-master03 ops-k8s-node01 ops-k8s-node02;do scp /opt/scripts/shell/install_docker.sh $i:/opt/scripts/shell/;done
for i in ops-k8s-master01 ops-k8s-master02 ops-k8s-master03 ops-k8s-node01 ops-k8s-node02;do ssh -n $i " /bin/bash /opt/scripts/shell/install_docker.sh && exit";done

  6. Prepare software packages and management directories

  1) Create the management directories

for i in ops-k8s-master01 ops-k8s-master02 ops-k8s-master03 ops-k8s-node01 ops-k8s-node02;do ssh -n $i "mkdir -p /opt/kubernetes/{cfg,bin,ssl,log,yaml} && exit";done

  Directory layout

kubernetes/
├── bin   #binaries; this directory is added to PATH
├── cfg   #configuration files
├── log   #log files
├── ssl   #cluster certificates
└── yaml  #yaml manifests

5 directories, 0 files

   2) Download and extract the software packages

  Download URL: https://pan.baidu.com/disk/home?#/all?vmode=list&path=%2Fsoftware%2Fsalt-kubernetes

cd /usr/local/src
#upload the package here
unzip -d /usr/local/src k8s-v1.10.1-manual.zip

  7. Set the K8s environment variable

  Run on every node in the cluster

echo "PATH=$PATH:/opt/kubernetes/bin">>/root/.bash_profile
source /root/.bash_profile

2. Manually create the CA certificates

  1. Install CFSSL

  1) Download the cfssl binaries

cd /usr/local/src
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64

  2) Make the cfssl binaries executable and move them into the directory that is on PATH

chmod +x cfssl*
mv cfssl-certinfo_linux-amd64 /opt/kubernetes/bin/cfssl-certinfo
mv cfssljson_linux-amd64  /opt/kubernetes/bin/cfssljson
mv cfssl_linux-amd64  /opt/kubernetes/bin/cfssl

  3) Copy the cfssl binaries to the other nodes. If you have more nodes in practice, copy them to every one.

for i in ops-k8s-master02 ops-k8s-master03 ops-k8s-node01 ops-k8s-node02;do scp -r /opt/kubernetes/bin/cfssl* $i:/opt/kubernetes/bin/;done

  2. Initialize cfssl

#create a directory for managing certificates
cd /usr/local/src
mkdir ssl && cd ssl
cfssl print-defaults config > config.json
cfssl print-defaults csr > csr.json

  3. Create the JSON config file used to generate the CA

cat >ca-config.json<<EOF
{
  "signing": {
    "default": {
      "expiry": "175200h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ],
        "expiry": "175200h"
      }
    }
  }
}
EOF

  4. Create the JSON config file for the CA certificate signing request (CSR)

cat >ca-csr.json<<EOF
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

  5. Generate the CA certificate (ca.pem) and key (ca-key.pem)

cfssl gencert -initca ca-csr.json | cfssljson -bare ca
ls -l ca*
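
  Optionally inspect the new CA before distributing it (a quick sanity check using the cfssl-certinfo binary installed above):

cfssl-certinfo -cert ca.pem
openssl x509 -in ca.pem -noout -subject -dates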

  6. Distribute the certificates

for i in ops-k8s-master01 ops-k8s-master02 ops-k8s-master03 ops-k8s-node01 ops-k8s-node02;do scp -r ca.csr ca.pem ca-key.pem ca-config.json $i:/opt/kubernetes/ssl/;done

3. Manually deploy the etcd cluster

  etcd download URL: https://github.com/coreos/etcd/releases/

  1. Prepare the etcd package

cd /usr/local/src/
wget https://github.com/coreos/etcd/releases/download/v3.2.18/etcd-v3.2.18-linux-amd64.tar.gz
tar xf etcd-v3.2.18-linux-amd64.tar.gz
cd etcd-v3.2.18-linux-amd64
for i in ops-k8s-master01 ops-k8s-master02 ops-k8s-master03;do scp etcd etcdctl $i:/opt/kubernetes/bin/;done

  2. Create the etcd certificate signing request

cd /usr/local/src/ssl
cat>etcd-csr.json<<EOF
{
    "CN": "etcd",
    "hosts": [
        "127.0.0.1",
        "10.0.0.10",
        "10.0.0.11",
        "10.0.0.12"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [{
        "C": "CN",
        "ST": "BeiJing",
        "L": "BeiJing",
        "O": "k8s",
        "OU": "System"
    }]
}
EOF

  3. Generate the etcd certificate and private key

cfssl gencert -ca=/opt/kubernetes/ssl/ca.pem \
  -ca-key=/opt/kubernetes/ssl/ca-key.pem \
  -config=/opt/kubernetes/ssl/ca-config.json \
  -profile=kubernetes etcd-csr.json | cfssljson -bare etcd
#the following certificate files are generated
ls -l etcd*

  4. Move the certificates to /opt/kubernetes/ssl

  and send them to the other etcd cluster nodes

for i in ops-k8s-master01 ops-k8s-master02 ops-k8s-master03;do scp etcd*.pem $i:/opt/kubernetes/ssl/;done

  5. Configure etcd

cat>/opt/kubernetes/cfg/etcd.conf<<EOF
#[member]
ETCD_NAME="ops-k8s-master01"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
#ETCD_SNAPSHOT_COUNTER="10000"
#ETCD_HEARTBEAT_INTERVAL="100"
#ETCD_ELECTION_TIMEOUT="1000"
ETCD_LISTEN_PEER_URLS="https://10.0.0.10:2380"
ETCD_LISTEN_CLIENT_URLS="https://10.0.0.10:2379,https://127.0.0.1:2379"
#ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
#ETCD_CORS=""
#[cluster]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://10.0.0.10:2380"
# if you use different ETCD_NAME (e.g. test),
# set ETCD_INITIAL_CLUSTER value for this name, i.e. "test=http://..."
ETCD_INITIAL_CLUSTER="ops-k8s-master01=https://10.0.0.10:2380,ops-k8s-master02=https://10.0.0.11:2380,ops-k8s-master03=https://10.0.0.12:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="k8s-etcd-cluster"
ETCD_ADVERTISE_CLIENT_URLS="https://10.0.0.10:2379"
#[security]
CLIENT_CERT_AUTH="true"
ETCD_CA_FILE="/opt/kubernetes/ssl/ca.pem"
ETCD_CERT_FILE="/opt/kubernetes/ssl/etcd.pem"
ETCD_KEY_FILE="/opt/kubernetes/ssl/etcd-key.pem"
PEER_CLIENT_CERT_AUTH="true"
ETCD_PEER_CA_FILE="/opt/kubernetes/ssl/ca.pem"
ETCD_PEER_CERT_FILE="/opt/kubernetes/ssl/etcd.pem"
ETCD_PEER_KEY_FILE="/opt/kubernetes/ssl/etcd-key.pem"
EOF

  6. Create the etcd systemd service

cat>/etc/systemd/system/etcd.service<<'EOF'
[Unit]
Description=Etcd Server
After=network.target

[Service]
Type=simple
WorkingDirectory=/var/lib/etcd
EnvironmentFile=-/opt/kubernetes/cfg/etcd.conf
# set GOMAXPROCS to number of processors
ExecStart=/bin/bash -c "GOMAXPROCS=$(nproc) /opt/kubernetes/bin/etcd"
Type=notify

[Install]
WantedBy=multi-user.target
EOF

  7. Send the files to the other cluster nodes and start the service

  1) Send the files to the other cluster nodes

for i in ops-k8s-master02 ops-k8s-master03;do scp /opt/kubernetes/cfg/etcd.conf $i:/opt/kubernetes/cfg/;done
for i in ops-k8s-master02 ops-k8s-master03;do scp /etc/systemd/system/etcd.service $i:/etc/systemd/system/;done

  Note: on each node, change the IP addresses and node name in /opt/kubernetes/cfg/etcd.conf

  2) Create the required data directory and start the service

for i in ops-k8s-master01 ops-k8s-master02 ops-k8s-master03;do ssh -n $i "mkdir -p /var/lib/etcd && exit";done
systemctl daemon-reload 
systemctl enable etcd
systemctl start etcd
systemctl status etcd

  Note: repeat the steps above on every etcd node until the etcd service is running on all machines.

  8. Verify the etcd cluster

etcdctl --endpoints=https://10.0.0.10:2379 \
   --ca-file=/opt/kubernetes/ssl/ca.pem \
   --cert-file=/opt/kubernetes/ssl/etcd.pem \
   --key-file=/opt/kubernetes/ssl/etcd-key.pem cluster-health

#expected output:
member 69c08d868bbff6f1 is healthy: got healthy result from https://10.0.0.12:2379
member a87115828af54fe6 is healthy: got healthy result from https://10.0.0.10:2379
member f96d77d9089bd1e3 is healthy: got healthy result from https://10.0.0.11:2379
cluster is healthy
##if the output looks like this, the cluster is OK
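
  You can also list the members with the same certificates (a sketch):

etcdctl --endpoints=https://10.0.0.10:2379,https://10.0.0.11:2379,https://10.0.0.12:2379 \
   --ca-file=/opt/kubernetes/ssl/ca.pem \
   --cert-file=/opt/kubernetes/ssl/etcd.pem \
   --key-file=/opt/kubernetes/ssl/etcd-key.pem member list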

4. Master node deployment

  For an HA cluster, the IP must be replaced with the VIP address.

  1. Install and configure keepalived

  1) Install keepalived on all master nodes

for i in ops-k8s-master01 ops-k8s-master02 ops-k8s-master03;do ssh -n $i "yum install -y keepalived && cp /etc/keepalived/keepalived.conf{,.bak} && exit";done

  2) Modify the configuration files

  Notes:

    1. If the bound NIC name differs from the one used in this article, change it accordingly

    2. Adjust the remaining keepalived master/backup settings as needed

    1. keepalived.conf on ops-k8s-master01 (keepalived MASTER)
cat <<EOF > /etc/keepalived/keepalived.conf
global_defs {
   router_id LVS_k8s
}

vrrp_script CheckK8sMaster {
    script "curl -k https://10.0.0.7:6443"
    interval 3
    timeout 9
    fall 2
    rise 2
}

vrrp_instance VI_1 {
    state MASTER
    interface ens192
    virtual_router_id 61
    priority 100
    advert_int 1
    mcast_src_ip 10.0.0.10
    nopreempt
    authentication {
        auth_type PASS
        auth_pass sqP05dQgMSlzrxHj
    }
    unicast_peer {
        10.0.0.11        
        10.0.0.12        
    }
    virtual_ipaddress {
        10.0.0.7/24
    }
    track_script {
        CheckK8sMaster
    }

}
EOF
    2. keepalived.conf on ops-k8s-master02 (keepalived BACKUP01)
cat <<EOF > /etc/keepalived/keepalived.conf
global_defs {
   router_id LVS_k8s
}

vrrp_script CheckK8sMaster {
    script "curl -k https://10.0.0.7:6443"
    interval 3
    timeout 9
    fall 2
    rise 2
}

vrrp_instance VI_1 {
    state BACKUP
    interface ens192
    virtual_router_id 61
    priority 90
    advert_int 1
    mcast_src_ip 10.0.0.11
    nopreempt
    authentication {
        auth_type PASS
        auth_pass sqP05dQgMSlzrxHj
    }
    unicast_peer {
        10.0.0.10
        10.0.0.12
    }
    virtual_ipaddress {
        10.0.0.7/24
    }
    track_script {
        CheckK8sMaster
    }

}
EOF
    3. keepalived.conf on ops-k8s-master03 (keepalived BACKUP02)
cat <<EOF > /etc/keepalived/keepalived.conf
global_defs {
   router_id LVS_k8s
}

vrrp_script CheckK8sMaster {
    script "curl -k https://10.0.0.7:6443"
    interval 3
    timeout 9
    fall 2
    rise 2
}

vrrp_instance VI_1 {
    state BACKUP
    interface ens192
    virtual_router_id 61
    priority 80
    advert_int 1
    mcast_src_ip 10.0.0.12
    nopreempt
    authentication {
        auth_type PASS
        auth_pass sqP05dQgMSlzrxHj
    }
    unicast_peer {
        10.0.0.10
        10.0.0.11
    }
    virtual_ipaddress {
        10.0.0.7/24
    }
    track_script {
        CheckK8sMaster
    }

}
EOF

  3) Start keepalived

systemctl enable keepalived
systemctl start keepalived
systemctl status keepalived

  4) Verify the result

1. On the MASTER node, check whether the VIP is present
ip a|grep 10.0.0.7

2. Stop the MASTER node and check whether the VIP appears on backup01
    Run on the MASTER node
systemctl stop keepalived
    Check on backup01 whether the VIP is present
ip a|grep 10.0.0.7


3. Stop MASTER and backup01, then check whether the VIP appears on backup02
    Run on MASTER and backup01
systemctl stop keepalived
    Check on backup02 whether the VIP is present
ip a|grep 10.0.0.7

  2. Deploy the K8s API service

  1) Prepare the packages and copy the binaries to the cluster

    1. Supplementary: package download options (for reference)
      Option 1 (recommended): download the client or server tarballs from the https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG.md page
[root@k8s-master ~]# cd /usr/local/src/
[root@k8s-master src]# wget https://dl.k8s.io/v1.10.1/kubernetes.tar.gz
[root@k8s-master src]# wget https://dl.k8s.io/v1.10.1/kubernetes-server-linux-amd64.tar.gz
[root@k8s-master src]# wget https://dl.k8s.io/v1.10.1/kubernetes-client-linux-amd64.tar.gz
[root@k8s-master src]# wget https://dl.k8s.io/v1.10.1/kubernetes-node-linux-amd64.tar.gz
      Option 2: download the release tarball from the GitHub releases page, extract it, and then run the download script.
[root@k8s-master ~]# cd /usr/local/src/
[root@k8s-master src]#wget https://github.com/kubernetes/kubernetes/releases/download/v1.10.3/kubernetes.tar.gz
[root@k8s-master src]# tar -zxvf kubernetes.tar.gz
[root@k8s-master src]# ll
total 2664
drwxr-xr-x 9 root root     156 May 21 18:16 kubernetes
-rw-r--r-- 1 root root 2726918 May 21 19:15 kubernetes.tar.gz
[root@k8s-master src]# cd kubernetes/cluster/
[root@k8s-master cluster]# ./get-kube-binaries.sh 
    2. Cluster deployment steps
cd /usr/local/src/
#upload the packages: rz kubernetes-server-linux-amd64.tar.gz kubernetes.tar.gz
tar xf kubernetes-server-linux-amd64.tar.gz
tar xf kubernetes.tar.gz
cd kubernetes
##send to the master nodes
for i in  ops-k8s-master01 ops-k8s-master02 ops-k8s-master03;do scp /usr/local/src/kubernetes/server/bin/{kube-apiserver,kube-controller-manager,kube-scheduler} $i:/opt/kubernetes/bin/;done

  2) Create the JSON config for generating the CSR

cd /usr/local/src/ssl/
cat>kubernetes-csr.json<<EOF
{
    "CN": "kubernetes",
    "hosts": [
        "127.0.0.1",
        "10.1.0.1",
        "10.0.0.10",
        "10.0.0.11",
        "10.0.0.12",
        "10.0.0.7",
        "kubernetes",
        "kubernetes.default",
        "kubernetes.default.svc",
        "kubernetes.default.svc.cluster",
        "kubernetes.default.svc.cluster.local"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [{
        "C": "CN",
        "ST": "BeiJing",
        "L": "BeiJing",
        "O": "k8s",
        "OU": "System"
    }]
}
EOF

  3) Generate the kubernetes certificate and private key

cfssl gencert -ca=/opt/kubernetes/ssl/ca.pem \
   -ca-key=/opt/kubernetes/ssl/ca-key.pem \
   -config=/opt/kubernetes/ssl/ca-config.json \
   -profile=kubernetes kubernetes-csr.json | cfssljson -bare kubernetes

#distribute the certificates to the master nodes
for i in ops-k8s-master01 ops-k8s-master02 ops-k8s-master03;do scp kubernetes*.pem $i:/opt/kubernetes/ssl/;done

  4) Create the client token file used by kube-apiserver and send it to the other master nodes

# head -c 16 /dev/urandom | od -An -t x | tr -d ' '
a39e5244495964d9f66a5b8e689546ae
cat>/opt/kubernetes/ssl/bootstrap-token.csv<<EOF
a39e5244495964d9f66a5b8e689546ae,kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF
for i in ops-k8s-master02 ops-k8s-master03;do scp /opt/kubernetes/ssl/bootstrap-token.csv $i:/opt/kubernetes/ssl/;done

  5) Create the basic username/password authentication config

cat>/opt/kubernetes/ssl/basic-auth.csv<<EOF
admin,admin,1
readonly,readonly,2
EOF
for i in ops-k8s-master02 ops-k8s-master03;do scp /opt/kubernetes/ssl/basic-auth.csv $i:/opt/kubernetes/ssl/;done

  6) Deploy the Kubernetes API Server

  The etcd endpoints may also be written using the VIP address.

cat>/usr/lib/systemd/system/kube-apiserver.service<<EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
ExecStart=/opt/kubernetes/bin/kube-apiserver \
  --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota,NodeRestriction \
  --bind-address=10.0.0.10 \
  --insecure-bind-address=127.0.0.1 \
  --authorization-mode=Node,RBAC \
  --runtime-config=rbac.authorization.k8s.io/v1 \
  --kubelet-https=true \
  --anonymous-auth=false \
  --basic-auth-file=/opt/kubernetes/ssl/basic-auth.csv \
  --enable-bootstrap-token-auth \
  --token-auth-file=/opt/kubernetes/ssl/bootstrap-token.csv \
  --service-cluster-ip-range=10.1.0.0/16 \
  --service-node-port-range=20000-40000 \
  --tls-cert-file=/opt/kubernetes/ssl/kubernetes.pem \
  --tls-private-key-file=/opt/kubernetes/ssl/kubernetes-key.pem \
  --client-ca-file=/opt/kubernetes/ssl/ca.pem \
  --service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \
  --etcd-cafile=/opt/kubernetes/ssl/ca.pem \
  --etcd-certfile=/opt/kubernetes/ssl/kubernetes.pem \
  --etcd-keyfile=/opt/kubernetes/ssl/kubernetes-key.pem \
  --etcd-servers=https://10.0.0.10:2379,https://10.0.0.11:2379,https://10.0.0.12:2379 \
  --enable-swagger-ui=true \
  --allow-privileged=true \
  --audit-log-maxage=30 \
  --audit-log-maxbackup=3 \
  --audit-log-maxsize=100 \
  --audit-log-path=/opt/kubernetes/log/api-audit.log \
  --event-ttl=1h \
  --v=2 \
  --logtostderr=false \
  --log-dir=/opt/kubernetes/log
Restart=on-failure
RestartSec=5
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
for i in ops-k8s-master02 ops-k8s-master03;do scp /usr/lib/systemd/system/kube-apiserver.service $i:/usr/lib/systemd/system/;done

  Note: adjust the etcd cluster IP addresses and the bind-address to match each node

  7) Start the API server service

systemctl daemon-reload
systemctl enable kube-apiserver
systemctl start kube-apiserver
systemctl status kube-apiserver
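
  A quick sanity check that the API server is up (a sketch; 6443 is the secure port and 8080 the local insecure port from the unit file above, and with anonymous auth disabled the https request may return 401, which still shows the listener is alive):

ss -tlnp | grep -E '6443|8080'
curl -k https://10.0.0.10:6443/healthz
curl http://127.0.0.1:8080/healthz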

  3. Deploy the Controller Manager service

  1) Create the service unit file and send it to the other nodes

cat>/usr/lib/systemd/system/kube-controller-manager.service<<EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
ExecStart=/opt/kubernetes/bin/kube-controller-manager \
  --address=127.0.0.1 \
  --master=http://127.0.0.1:8080 \
  --allocate-node-cidrs=true \
  --service-cluster-ip-range=10.1.0.0/16 \
  --cluster-cidr=10.2.0.0/16 \
  --cluster-name=kubernetes \
  --cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \
  --cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem \
  --service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem \
  --root-ca-file=/opt/kubernetes/ssl/ca.pem \
  --leader-elect=true \
  --v=2 \
  --logtostderr=false \
  --log-dir=/opt/kubernetes/log

Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF
for i in ops-k8s-master02 ops-k8s-master03;do scp /usr/lib/systemd/system/kube-controller-manager.service $i:/usr/lib/systemd/system/;done

  2) Start the Controller Manager and check its status

systemctl daemon-reload
systemctl enable kube-controller-manager
systemctl start kube-controller-manager
systemctl status kube-controller-manager

  4. Deploy the Kubernetes Scheduler

   1) Create the service unit file and send it to the other nodes

cat>/usr/lib/systemd/system/kube-scheduler.service<<EOF
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
ExecStart=/opt/kubernetes/bin/kube-scheduler \
  --address=127.0.0.1 \
  --master=http://127.0.0.1:8080 \
  --leader-elect=true \
  --v=2 \
  --logtostderr=false \
  --log-dir=/opt/kubernetes/log

Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF
for i in ops-k8s-master02 ops-k8s-master03;do scp /usr/lib/systemd/system/kube-scheduler.service $i:/usr/lib/systemd/system/;done

  2) Start the Kubernetes Scheduler and check its status

systemctl daemon-reload
systemctl enable kube-scheduler
systemctl start kube-scheduler
systemctl status kube-scheduler

  5. Deploy the kubectl command-line tool

  1) Prepare the binary

cd /usr/local/src/
#upload the package: rz kubernetes-client-linux-amd64.tar.gz
tar xf kubernetes-client-linux-amd64.tar.gz
for i in ops-k8s-master01 ops-k8s-master02 ops-k8s-master03;do scp /usr/local/src/kubernetes/client/bin/kubectl $i:/opt/kubernetes/bin/;done

  2) Create the admin signing request

cd /usr/local/src/ssl/

cat>admin-csr.json<<EOF
{
  "CN": "admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}
EOF

  3) Generate the admin certificate and private key

cfssl gencert -ca=/opt/kubernetes/ssl/ca.pem \
   -ca-key=/opt/kubernetes/ssl/ca-key.pem \
   -config=/opt/kubernetes/ssl/ca-config.json \
   -profile=kubernetes admin-csr.json | cfssljson -bare admin
ls -l admin*

#distribute the certificates to the master nodes
for i in ops-k8s-master01 ops-k8s-master02 ops-k8s-master03;do scp admin*.pem $i:/opt/kubernetes/ssl/;done

  如下操做其餘master節點也執行

  四、設置集羣參數

kubectl config set-cluster kubernetes \
   --certificate-authority=/opt/kubernetes/ssl/ca.pem \
   --embed-certs=true \
   --server=https://10.0.0.7:6443

  5) Set the client authentication parameters

 kubectl config set-credentials admin \
   --client-certificate=/opt/kubernetes/ssl/admin.pem \
   --embed-certs=true \
   --client-key=/opt/kubernetes/ssl/admin-key.pem

  6) Set the context parameters

kubectl config set-context kubernetes \
   --cluster=kubernetes \
   --user=admin

  7) Set the default context

kubectl config use-context kubernetes

  8) Use the kubectl tool

#  kubectl get cs
NAME                 STATUS    MESSAGE              ERROR
controller-manager   Healthy   ok                   
scheduler            Healthy   ok                   
etcd-2               Healthy   {"health": "true"}   
etcd-0               Healthy   {"health": "true"}   
etcd-1               Healthy   {"health": "true"}  

  9) Install kubectl command completion

yum install -y bash-completion
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
echo "source <(kubectl completion bash)" >> ~/.bashrc

5. Node deployment

  1. Deploy kubelet

  1) Prepare the binaries

cd /usr/local/src/
#upload the package: kubernetes-node-linux-amd64.tar.gz
tar xf kubernetes-node-linux-amd64.tar.gz
cd /usr/local/src/kubernetes/node/bin

#send to every node that should run pods
for i in ops-k8s-master01 ops-k8s-master02 ops-k8s-master03 ops-k8s-node01 ops-k8s-node02;do scp -r /usr/local/src/kubernetes/node/bin/{kubelet,kube-proxy} $i:/opt/kubernetes/bin/;done

  2) Create the role binding

kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap

  3) Create the kubelet bootstrapping kubeconfig file

    1. Set the cluster parameters
cd /usr/local/src/ssl

kubectl config set-cluster kubernetes \
   --certificate-authority=/opt/kubernetes/ssl/ca.pem \
   --embed-certs=true \
   --server=https://10.0.0.7:6443 \
   --kubeconfig=bootstrap.kubeconfig
    2. Set the client authentication parameters
kubectl config set-credentials kubelet-bootstrap \
   --token=a39e5244495964d9f66a5b8e689546ae \
   --kubeconfig=bootstrap.kubeconfig 
    3. Set the context parameters
kubectl config set-context default \
   --cluster=kubernetes \
   --user=kubelet-bootstrap \
   --kubeconfig=bootstrap.kubeconfig
    4. Select the default context
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
    5. Copy it to the target directory on this machine and on the other cluster nodes
for i in ops-k8s-master01 ops-k8s-master02 ops-k8s-master03 ops-k8s-node01 ops-k8s-node02;do scp bootstrap.kubeconfig $i:/opt/kubernetes/cfg/;done
    6. Deploy kubelet: set up CNI support
for i in ops-k8s-master01 ops-k8s-master02 ops-k8s-master03 ops-k8s-node01 ops-k8s-node02;do ssh -n $i "mkdir -p /etc/cni/net.d/&& exit";done
cat>/etc/cni/net.d/10-default.conf<<EOF
{
        "name": "flannel",
        "type": "flannel",
        "delegate": {
            "bridge": "docker0",
            "isDefaultGateway": true,
            "mtu": 1400
        }
}
EOF
for i in ops-k8s-master02 ops-k8s-master03 ops-k8s-node01 ops-k8s-node02;do scp -r /etc/cni/net.d/10-default.conf $i:/etc/cni/net.d/;done

  4) Create the kubelet working directory

for i in ops-k8s-master01 ops-k8s-master02 ops-k8s-master03 ops-k8s-node01 ops-k8s-node02;do ssh -n $i "mkdir -p /var/lib/kubelet && exit";done

  5) Create the kubelet service configuration

    1. Create the unit file
cat>/usr/lib/systemd/system/kubelet.service<<EOF
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service
Requires=docker.service

[Service]
WorkingDirectory=/var/lib/kubelet
ExecStart=/opt/kubernetes/bin/kubelet \
  --address=10.0.0.10 \
  --hostname-override=10.0.0.10 \
  --pod-infra-container-image=mirrorgooglecontainers/pause-amd64:3.0 \
  --experimental-bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \
  --kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \
  --cert-dir=/opt/kubernetes/ssl \
  --network-plugin=cni \
  --cni-conf-dir=/etc/cni/net.d \
  --cni-bin-dir=/opt/kubernetes/bin/cni \
  --cluster-dns=10.1.0.2 \
  --cluster-domain=cluster.local. \
  --hairpin-mode hairpin-veth \
  --allow-privileged=true \
  --fail-swap-on=false \
  --logtostderr=true \
  --v=2 \
  --logtostderr=false \
  --log-dir=/opt/kubernetes/log
Restart=on-failure
RestartSec=5
EOF
    2. Send it to the other cluster nodes and change the IP addresses accordingly
for i in ops-k8s-master02 ops-k8s-master03 ops-k8s-node01 ops-k8s-node02;do scp /usr/lib/systemd/system/kubelet.service  $i:/usr/lib/systemd/system/;done

  6) Start kubelet and check its status

systemctl daemon-reload
systemctl enable kubelet
systemctl start kubelet
systemctl status kubelet

  7) Check the CSR requests (note: run this on the configured master)

# kubectl get csr
NAME                                                   AGE       REQUESTOR           CONDITION
node-csr-0_w5F1FM_la_SeGiu3Y5xELRpYUjjT2icIFk9gO9KOU   1m        kubelet-bootstrap   Pending

  8) Approve the kubelet TLS certificate requests

kubectl get csr|grep 'Pending' | awk 'NR>0{print $1}'| xargs kubectl certificate approve

  Output like the following (the kubelet client certificate files under /opt/kubernetes/ssl) means the request was approved:

-rw-r--r-- 1 root root 1042 May 28 23:09 kubelet-client.crt
-rw------- 1 root root  227 May 28 23:08 kubelet-client.key

 

  After this completes, the node status should already show Ready:

# kubectl get node
NAME            STATUS    ROLES     AGE       VERSION

  2. Deploy Kubernetes Proxy

  1) Configure kube-proxy to use LVS (IPVS)

yum install -y ipvsadm ipset conntrack

  2) Create the kube-proxy certificate request

cd /usr/local/src/ssl/

cat>kube-proxy-csr.json<<EOF
{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

  3) Generate the certificate

cfssl gencert -ca=/opt/kubernetes/ssl/ca.pem \
   -ca-key=/opt/kubernetes/ssl/ca-key.pem \
   -config=/opt/kubernetes/ssl/ca-config.json \
   -profile=kubernetes  kube-proxy-csr.json | cfssljson -bare kube-proxy

  4) Distribute the certificates to the cluster nodes

for i in ops-k8s-master01 ops-k8s-master02 ops-k8s-master03 ops-k8s-node01 ops-k8s-node02;do scp kube-proxy*.pem $i:/opt/kubernetes/ssl/;done

  5) Create the kube-proxy kubeconfig file

kubectl config set-cluster kubernetes \
   --certificate-authority=/opt/kubernetes/ssl/ca.pem \
   --embed-certs=true \
   --server=https://10.0.0.7:6443 \
   --kubeconfig=kube-proxy.kubeconfig

 

kubectl config set-credentials kube-proxy \
   --client-certificate=/opt/kubernetes/ssl/kube-proxy.pem \
   --client-key=/opt/kubernetes/ssl/kube-proxy-key.pem \
   --embed-certs=true \
   --kubeconfig=kube-proxy.kubeconfig

 

kubectl config set-context default \
   --cluster=kubernetes \
   --user=kube-proxy \
   --kubeconfig=kube-proxy.kubeconfig

 

kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig

  6) Distribute the kubeconfig file to the cluster nodes

for i in ops-k8s-master01 ops-k8s-master02 ops-k8s-master03 ops-k8s-node01 ops-k8s-node02;do scp kube-proxy.kubeconfig  $i:/opt/kubernetes/cfg/;done

  7) Create the kube-proxy service configuration

for i in ops-k8s-master01 ops-k8s-master02 ops-k8s-master03 ops-k8s-node01 ops-k8s-node02;do ssh -n $i "mkdir -p /var/lib/kube-proxy && exit";done
cat>/usr/lib/systemd/system/kube-proxy.service<<EOF
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
WorkingDirectory=/var/lib/kube-proxy
ExecStart=/opt/kubernetes/bin/kube-proxy \
  --bind-address=10.0.0.10 \
  --hostname-override=10.0.0.10 \
  --kubeconfig=/opt/kubernetes/cfg/kube-proxy.kubeconfig \
  --masquerade-all \
  --feature-gates=SupportIPVSProxyMode=true \
  --proxy-mode=ipvs \
  --ipvs-min-sync-period=5s \
  --ipvs-sync-period=5s \
  --ipvs-scheduler=rr \
  --logtostderr=true \
  --v=2 \
  --logtostderr=false \
  --log-dir=/opt/kubernetes/log

Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

  Send the unit file to the other nodes and change the IP addresses accordingly

for i in ops-k8s-master02 ops-k8s-master03 ops-k8s-node01 ops-k8s-node02;do scp /usr/lib/systemd/system/kube-proxy.service $i:/usr/lib/systemd/system/;done

  8) Start Kubernetes Proxy and check its status

systemctl daemon-reload
systemctl enable kube-proxy
systemctl start kube-proxy
systemctl status kube-proxy

  9) Check the LVS state and the node status

ipvsadm -L -n

  If kubelet and kube-proxy are installed on the test machines, you can check the node status with the following command:

kubectl get node

6. Flannel network deployment

  flannel download (a CoreOS project): https://github.com/coreos/flannel/releases

  1. Basics of running Pods on a Node

  1) Running Pods on a Node (see figure)

  

  2) Concepts you need to know

    1. RC
  • The RC (ReplicationController) is the earliest K8s API object for keeping Pods highly available. It monitors running Pods to ensure the cluster runs the specified number of Pod replicas.
  • The specified number can be one or many; if there are fewer than specified, the RC starts new replicas, and if there are more, it kills the extra ones.
  • Even when the specified count is 1, running a Pod through an RC is wiser than running it directly, because the RC still provides high availability and guarantees that one Pod is always running.
    2. RS
  • The RS (ReplicaSet) is the next-generation RC. It provides the same high-availability capability; the main difference is that the newer RS supports richer selector matching. ReplicaSet objects are generally not used on their own, but as the desired-state parameter of a Deployment.
  • RS appeared in K8s 1.2 as an upgrade of RC and is normally used together with Deployments.
  • A Deployment represents one update operation a user applies to the K8s cluster. It is an API object with a broader scope than RS: it can create a new service, update one, or roll-upgrade one. Rolling-upgrading a service actually creates a new RS, then gradually scales the new RS up to the desired count while scaling the old RS down to 0, a composite operation.
    3. Deployment
  • Such a composite operation is awkward to describe with a single RS, so the more general Deployment object is used to describe it.
  • RC, RS, and Deployment only guarantee the number of Pods backing a service; they do not solve the problem of how to reach those Pods. A Pod is just one running instance of a service: it can stop on one node at any time and come back on another node as a new Pod with a new IP, so a fixed IP and port cannot be relied on to provide the service.
  • Providing a service stably requires service discovery and load balancing. Service discovery finds the corresponding backend instances for the service a client wants to reach.
    4. Service (ClusterIP)
  • In a K8s cluster, what a client accesses is the Service object. Each Service maps to a virtual IP that is valid inside the cluster, and the service is reached through that virtual IP from within the cluster.
    5. Node IP, Pod IP, Cluster IP
  • Node IP: the IP of the node device, i.e. the real IP of the physical machine, virtual machine, or other container host.
  • Pod IP: the Pod's IP address, allocated from the docker0 bridge network segment.
  • Cluster IP: the Service's IP. It is a virtual IP that only applies to Service objects; it is managed and allocated by K8s, must be combined with the service port to be usable, has no communication capability on its own, and needs extra configuration to be reached from outside the cluster.

  Inside the K8s cluster, communication among Node IPs, Pod IPs, and Cluster IPs follows routing rules programmed by K8s, not ordinary IP routing.
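
  To make these objects concrete, here is a minimal sketch (the nginx-demo name and image are illustrative only, not part of this deployment, and it will only schedule successfully once the Flannel network below is in place) that creates a Deployment with 2 replicas and exposes it through a ClusterIP Service, written in the same cat/heredoc style used elsewhere in this guide:

cat>/opt/kubernetes/yaml/nginx-demo.yaml<<EOF
apiVersion: extensions/v1beta1    # matches the k8s v1.10 binaries used in this guide; use apps/v1 on newer clusters
kind: Deployment
metadata:
  name: nginx-demo
spec:
  replicas: 2                     # the Deployment keeps 2 Pod replicas running through a ReplicaSet
  selector:
    matchLabels:
      app: nginx-demo
  template:
    metadata:
      labels:
        app: nginx-demo
    spec:
      containers:
      - name: nginx
        image: nginx:1.13
        ports:
        - containerPort: 80
---
apiVersion: v1
kind: Service                     # ClusterIP Service: a stable virtual IP from the 10.1.0.0/16 service range
metadata:
  name: nginx-demo
spec:
  selector:
    app: nginx-demo
  ports:
  - port: 80
    targetPort: 80
EOF
kubectl create -f /opt/kubernetes/yaml/nginx-demo.yaml
kubectl get deploy,rs,pod,svc -o wide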

  2. Deploy the Flannel service

  1) Create the flannel certificate request

cd /usr/local/src/ssl

cat>flanneld-csr.json<<EOF
{
  "CN": "flanneld",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

  2) Generate the certificate

cfssl gencert -ca=/opt/kubernetes/ssl/ca.pem \
   -ca-key=/opt/kubernetes/ssl/ca-key.pem \
   -config=/opt/kubernetes/ssl/ca-config.json \
   -profile=kubernetes flanneld-csr.json | cfssljson -bare flanneld

  3) Distribute the certificates

for i in ops-k8s-master01 ops-k8s-master02 ops-k8s-master03 ops-k8s-node01 ops-k8s-node02;do scp flanneld*.pem $i:/opt/kubernetes/ssl/;done

  4) Download and install the flannel package

cd /usr/local/src
# wget https://github.com/coreos/flannel/releases/download/v0.10.0/flannel-v0.10.0-linux-amd64.tar.gz
#or upload the package
#rz flannel-v0.10.0-linux-amd64.tar.gz
tar zxf flannel-v0.10.0-linux-amd64.tar.gz
for i in ops-k8s-master01 ops-k8s-master02 ops-k8s-master03 ops-k8s-node01 ops-k8s-node02;do scp flanneld mk-docker-opts.sh  $i:/opt/kubernetes/bin/;done

cd /usr/local/src/kubernetes/cluster/centos/node/bin/
for i in ops-k8s-master01 ops-k8s-master02 ops-k8s-master03 ops-k8s-node01 ops-k8s-node02;do scp remove-docker0.sh $i:/opt/kubernetes/bin/;done

  5) Create the Flannel configuration file

  Configure it on the local machine first

cat>/opt/kubernetes/cfg/flannel<<EOF
FLANNEL_ETCD="-etcd-endpoints=https://10.0.0.10:2379,https://10.0.0.11:2379,https://10.0.0.12:2379"
FLANNEL_ETCD_KEY="-etcd-prefix=/kubernetes/network"
FLANNEL_ETCD_CAFILE="--etcd-cafile=/opt/kubernetes/ssl/ca.pem"
FLANNEL_ETCD_CERTFILE="--etcd-certfile=/opt/kubernetes/ssl/flanneld.pem"
FLANNEL_ETCD_KEYFILE="--etcd-keyfile=/opt/kubernetes/ssl/flanneld-key.pem"
EOF

  Send it to the other k8s cluster nodes

for i in ops-k8s-master02 ops-k8s-master03 ops-k8s-node01 ops-k8s-node02;do scp /opt/kubernetes/cfg/flannel $i:/opt/kubernetes/cfg/;done

  6) Set up the Flannel systemd service

cat>/usr/lib/systemd/system/flannel.service<<'EOF'
[Unit]
Description=Flanneld overlay address etcd agent
After=network.target
Before=docker.service

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/flannel
ExecStartPre=/opt/kubernetes/bin/remove-docker0.sh
ExecStart=/opt/kubernetes/bin/flanneld ${FLANNEL_ETCD} ${FLANNEL_ETCD_KEY} ${FLANNEL_ETCD_CAFILE} ${FLANNEL_ETCD_CERTFILE} ${FLANNEL_ETCD_KEYFILE}
ExecStartPost=/opt/kubernetes/bin/mk-docker-opts.sh -d /run/flannel/docker

Type=notify

[Install]
WantedBy=multi-user.target
RequiredBy=docker.service
EOF

  Send it to the other k8s cluster nodes

for i in ops-k8s-master02 ops-k8s-master03 ops-k8s-node01 ops-k8s-node02;do scp /usr/lib/systemd/system/flannel.service $i:/usr/lib/systemd/system/;done

  3. Flannel CNI integration

  1) CNI in brief

  CNI (Container Network Interface) is a set of standards and libraries for configuring Linux container networking; network plugins are developed against these standards and libraries, and a number of common plugins are already provided on GitHub. CNI focuses only on connecting container networks and releasing resources when a container is destroyed, and it provides a framework, so it can support many different network modes and is easy to implement.

  Compared with a k8s exec plugin, which runs an executable directly, a CNI plugin is a wrapper around an executable: it defines a framework for the program, and in the end it still executes the program just like an exec plugin. The difference is that an exec plugin reads its parameters from the command line, while a CNI plugin reads them from environment variables and a configuration file.
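
  As an illustration of that calling convention (a sketch only; it uses the loopback plugin from the bundle installed in the next step and a throwaway network namespace, so treat the paths and values as assumptions):

#CNI plugins take their parameters from environment variables plus a JSON network config on stdin
ip netns add cni-test
CNI_COMMAND=ADD CNI_CONTAINERID=test001 CNI_NETNS=/var/run/netns/cni-test CNI_IFNAME=lo CNI_PATH=/opt/kubernetes/bin/cni \
/opt/kubernetes/bin/cni/loopback <<EOF
{ "cniVersion": "0.3.1", "name": "lo", "type": "loopback" }
EOF
ip netns delete cni-test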

  2) Download the CNI plugins

  https://github.com/containernetworking/plugins/releases

cd /usr/local/src/
wget https://github.com/containernetworking/plugins/releases/download/v0.7.1/cni-plugins-amd64-v0.7.1.tgz
#or upload: rz cni-plugins-amd64-v0.7.1.tgz
mkdir /opt/kubernetes/bin/cni
tar zxf cni-plugins-amd64-v0.7.1.tgz -C /opt/kubernetes/bin/cni

  Send the plugins to the other cluster nodes

for i in ops-k8s-master02 ops-k8s-master03 ops-k8s-node01 ops-k8s-node02;do scp -r /opt/kubernetes/bin/cni $i:/opt/kubernetes/bin/;done

  3) Create the etcd key

/opt/kubernetes/bin/etcdctl --ca-file /opt/kubernetes/ssl/ca.pem --cert-file /opt/kubernetes/ssl/flanneld.pem --key-file /opt/kubernetes/ssl/flanneld-key.pem \
      --no-sync -C https://10.0.0.10:2379,https://10.0.0.11:2379,https://10.0.0.12:2379 \
mk /kubernetes/network/config '{ "Network": "10.2.0.0/16", "Backend": { "Type": "vxlan", "VNI": 1 }}' >/dev/null 2>&1
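
  To confirm the key was written, read it back with the same certificates (a quick check):

/opt/kubernetes/bin/etcdctl --ca-file /opt/kubernetes/ssl/ca.pem --cert-file /opt/kubernetes/ssl/flanneld.pem --key-file /opt/kubernetes/ssl/flanneld-key.pem \
      --no-sync -C https://10.0.0.10:2379,https://10.0.0.11:2379,https://10.0.0.12:2379 \
get /kubernetes/network/config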

  4) Start flannel and check its status (run on all nodes)

systemctl daemon-reload
systemctl enable flannel
chmod +x /opt/kubernetes/bin/*
systemctl start flannel
systemctl status flannel

  4. Configure Docker to use Flannel

  1) Edit Docker's systemd unit file /usr/lib/systemd/system/docker.service

[Unit] #under [Unit], modify After and add Requires
After=network-online.target firewalld.service flannel.service
Wants=network-online.target
Requires=flannel.service 
[Service] #add EnvironmentFile=-/run/flannel/docker; flannel creates this file after it starts
Type=notify
EnvironmentFile=-/run/flannel/docker
ExecStart=/usr/bin/dockerd $DOCKER_OPTS

  2) Distribute it to the other k8s cluster nodes

for i in ops-k8s-master02 ops-k8s-master03 ops-k8s-node01 ops-k8s-node02;do scp -r /usr/lib/systemd/system/docker.service $i:/usr/lib/systemd/system/;done

  3) Restart Docker and check its status

systemctl daemon-reload
systemctl restart docker
systemctl status docker

  4) Check how the Docker bridge IPs changed on the cluster nodes

##each cluster node should now be assigned a different IP segment
ip a

  5) Create an application and test whether the network is connected across nodes

1. Create a test deployment
kubectl run net-test --image=alpine --replicas=2 sleep 360000

2. Check the assigned IPs
kubectl get pod -o wide

3. Test connectivity
ping 10.2.83.2

  If the Pods can reach each other, Flannel is configured successfully!

7. CoreDNS and Dashboard deployment

  Note: the namespace is kube-system

  1. Deploy CoreDNS

  1) Create the yaml directory

mkdir -p /opt/kubernetes/yaml/coredns && cd /opt/kubernetes/yaml/coredns

  2) Write the coredns.yaml file

  Adjust the configuration to your needs (especially the resource limits)

cat>coredns.yaml<<EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
  labels:
      kubernetes.io/cluster-service: "true"
      addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: Reconcile
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: EnsureExists
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
  labels:
      addonmanager.kubernetes.io/mode: EnsureExists
data:
  Corefile: |
    .:53 {
        errors
        health
        kubernetes cluster.local. in-addr.arpa ip6.arpa {
            pods insecure
            upstream
            fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        proxy . /etc/resolv.conf
        cache 30
    }
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: coredns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  replicas: 2
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: coredns
  template:
    metadata:
      labels:
        k8s-app: coredns
    spec:
      serviceAccountName: coredns
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
        - key: "CriticalAddonsOnly"
          operator: "Exists"
      containers:
      - name: coredns
        image: coredns/coredns:1.0.6
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 2Gi
          requests:
            cpu: 2
            memory: 1Gi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
      dnsPolicy: Default
      volumes:
        - name: config-volume
          configMap:
            name: coredns
            items:
            - key: Corefile
              path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: coredns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: coredns
  clusterIP: 10.1.0.2
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
EOF

  3) Create CoreDNS

kubectl create -f coredns.yaml
kubectl get pod -n kube-system

  4) Test

#check the forwarding entries
ipvsadm -Ln

#run a test pod (--rm deletes the container as soon as it exits)
kubectl run dns-test --rm -it --image=alpine /bin/sh
#inside the container
##check whether the outside network is reachable
ping baidu.com
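
  Still inside the test container, you can also check that cluster DNS resolution works (a sketch; 10.1.0.2 is the CoreDNS Service IP defined above and 10.1.0.1 is the kubernetes Service IP):

nslookup kubernetes.default.svc.cluster.local
nslookup baidu.com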

  2. Deploy the Dashboard

  1) Create the yaml directory

mkdir -p /opt/kubernetes/yaml/dashboard && cd /opt/kubernetes/yaml/dashboard

  2) Write the dashboard-related yaml files

cat>admin-user-sa-rbac.yaml<<EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kube-system

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kube-system
EOF
cat>kubernetes-dashboard.yaml<<EOF
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Configuration to deploy release version of the Dashboard UI compatible with
# Kubernetes 1.8.
#
# Example usage: kubectl create -f <this_file>

# ------------------- Dashboard Secret ------------------- #

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-certs
  namespace: kube-system
type: Opaque

---
# ------------------- Dashboard Service Account ------------------- #

apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system

---
# ------------------- Dashboard Role & Role Binding ------------------- #

kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubernetes-dashboard-minimal
  namespace: kube-system
rules:
  # Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret.
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["create"]
  # Allow Dashboard to create 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["create"]
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
  resources: ["secrets"]
  resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs"]
  verbs: ["get", "update", "delete"]
  # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
  resources: ["configmaps"]
  resourceNames: ["kubernetes-dashboard-settings"]
  verbs: ["get", "update"]
  # Allow Dashboard to get metrics from heapster.
- apiGroups: [""]
  resources: ["services"]
  resourceNames: ["heapster"]
  verbs: ["proxy"]
- apiGroups: [""]
  resources: ["services/proxy"]
  resourceNames: ["heapster", "http:heapster:", "https:heapster:"]
  verbs: ["get"]

---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: kubernetes-dashboard-minimal
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard-minimal
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kube-system

---
# ------------------- Dashboard Deployment ------------------- #

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
      - name: kubernetes-dashboard
        #image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.8.3
        image: mirrorgooglecontainers/kubernetes-dashboard-amd64:v1.8.3
        ports:
        - containerPort: 8443
          protocol: TCP
        args:
          - --auto-generate-certificates
          # Uncomment the following line to manually specify Kubernetes API server Host
          # If not specified, Dashboard will attempt to auto discover the API server and connect
          # to it. Uncomment only if the default does not work.
          # - --apiserver-host=http://my-address:port
        volumeMounts:
        - name: kubernetes-dashboard-certs
          mountPath: /certs
          # Create on-disk volume to store exec logs
        - mountPath: /tmp
          name: tmp-volume
        livenessProbe:
          httpGet:
            scheme: HTTPS
            path: /
            port: 8443
          initialDelaySeconds: 30
          timeoutSeconds: 30
      volumes:
      - name: kubernetes-dashboard-certs
        secret:
          secretName: kubernetes-dashboard-certs
      - name: tmp-volume
        emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule

---
# ------------------- Dashboard Service ------------------- #

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  ports:
    - port: 443
      targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard
  type: NodePort
EOF
cat>ui-admin-rbac.yaml<<EOF
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: ui-admin
rules:
- apiGroups:
  - ""
  resources:
  - services
  - services/proxy
  verbs:
  - '*'

---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: ui-admin-binding
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: ui-admin
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: admin
EOF
cat>ui-read-rbac.yaml<<EOF
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: ui-read
rules:
- apiGroups:
  - ""
  resources:
  - services
  - services/proxy
  verbs:
  - get
  - list
  - watch

---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: ui-read-binding
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: ui-read
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: readonly
EOF

  3) Create the Dashboard

cd /opt/kubernetes/yaml && kubectl create -f dashboard/

#get the login token
kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')

  4) Access test
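
  A sketch of how to reach the UI (the Service above is of type NodePort, so a port from the 20000-40000 range configured on the API server is assigned to it):

kubectl -n kube-system get svc kubernetes-dashboard
#then open https://<any-node-IP>:<assigned NodePort> in a browser and sign in with the token printed above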

8. Further reading

  Kubernetes v1.10.x fully manual HA installation tutorial: https://zhangguanzhang.github.io/2018/05/05/Kubernetes_install/

https://github.com/coreos/etcd/releases/
