Kubernetes (k8s) Ansible Deployment Guide

Part 1: Base System Preparation
Ubuntu 18.04 --> root password: 123456
Main steps:
 
1. Rename the network interface to eth0:
# vim /etc/default/grub
GRUB_CMDLINE_LINUX="net.ifnames=0 biosdevname=0"
root@ubuntu:~# update-grub
root@ubuntu:~# reboot
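After the reboot, confirm the rename took effect (a quick check, assuming the VM has a single NIC):
root@ubuntu:~# ip link show eth0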
 
2. Change the system IP address:
# vim /etc/netplan/01-netcfg.yaml
network:
  version: 2
  renderer: networkd
  ethernets:
    eth0:
      dhcp4: no
      addresses: [172.16.99.121/24]
      gateway4: 172.16.99.254
      nameservers:
        addresses: [172.16.99.254]
 
3. Apply the IP configuration and reboot to test:
root@ubuntu:~# netplan apply
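If you are connected over SSH, netplan try is a safer variant: it applies the config and rolls it back automatically unless you confirm. Afterwards, verify the address and gateway:
root@ubuntu:~# netplan try
root@ubuntu:~# ip addr show eth0
root@ubuntu:~# ping -c 3 172.16.99.254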
 
4. Change the hostname:
root@k8s-m1:~# echo 'k8s-m1' >/etc/hostname
root@k8s-m1:~# cat /etc/rc.local
#!/bin/sh -e
#
# rc.local
#
# This script is executed at the end of each multiuser runlevel.
# Make sure that the script will "exit 0" on success or any other
# value on error.
#
# In order to enable or disable this script just change the execution
# bits.
#
# By default this script does nothing.
echo 'k8s-m1' >/etc/hostname
exit 0
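Note: on Ubuntu 18.04, rc.local only runs if rc-local.service is enabled and the file is executable; hostnamectl persists the name in one step and is a simpler alternative:
root@ubuntu:~# hostnamectl set-hostname k8s-m1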
 
 
5. Install commonly used tools:
apt-get update
apt-get purge ufw lxd lxd-client lxcfs lxc-common    # remove packages we do not need
apt-get install iproute2 ntpdate tcpdump telnet traceroute nfs-kernel-server nfs-common lrzsz tree openssl libssl-dev libpcre3 libpcre3-dev zlib1g-dev gcc openssh-server iotop unzip zip
 
 
6. Install docker:
# apt-get update
# apt-get -y install apt-transport-https ca-certificates curl software-properties-common
# curl -fsSL http://mirrors.aliyun.com/docker-ce/linux/ubuntu/gpg | sudo apt-key add -
# add-apt-repository "deb [arch=amd64] http://mirrors.aliyun.com/docker-ce/linux/ubuntu $(lsb_release -cs) stable"
# apt-get -y update && apt-get -y install docker-ce
# docker info
 
7. Take a snapshot of the VM.
 
 
 
8. Other configuration:
# grep "^[a-Z]" /etc/sysctl.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
vm.swappiness=0
net.ipv4.ip_forward = 1
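The net.bridge.* keys only exist once the br_netfilter kernel module is loaded, and sysctl.conf must be re-read for the settings to take effect; a minimal sequence:
# modprobe br_netfilter
# echo br_netfilter >> /etc/modules    # also load the module on boot
# sysctl -p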
 
Part 2: Server Initialization and Certificate Creation
 
Configure hostnames and the hosts file, and synchronize time across all servers:
 
172.16.99.145 ansible-vm2 ansible2.dexter.com
172.16.99.144 ansible-vm1  ansible1.dexter.com
172.16.99.143 etcd-vm3    etcd3.dexter.com
172.16.99.142 etcd-vm2     etcd2.dexter.com
172.16.99.141 etcd-vm1     etcd1.dexter.com
172.16.99.128 harbor-vm2  harbor2.dexter.com
172.16.99.127 harbor-vm1  harbor1.dexter.com
172.16.99.126 haproxy-vm2 haproxy2.dexter.com
172.16.99.125 haproxy-vm1  haproxy1.dexter.com
172.16.99.124 k8s-n2      k8sn2.dexter.com
172.16.99.123 k8s-n1       k8sn1.dexter.com
172.16.99.122 k8s-m2       k8sm2.dexter.com
172.16.99.121 k8s-m1       k8sm1.dexter.com
VIP 172.16.99.148
 
echo '*/10 * * * * root timedatectl set-timezone Asia/Shanghai && ntpdate time1.aliyun.com && hwclock -w >/dev/null 2>&1' >>/etc/crontab
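The NTP source can be checked in query-only mode, without touching the clock:
# ntpdate -q time1.aliyun.com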
 
 
Part 3: Installing the keepalived and haproxy Servers
Install keepalived and haproxy:
root@haproxy-vm1:~# apt-get install keepalived haproxy -y
root@haproxy-vm1:~# find / -name "keepalived.*"
Configure keepalived:
root@haproxy-vm1:~# cp /usr/share/doc/keepalived/samples/keepalived.conf.sample /etc/keepalived/keepalived.conf
root@haproxy-vm1:~# cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived
 
global_defs {
   notification_email {
     acassen
   }
   notification_email_from Alexandre.Cassen@firewall.loc
   smtp_server 192.168.200.1
   smtp_connect_timeout 30
   router_id LVS_DEVEL
}
 
vrrp_instance VI_1 {
    interface eth0
    virtual_router_id 50
    nopreempt
    priority 100
    advert_int 1
    virtual_ipaddress {
      172.16.99.148 dev eth0
    }
}
root@haproxy-vm1:~# systemctl restart keepalived
root@haproxy-vm1:~# systemctl enable keepalived
Synchronizing state of keepalived.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable keepalived
Verify that keepalived is working:
root@haproxy-vm1:~# ip addr show
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
    link/ether fa:16:3e:f3:bc:58 brd ff:ff:ff:ff:ff:ff
    inet 172.16.99.125/24 brd 172.16.99.255 scope global dynamic eth0
       valid_lft 84622sec preferred_lft 84622sec
    inet 172.16.99.148/32 scope global eth0
       valid_lft forever preferred_lft forever
    inet6 fe80::f816:3eff:fef3:bc58/64 scope link
       valid_lft forever preferred_lft forever
 
Configure haproxy:
root@haproxy-vm1:~# cat /etc/haproxy/haproxy.cfg
global
    log /dev/log    local0
    log /dev/log    local1 notice
    chroot /var/lib/haproxy
    stats socket /run/haproxy/admin.sock mode 660 level admin expose-fd listeners
    stats timeout 30s
    user haproxy
    group haproxy
    daemon
    ca-base /etc/ssl/certs
    crt-base /etc/ssl/private
    ssl-default-bind-ciphers ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:RSA+AESGCM:RSA+AES:!aNULL:!MD5:!DSS
    ssl-default-bind-options no-sslv3
 
defaults
    log    global
    mode    http
    option    httplog
    option    dontlognull
        timeout connect 5000
        timeout client  50000
        timeout server  50000
    errorfile 400 /etc/haproxy/errors/400.http
    errorfile 403 /etc/haproxy/errors/403.http
    errorfile 408 /etc/haproxy/errors/408.http
    errorfile 500 /etc/haproxy/errors/500.http
    errorfile 502 /etc/haproxy/errors/502.http
    errorfile 503 /etc/haproxy/errors/503.http
    errorfile 504 /etc/haproxy/errors/504.http
 
listen k8s-api-server
    bind 0.0.0.0:6443
    mode tcp
    balance source
    server k8s-m1 172.16.99.121:6443 check inter 2000 fall 3 rise 5
    server k8s-m2 172.16.99.122:6443 check inter 2000 fall 3 rise 5
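
Before starting the service, the configuration can be syntax-checked:
root@haproxy-vm1:~# haproxy -c -f /etc/haproxy/haproxy.cfg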
 
 
root@haproxy-vm1:~# systemctl start haproxy
root@haproxy-vm1:~# systemctl enable haproxy
 
Copy the configuration to haproxy-vm2:
root@haproxy-vm1:~# scp /etc/keepalived/keepalived.conf root@172.16.99.126:/etc/keepalived/
root@haproxy-vm1:~# scp /etc/haproxy/haproxy.cfg  root@172.16.99.126:/etc/haproxy/
 
Perform the same steps on haproxy-vm2 (install the packages, then start and enable both services), adjusting the keepalived priority as shown below.
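The one value worth changing on haproxy-vm2 is the VRRP priority, so that haproxy-vm1 wins the initial election; a sketch of the vrrp_instance block on vm2 (80 is an arbitrary choice, anything below 100 works):
vrrp_instance VI_1 {
    interface eth0
    virtual_router_id 50      # must match haproxy-vm1
    nopreempt
    priority 80               # lower than haproxy-vm1's 100
    advert_int 1
    virtual_ipaddress {
      172.16.99.148 dev eth0
    }
}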
 
 
Note: since my machines here are OpenStack VMs, after bringing up the VIP, other instances can only ping the VIP address once it is associated with the instances' ports (allowed address pairs):
[root@node1 ~]# openstack port list | grep "125\|126"
| 509886e0-cafe-4c87-b6ce-c3df3c5b9e19 |      | fa:16:3e:f3:bc:58 | ip_address='172.16.99.125', subnet_id='bbd536c6-a975-4841-8082-35b28de16ef0' | ACTIVE |
| f19c5f92-c101-49a3-a950-43d27578e805 |      | fa:16:3e:7a:44:4a | ip_address='172.16.99.126', subnet_id='bbd536c6-a975-4841-8082-35b28de16ef0' | ACTIVE |
[root@node1 ~]# neutron  port-update 509886e0-cafe-4c87-b6ce-c3df3c5b9e19  --allowed_address_pairs list=true type=dict ip_address=172.16.99.148
Updated port: 509886e0-cafe-4c87-b6ce-c3df3c5b9e19
[root@node1 ~]#  neutron  port-update f19c5f92-c101-49a3-a950-43d27578e805  --allowed_address_pairs list=true type=dict ip_address=172.16.99.148
Updated port: f19c5f92-c101-49a3-a950-43d27578e805
After the association, the VIP can be pinged.
Part 4: Installing the Harbor Servers
Install harbor:
root@harbor-vm1:/usr/local/src# ls
harbor-offline-installer-v1.7.5.tgz
root@harbor-vm1:/usr/local/src# tar -xvf harbor-offline-installer-v1.7.5.tgz
root@harbor-vm1:/usr/local/src# cd harbor/
root@harbor-vm1:/usr/local/src/harbor# mkdir cert
root@harbor-vm1:/usr/local/src/harbor# vim harbor.cfg
hostname =  harbor1.dexter.com
ui_url_protocol = https
ssl_cert = /usr/local/src/harbor/cert/server.crt
ssl_cert_key = /usr/local/src/harbor/cert/server.key
harbor_admin_password = 123456
 
Generate the certificates:
root@harbor-vm1:~# mkdir  /usr/local/src/harbor/cert
root@harbor-vm1:~# cd  /usr/local/src/harbor/cert
root@harbor-vm1:/usr/local/src/harbor/cert# openssl genrsa -out server.key 2048   # generate the private key
root@harbor-vm1:/usr/local/src/harbor/cert# openssl req -x509 -new -nodes -key server.key -subj "/CN=harbor1.dexter.com" -days 7120 -out server.crt   # create a long-lived self-signed certificate
root@harbor-vm2:/usr/local/src/harbor/cert# openssl req -x509 -new -nodes -key server.key -subj "/CN=harbor2.dexter.com" -days 7120 -out server.crt   # same on harbor-vm2, against its own server.key
Note: if server.crt cannot be generated on Ubuntu, you can try generating it on CentOS and copying it back to Ubuntu.
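A common cause of that failure on Ubuntu 18.04 is OpenSSL complaining "Can't load /root/.rnd into RNG"; seeding the random file first is worth trying before switching machines (assumes OpenSSL 1.1.1, which provides -writerand):
root@harbor-vm1:~# openssl rand -writerand /root/.rnd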
 
 
Install docker
Use the official convenience script (only suitable for hosts with public Internet access):
curl -fsSL https://get.docker.com | bash -s docker --mirror Aliyun
 
Install docker-compose (required by the harbor installer):
apt-get install docker-compose -y
 
Run the harbor installer:
# ./install.sh
 
Configure clients to use harbor:
mkdir /etc/docker/certs.d/harbor1.dexter.com -pv
mkdir /etc/docker/certs.d/harbor2.dexter.com -pv
Note: "clients" here mainly means the master and node machines.
 
[root@k8s-harbor1 harbor]# scp cert/server.crt 172.16.99.121:/etc/docker/certs.d/harbor1.dexter.com/
[root@k8s-harbor2 harbor]# scp cert/server.crt 172.16.99.121:/etc/docker/certs.d/harbor2.dexter.com/
 
# test logging in
[root@k8s-m1 ~]# docker login harbor1.dexter.com
Username: admin
Password:
Login Succeeded
[root@k8s-m1 ~]# docker login harbor2.dexter.com
Username: admin
Password:
Login Succeeded
 
 
Modify the local C:\Windows\System32\drivers\etc\hosts file and add the following two lines:
172.16.99.128   harbor2.dexter.com
172.16.99.127   harbor1.dexter.com
 
Try opening harbor in a browser; account: admin, password: 123456.
While you are at it, create a project for base images (baseimages).
 
Test pushing an image to harbor:
root@k8s-m1:~# docker pull alpine
root@k8s-m1:~# docker tag alpine:latest harbor1.dexter.com/baseimages/alpine:latest
root@k8s-m1:~# docker push harbor1.dexter.com/baseimages/alpine:latest
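To confirm the registry serves the image, pull it back from another host that already trusts the certificate:
root@k8s-n1:~# docker pull harbor1.dexter.com/baseimages/alpine:latest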
 
Note: docker must be installed on the masters in advance.
Install docker
Use the official convenience script (only suitable for hosts with public Internet access):
# curl -fsSL https://get.docker.com | bash -s docker --mirror Aliyun
 
 
Part 5: Deploying with Ansible
Base environment preparation (all nodes):
# apt-get install python2.7 -y
# ln -s /usr/bin/python2.7 /usr/bin/python
root@ansible-vm1:~# apt-get install ansible -y    # on the ansible node
root@ansible-vm1:~# apt-get install git -y        # on the ansible node
 
Distribute SSH keys (so the ansible deployment can reach the k8s servers without passwords):
root@ansible-vm1:~# ssh-keygen    # generate a key pair
root@ansible-vm1:~# apt-get install sshpass -y    # used to push the public key to each k8s server
# public key distribution script:
root@ansible-vm1:~# cat scp.sh
#!/bin/bash
# target host list
IP="
172.16.99.121
172.16.99.122
172.16.99.127
172.16.99.128
172.16.99.141
172.16.99.142
172.16.99.143
172.16.99.144
172.16.99.145
172.16.99.123
172.16.99.124
172.16.99.125
172.16.99.126"
 
for node in ${IP};do
    sshpass -p 123456 ssh-copy-id  -p22 ${node}  -o StrictHostKeyChecking=no
    if [ $? -eq 0 ];then
        echo "${node} key copied successfully"
    else
        echo "${node} key copy failed"
    fi
done
# run the script to distribute the key:
root@ansible-vm1:~# bash scp.sh
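Before moving on, it is worth confirming that password-less login really works; a quick spot-check over a couple of nodes (BatchMode makes ssh fail instead of prompting for a password):
root@ansible-vm1:~# for node in 172.16.99.121 172.16.99.141; do ssh -o BatchMode=yes ${node} hostname; done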
root@ansible-vm1:~# vim ~/.vimrc    # disable vim auto-indent (helps when pasting)
set paste
 
1.6.2: Clone the project:
 
root@ansible-vm1:~# git clone  -b  0.6.1 https://github.com/easzlab/kubeasz.git
root@ansible-vm1:~# mv /etc/ansible/* /opt/    # move the stock ansible files out of the way
root@ansible-vm1:~# mv kubeasz/* /etc/ansible/
root@ansible-vm1:~# cd  /etc/ansible/
 
1.6.3: Prepare the hosts file:
 
root@ansible-vm1:/etc/ansible# pwd
/etc/ansible
root@ansible-vm1:/etc/ansible# cp example/hosts.m-masters.example ./hosts    # copy the hosts template
root@ansible-vm1:/etc/ansible# cat hosts
# Cluster deploy node: usually the node that runs the ansible playbooks
# Variable NTP_ENABLED (=yes/no) controls whether chrony time sync is installed in the cluster
[deploy]
172.16.99.144 NTP_ENABLED=no
 
# For the etcd cluster, provide NODE_NAME as below; note the etcd cluster must have an odd number of nodes (1,3,5,7...)
[etcd]
172.16.99.141 NODE_NAME=etcd1
172.16.99.142 NODE_NAME=etcd2
172.16.99.143 NODE_NAME=etcd3
 
[new-etcd] # reserved group, for adding etcd nodes later
#192.168.1.x NODE_NAME=etcdx
 
[kube-master]
172.16.99.121
172.16.99.122
 
[new-master] # reserved group, for adding master nodes later
#192.168.1.5
 
[kube-node]
172.16.99.123
172.16.99.124
 
[new-node] # reserved group, for adding worker nodes later
#192.168.1.xx
 
# Parameter NEW_INSTALL: yes installs a new harbor, no uses an existing harbor server
# If you do not use a domain name, you can set HARBOR_DOMAIN=""
[harbor]
#172.16.99.127 HARBOR_DOMAIN="harbor1.dexter.com" NEW_INSTALL=no
 
# Load balancing (more than 2 nodes is now supported; 2 is usually enough); installs haproxy+keepalived
[lb]
#192.168.1.1 LB_ROLE=backup
#192.168.1.2 LB_ROLE=master
 
# [optional] external load balancer, for forwarding traffic to NodePort-exposed services etc. in self-hosted environments
[ex-lb]
#192.168.1.6 LB_ROLE=backup EX_VIP=192.168.1.250
#192.168.1.7 LB_ROLE=master EX_VIP=192.168.1.250
 
[all:vars]
# ---------main cluster parameters---------------
# Cluster deploy mode: allinone, single-master, multi-master
DEPLOY_MODE=multi-master
 
# Cluster major version; currently supported: v1.8, v1.9, v1.10, v1.11, v1.12, v1.13
K8S_VER="v1.13"
 
# Cluster MASTER IP, i.e. the VIP on the LB nodes (the template suggests VIP port 8443 to distinguish it from the default apiserver port; this deployment keeps 6443 on the VIP)
# On public clouds, use the cloud load balancer's internal address and listening port
MASTER_IP="172.16.99.148"
KUBE_APISERVER="https://{{ MASTER_IP }}:6443"
 
# Cluster network plugin: currently supports calico, flannel, kube-router, cilium
CLUSTER_NETWORK="calico"
 
# Service CIDR; make sure it does not overlap any existing internal network
SERVICE_CIDR="10.20.0.0/16"
 
# Pod CIDR (Cluster CIDR); make sure it does not overlap any existing internal network
CLUSTER_CIDR="172.31.0.0/16"
 
# Service port range (NodePort range)
NODE_PORT_RANGE="30000-60000"
 
# kubernetes service IP (pre-allocated; usually the first IP in SERVICE_CIDR)
CLUSTER_KUBERNETES_SVC_IP="10.20.0.1"
 
# Cluster DNS service IP (pre-allocated from SERVICE_CIDR)
CLUSTER_DNS_SVC_IP="10.20.254.254"
 
# Cluster DNS domain
CLUSTER_DNS_DOMAIN="cluster.local."
 
# Username and password for cluster basic auth
BASIC_AUTH_USER="admin"
BASIC_AUTH_PASS="123456"
 
# ---------additional parameters--------------------
# Default binary directory
bin_dir="/usr/bin"
 
# Certificate directory
ca_dir="/etc/kubernetes/ssl"
 
# Deploy directory, i.e. the ansible working directory; changing it is not recommended
base_dir="/etc/ansible"
 
1.6.4: Prepare the binaries:
Upload k8s.1-13-5.tar.gz to the /etc/ansible/bin directory on the ansible server:
root@ansible-vm1:~# cd /etc/ansible/bin
root@ansible-vm1:/etc/ansible/bin# pwd
/etc/ansible/bin
root@ansible-vm1:/etc/ansible/bin# tar xvf k8s.1-13-5.tar.gz
root@ansible-vm1:/etc/ansible/bin# mv bin/* .
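A quick sanity check that the unpacked binaries are executable and the expected version (assuming the tarball places kube-apiserver and etcd directly in bin/):
root@ansible-vm1:/etc/ansible/bin# ./kube-apiserver --version
root@ansible-vm1:/etc/ansible/bin# ./etcd --version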
 
 
1.6.5: Deploy step by step:
Initialize the environment and deploy the highly available k8s cluster with the ansible playbooks.
1.6.5.1: Environment initialization:
root@ansible-vm1:/etc/ansible/bin# cd /etc/ansible/
root@ansible-vm1:/etc/ansible# ansible-playbook 01.prepare.yml
 
1.6.5.2: Deploy the etcd cluster:
Optionally change the service startup script path.
 
root@ansible-vm1:/etc/ansible# ansible-playbook 02.etcd.yml
 
Verify the etcd service on each etcd server:
root@etcd-vm1:~# export NODE_IPS="172.16.99.141 172.16.99.142 172.16.99.143"
root@etcd-vm1:~# for ip in ${NODE_IPS}; do ETCDCTL_API=3 /usr/bin/etcdctl --endpoints=https://${ip}:2379 --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem endpoint health; done
https://172.16.99.141:2379 is healthy: successfully committed proposal: took = 7.789938ms
https://172.16.99.142:2379 is healthy: successfully committed proposal: took = 6.976676ms
https://172.16.99.143:2379 is healthy: successfully committed proposal: took = 7.911517ms
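Cluster membership and leader status can also be inspected from any single node:
root@etcd-vm1:~# ETCDCTL_API=3 /usr/bin/etcdctl --endpoints=https://172.16.99.141:2379 --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem member list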
 
1.6.5.3: Deploy docker:
Optionally change the startup script path; since docker was already installed in advance here, this step does not need to be re-executed:
root@ansible-vm1:/etc/ansible# ansible-playbook 03.docker.yml
 
1.6.5.4: Deploy the masters:
Optionally change the startup script path.
 
root@ansible-vm1:/etc/ansible# ansible-playbook 04.kube-master.yml
 
1.6.5.5: Deploy the nodes:
docker must be installed on the node machines.
root@ansible-vm1:/etc/ansible# vim roles/kube-node/defaults/main.yml
# base (pause) container image
SANDBOX_IMAGE: "harbor1.dexter.com/baseimages/pause-amd64:3.1"
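Before running the playbook below, the pause image must already exist in the local registry; a sketch for seeding it (mirrorgooglecontainers/pause-amd64 on Docker Hub is one mirror that carries it; substitute whichever source is reachable from your network):
# docker pull mirrorgooglecontainers/pause-amd64:3.1
# docker tag mirrorgooglecontainers/pause-amd64:3.1 harbor1.dexter.com/baseimages/pause-amd64:3.1
# docker push harbor1.dexter.com/baseimages/pause-amd64:3.1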
root@ansible-vm1:/etc/ansible# ansible-playbook 05.kube-node.yml
 
Verify:
root@k8s-m1:~# kubectl get nodes
NAME            STATUS                     ROLES    AGE   VERSION
172.16.99.121   Ready,SchedulingDisabled   master   75m   v1.13.5
172.16.99.122   Ready,SchedulingDisabled   master   75m   v1.13.5
172.16.99.123   Ready                      node     71m   v1.13.5
172.16.99.124   Ready                      node     71m   v1.13.5
1.6.5.6: Deploy the calico network service:
Optionally change the calico startup script path and the CSR certificate details. Load the calico images and push them to the local registry:
 
# docker  load -i calico-cni.tar
# docker tag calico/cni:v3.3.6 harbor1.dexter.com/baseimages/cni:v3.3.6
# docker push harbor1.dexter.com/baseimages/cni:v3.3.6
 
# docker load -i calico-node.tar
# docker tag calico/node:v3.3.6  harbor1.dexter.com/baseimages/node:v3.3.6
# docker push harbor1.dexter.com/baseimages/node:v3.3.6
 
# docker load -i calico-kube-controllers.tar
# docker tag calico/kube-controllers:v3.3.6   harbor1.dexter.com/baseimages/kube-controllers:v3.3.6
# docker push harbor1.dexter.com/baseimages/kube-controllers:v3.3.6
 
root@ansible-vm1:/etc/ansible# vim roles/calico/defaults/main.yml
calico_ver: "v3.3.6"
root@ansible-vm1:/etc/ansible# mv /bin/calicoctl{,.bak}
Upload the calicoctl binary from the v3.3.6 release bin directory to /etc/ansible/bin:
root@ansible-vm1:/etc/ansible/bin# chmod +x calicoctl
 
Modify the following sections so the images come from the local registry:
root@ansible-vm1:/etc/ansible# vim roles/calico/templates/calico-v3.3.yaml.j2
        - name: calico-node
          image: harbor1.dexter.com/baseimages/node:v3.3.6
 
        - name: install-cni
          image: harbor1.dexter.com/baseimages/cni:v3.3.6
 
        - name: calico-kube-controllers
          image: harbor1.dexter.com/baseimages/kube-controllers:v3.3.6
Run the network deployment:
root@ansible-vm1:/etc/ansible# ansible-playbook 06.network.yml
 
Verify calico:
root@k8s-n1:~# calicoctl version
Client Version:    v3.3.6
Build date:        2019-03-28T00:10:36+0000
Git commit:        00031ac8
Cluster Version:   v3.3.6
Cluster Type:      k8s,bgp
 
root@k8s-m1:~# calicoctl node status
Calico process is running.
 
IPv4 BGP status
+---------------+-------------------+-------+----------+-------------+
| PEER ADDRESS  |     PEER TYPE     | STATE |  SINCE   |    INFO     |
+---------------+-------------------+-------+----------+-------------+
| 172.16.99.122 | node-to-node mesh | up    | 05:09:29 | Established |
+---------------+-------------------+-------+----------+-------------+
 
IPv6 BGP status
No IPv6 peers found.
 
kubectl run net-test1 --image=alpine --replicas=4 sleep 360000    # create test pods to verify cross-host pod networking
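Then check which nodes the pods landed on and ping across them (the pod name and IP below are placeholders, to be read from the first command's output):
root@k8s-m1:~# kubectl get pods -o wide
root@k8s-m1:~# kubectl exec <net-test1-pod-on-n1> -- ping -c 3 <ip-of-pod-on-n2>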
 
1.6.5.7: Add a node:
[kube-node]
192.168.7.110
 
[new-node] # reserved group, for adding worker nodes later
192.168.7.111
root@ansible-vm1:/etc/ansible# ansible-playbook 20.addnode.yml
 
1.6.5.8: Add a master:
Comment out the [lb] group first, otherwise the playbook will not proceed.
 
[kube-master]
192.168.7.101
 
[new-master] # reserved group, for adding master nodes later
192.168.7.102
 
root@k8s-m1:/etc/ansible# ansible-playbook 21.addmaster.yml
 
1.6.5.9: Verify the current state:
root@k8s-m1:~# calicoctl node status
Calico process is running.
 
IPv4 BGP status
+---------------+-------------------+-------+----------+-------------+
| PEER ADDRESS  |     PEER TYPE     | STATE |  SINCE   |    INFO     |
+---------------+-------------------+-------+----------+-------------+
| 172.16.99.122 | node-to-node mesh | up    | 06:18:57 | Established |
| 172.16.99.123 | node-to-node mesh | up    | 06:19:33 | Established |
| 172.16.99.124 | node-to-node mesh | up    | 06:19:14 | Established |
+---------------+-------------------+-------+----------+-------------+
 
IPv6 BGP status
No IPv6 peers found.
 
root@k8s-m1:~# kubectl  get nodes
NAME            STATUS                     ROLES    AGE    VERSION
172.16.99.121   Ready,SchedulingDisabled   master   123m   v1.13.5
172.16.99.122   Ready,SchedulingDisabled   master   123m   v1.13.5
172.16.99.123   Ready                      node     119m   v1.13.5
172.16.99.124   Ready                      node     119m   v1.13.5
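
As a final sanity pass, the control-plane component health can still be queried directly on v1.13:
root@k8s-m1:~# kubectl get cs
root@k8s-m1:~# kubectl cluster-info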