Software versions
OS: CentOS Linux release 7.4.1708
ansible: 2.9.10
etcd: 3.3.22
docker: 19.03.5
kubernetes: v1.16.10
Basic information

Hostname | IP address | Installed software | Notes
-------- | ---------- | ------------------ | -----
cd-k8s-master-etcd-1 | 192.168.1.61 | ansible, docker, etcd, kube-apiserver, kube-scheduler, kube-controller-manager, flannel, Haproxy, KeepAlived | Ansible is installed on this host; unless stated otherwise, all operations are performed here
cd-k8s-master-etcd-2 | 192.168.1.105 | ansible, docker, etcd, kube-apiserver, kube-scheduler, kube-controller-manager, flannel, Haproxy, KeepAlived |
cd-k8s-master-etcd-3 | 192.168.1.209 | ansible, docker, etcd, kube-apiserver, kube-scheduler, kube-controller-manager, flannel, Haproxy, KeepAlived |
cd-k8s-node-1 | 192.168.1.107 | docker, kubelet, kube-proxy, flannel |
cd-k8s-node-2 | 192.168.1.211 | docker, kubelet, kube-proxy, flannel |
cd-k8s-node-3 | 192.168.1.62 | docker, kubelet, kube-proxy, flannel |
(VIP) | 192.168.1.3 | VIP | Virtual IP shared by the masters
1. Initial configuration
1. Configure host names for all nodes
mkdir -p /root/files/
cd /root/files/
cat > /root/files/hosts << EOF
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.1.61 cd-k8s-master-etcd-1
192.168.1.105 cd-k8s-master-etcd-2
192.168.1.209 cd-k8s-master-etcd-3
192.168.1.107 cd-k8s-node-1
192.168.1.211 cd-k8s-node-2
192.168.1.62 cd-k8s-node-3
EOF
2. Create the Ansible inventory
mkdir -p /etc/ansible/roles/k8s/
cat > /etc/ansible/roles/k8s/hosts <<EOF
[all]
192.168.1.61
192.168.1.105
192.168.1.209
192.168.1.107
192.168.1.211
192.168.1.62
[master]
192.168.1.61
192.168.1.105
192.168.1.209
[etcd]
192.168.1.61
192.168.1.105
192.168.1.209
[node]
192.168.1.107
192.168.1.211
192.168.1.62
[test]
192.168.1.62
EOF
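Before running any playbook it is worth verifying that Ansible can reach every host (this assumes passwordless SSH from this machine to all nodes is already configured):
ansible all -i /etc/ansible/roles/k8s/hosts -m ping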
3. Initialize each node
cat > /root/files/k8s.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_nonlocal_bind = 1
net.ipv4.ip_forward = 1
vm.swappiness = 0
EOF
cat > /etc/ansible/roles/k8s/init.yaml << EOF
---
- name: Init K8S env
hosts: all
gather_facts: False
tasks:
- name: close swapoff
shell: swapoff -a
- name: close firewalld
shell: systemctl stop firewalld.service && systemctl disable firewalld.service
# - name: close selinux
# shell: setenforce 0
- name: sed selinux
shell: sed -i 's/enforcing/disabled/' /etc/selinux/config
- name: copy k8s.conf
copy: src=/root/files/k8s.conf dest=/etc/sysctl.d/k8s.conf owner=root group=root force=yes
- name: sysctl -p
shell: sysctl --system
- name: copy /etc/hosts
copy: src=/root/files/hosts dest=/etc/hosts owner=root group=root force=yes
- name: yum install -y wget
shell: yum install -y wget
EOF
ansible-playbook -i /etc/ansible/roles/k8s/hosts /etc/ansible/roles/k8s/init.yaml
2. Install Docker
cat > /etc/ansible/roles/k8s/docker_init.yaml <<EOF
---
- name: Install Docker
hosts: all
gather_facts: False
tasks:
- name: remove old docker version
shell: yum remove -y docker docker-ce docker-client docker-client-latest docker-common docker-latest docker-latest-logrotate docker-logrotate docker-engine
- name: add yum tools
shell: yum install -y yum-utils
- name: add docker yum repos
shell: yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
- name: Install docker
shell: yum install -y docker-ce-19.03.5 docker-ce-cli-19.03.5 containerd.io
- name: start docker
shell: systemctl enable docker && systemctl start docker
EOF
ansible-playbook -i /etc/ansible/roles/k8s/hosts /etc/ansible/roles/k8s/docker_init.yaml
Install the CFSSL tools
For an introduction to CFSSL and its certificates, see: http://www.javashuo.com/article/p-tefktauk-bb.html
mkdir -p /root/soft/ && cd /root/soft/
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
chmod +x cfssl_linux-amd64 cfssljson_linux-amd64 cfssl-certinfo_linux-amd64
mv cfssl_linux-amd64 /usr/bin/cfssl
mv cfssljson_linux-amd64 /usr/bin/cfssljson
mv cfssl-certinfo_linux-amd64 /usr/bin/cfssl-certinfo
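A quick sanity check that the tools are installed and on the PATH:
cfssl version
which cfssljson cfssl-certinfo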
3. Generate the CA certificate and distribute it to all nodes
mkdir -p /opt/k8s/certs/ && cd /opt/k8s/certs/
cat > /opt/k8s/certs/ca-config.json <<EOF
{
"signing": {
"default": {
"expiry": "87600h"
},
"profiles": {
"kubernetes": {
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
],
"expiry": "87600h"
}
}
}
}
EOF
cat > /opt/k8s/certs/ca-csr.json <<EOF
{
"CN": "kubernetes",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "ShangHai",
"L": "ShangHai",
"O": "k8s",
"OU": "System"
}
]
}
EOF
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
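This produces ca.pem, ca-key.pem and ca.csr in the current directory; the generated CA can be inspected with the cfssl-certinfo tool installed earlier:
cfssl-certinfo -cert ca.pem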
cat > /etc/ansible/roles/k8s/copy_ca.yaml <<EOF
---
- name: Copy CA file to all machine
hosts: all
gather_facts: False
tasks:
- name: mkdir -p /etc/kubernetes/ssl/
shell: mkdir -p /etc/kubernetes/ssl/
- name: copy ca files
copy: src=/opt/k8s/certs/ca.pem dest=/etc/kubernetes/ssl/ca.pem
- name: copy ca-key files
copy: src=/opt/k8s/certs/ca-key.pem dest=/etc/kubernetes/ssl/ca-key.pem
- name: copy ca.csr files
copy: src=/opt/k8s/certs/ca.csr dest=/etc/kubernetes/ssl/ca.csr
EOF
ansible-playbook -i /etc/ansible/roles/k8s/hosts /etc/ansible/roles/k8s/copy_ca.yaml
4. Install the etcd cluster
1. Create certificates
cd /opt/k8s/certs/
cat > /opt/k8s/certs/etcd-csr.json <<EOF
{
"CN": "etcd",
"hosts": [
"127.0.0.1",
"192.168.1.61",
"192.168.1.105",
"192.168.1.209",
"192.168.1.3",
"cd-k8s-master-etcd-1",
"cd-k8s-master-etcd-2",
"cd-k8s-master-etcd-3"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "ShangHai",
"L": "ShangHai",
"O": "k8s",
"OU": "System"
}
]
}
EOF
cd /opt/k8s/certs
cfssl gencert -ca=/opt/k8s/certs/ca.pem -ca-key=/opt/k8s/certs/ca-key.pem -config=/opt/k8s/certs/ca-config.json -profile=kubernetes etcd-csr.json | cfssljson -bare etcd
cat > /root/files/etcd.service <<EOF
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos
[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
ExecStart=/usr/bin/etcd \\
--data-dir=/var/lib/etcd \\
--name=HOSTNAME \\
--cert-file=/etc/kubernetes/ssl/etcd.pem \\
--key-file=/etc/kubernetes/ssl/etcd-key.pem \\
--trusted-ca-file=/etc/kubernetes/ssl/ca.pem \\
--peer-cert-file=/etc/kubernetes/ssl/etcd.pem \\
--peer-key-file=/etc/kubernetes/ssl/etcd-key.pem \\
--peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem \\
--peer-client-cert-auth \\
--client-cert-auth \\
--listen-peer-urls=https://IP:2380 \\
--initial-advertise-peer-urls=https://IP:2380 \\
--listen-client-urls=https://IP:2379,http://127.0.0.1:2379 \\
--advertise-client-urls=https://IP:2379 \\
--initial-cluster-token=etcd-cluster-0 \\
--initial-cluster=cd-k8s-master-etcd-1=https://192.168.1.61:2380,cd-k8s-master-etcd-2=https://192.168.1.105:2380,cd-k8s-master-etcd-3=https://192.168.1.209:2380 \\
--initial-cluster-state=new \\
--auto-compaction-mode=periodic \\
--auto-compaction-retention=1 \\
--max-request-bytes=33554432 \\
--quota-backend-bytes=6442450944 \\
--heartbeat-interval=250 \\
--election-timeout=2000
Restart=on-failure
RestartSec=5
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
# The HOSTNAME and IP placeholders above must be replaced with each etcd node's hostname and IP address; the playbook below does this with sed on every node.
2. Distribute the certificates and start the etcd cluster
cat > /etc/ansible/roles/k8s/etcd_init.yaml <<EOF
---
- name: Init etcd Cluster
hosts: etcd
gather_facts: False
tasks:
# - name: add etcd user
# shell: useradd -s /sbin/nologin etcd
- name: mkdir etcd
shell: mkdir -p /var/lib/etcd && mkdir -p /etc/kubernetes/ssl && chown -R etcd:etcd /var/lib/etcd
- name: copy etcd files
copy: src=/root/files/etcd dest=/usr/bin/etcd owner=root group=root force=yes mode=755
- name: copy etcdctl file
copy: src=/root/files/etcdctl dest=/usr/bin/etcdctl owner=root group=root force=yes mode=755
- name: copy etcd.service file
copy: src=/root/files/etcd.service dest=/etc/systemd/system/etcd.service owner=root group=root force=yes mode=755
- name: replace etcd.service info
shell: IP=\`ip addr|grep "192.168.1."|awk '{print \$2}'|awk -F '/' '{print \$1}'\` && sed -i 's#IP#'\${IP}'#g' /etc/systemd/system/etcd.service && hostname=\`hostname\` && sed -i 's#HOSTNAME#'\${hostname}'#g' /etc/systemd/system/etcd.service
- name: copy cert
copy: src=/opt/k8s/certs/ca.pem dest=/etc/kubernetes/ssl/ca.pem owner=root group=root force=yes mode=755
- name: copy ca-key.pem
copy: src=/opt/k8s/certs/ca-key.pem dest=/etc/kubernetes/ssl/ca-key.pem owner=root group=root force=yes mode=755
- name: copy etcd.pem
copy: src=/opt/k8s/certs/etcd.pem dest=/etc/kubernetes/ssl/etcd.pem owner=root group=root force=yes mode=755
- name: copy etcd-key.pem
copy: src=/opt/k8s/certs/etcd-key.pem dest=/etc/kubernetes/ssl/etcd-key.pem owner=root group=root force=yes mode=755
- name: start etcd.service
shell: systemctl daemon-reload && systemctl enable etcd.service && systemctl restart etcd.service
EOF
ansible etcd -i /etc/ansible/roles/k8s/hosts -m group -a 'name=etcd'
ansible etcd -i /etc/ansible/roles/k8s/hosts -m user -a 'name=etcd group=etcd comment="etcd user" shell=/sbin/nologin home=/var/lib/etcd createhome=no'
ansible etcd -i /etc/ansible/roles/k8s/hosts -m file -a 'path=/var/lib/etcd state=directory owner=etcd group=etcd'
ansible-playbook -i /etc/ansible/roles/k8s/hosts /etc/ansible/roles/k8s/etcd_init.yaml
3. Check the etcd cluster status
# For etcd 3.3, run the following:
etcdctl --ca-file=/etc/kubernetes/ssl/ca.pem --cert-file=/etc/kubernetes/ssl/etcd.pem --key-file=/etc/kubernetes/ssl/etcd-key.pem cluster-health
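If you prefer the etcd v3 API, a roughly equivalent health check looks like this (a sketch; note that the v3 flag names differ from the v2 command above):
ETCDCTL_API=3 etcdctl --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/kubernetes/ssl/etcd.pem --key=/etc/kubernetes/ssl/etcd-key.pem --endpoints=https://192.168.1.61:2379,https://192.168.1.105:2379,https://192.168.1.209:2379 endpoint health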
5. Install the master components
1. Create certificates
mkdir -p /opt/k8s/certs/
cat > /opt/k8s/certs/admin-csr.json <<EOF
{
"CN": "admin",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "ShangHai",
"L": "ShangHai",
"O": "system:masters",
"OU": "System"
}
]
}
EOF
cd /opt/k8s/certs/
cfssl gencert -ca=/opt/k8s/certs/ca.pem -ca-key=/opt/k8s/certs/ca-key.pem -config=/opt/k8s/certs/ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin
cat >/etc/ansible/roles/k8s/master_init.yaml <<EOF
---
- name: Copy k8s master files
hosts: master
gather_facts: False
tasks:
- name: copy kube_apiserver
copy: src=/root/files/kube-apiserver dest=/usr/bin/kube-apiserver owner=root group=root mode=755
- name: copy kube-scheduler
copy: src=/root/files/kube-scheduler dest=/usr/bin/kube-scheduler owner=root group=root mode=755
- name: copy kubectl file
copy: src=/root/files/kubectl dest=/usr/bin/kubectl owner=root group=root mode=755
- name: copy kube-controller-manager
copy: src=/root/files/kube-controller-manager dest=/usr/bin/kube-controller-manager owner=root group=root mode=755
- name: copy admin.pem
copy: src=/opt/k8s/certs/admin.pem dest=/etc/kubernetes/ssl/admin.pem owner=root group=root
- name: copy admin-key.pem
copy: src=/opt/k8s/certs/admin-key.pem dest=/etc/kubernetes/ssl/admin-key.pem owner=root group=root
EOF
ansible-playbook -i /etc/ansible/roles/k8s/hosts /etc/ansible/roles/k8s/master_init.yaml
cat >/etc/ansible/roles/k8s/node_init.yaml <<EOF
---
- name: Copy k8s node files
hosts: node
gather_facts: False
tasks:
- name: copy kube_proxy
copy: src=/root/files/kube-proxy dest=/usr/bin/kube-proxy owner=root group=root mode=755
- name: copy kubelet
copy: src=/root/files/kubelet dest=/usr/bin/kubelet owner=root group=root mode=755
EOF
ansible-playbook -i /etc/ansible/roles/k8s/hosts /etc/ansible/roles/k8s/node_init.yaml
kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=https://127.0.0.1:6443
# Set the client authentication parameters
kubectl config set-credentials admin \
--client-certificate=/etc/kubernetes/ssl/admin.pem \
--embed-certs=true \
--client-key=/etc/kubernetes/ssl/admin-key.pem
# Set the context parameters
kubectl config set-context admin@kubernetes \
--cluster=kubernetes \
--user=admin
# Set the default context
kubectl config use-context admin@kubernetes
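The apiserver is not running yet at this point, so kubectl commands that contact the server will fail; you can still confirm that the kubeconfig was written as expected:
kubectl config view --minify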
2. Distribute the kubeconfig file
cat >/etc/ansible/roles/k8s/copy_kubeconfig.yaml <<EOF
---
- name: Copy k8s kubeconfig files
hosts: master
gather_facts: False
tasks:
- name: mkdir -p home kube dir
shell: mkdir -p /root/.kube
- name: copy kubeconfig
copy: src=/root/.kube/config dest=/root/.kube/config owner=root group=root
EOF
ansible-playbook -i /etc/ansible/roles/k8s/hosts /etc/ansible/roles/k8s/copy_kubeconfig.yaml
3. Deploy the kube-apiserver component
cat > /opt/k8s/certs/kubernetes-csr.json<<EOF
{
"CN": "kubernetes",
"hosts": [
"127.0.0.1",
"192.168.1.61",
"192.168.1.105",
"192.168.1.209",
"192.168.1.3",
"10.0.0.1",
"localhost",
"kubernetes",
"kubernetes.default",
"kubernetes.default.svc",
"kubernetes.default.svc.cluster",
"kubernetes.default.svc.cluster.local"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "ShangHai",
"L": "ShangHai",
"O": "k8s",
"OU": "System"
}
]
}
EOF
cd /opt/k8s/certs/
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kubernetes-csr.json | cfssljson -bare kubernetes
cat >/etc/ansible/roles/k8s/copy_master_k8s-key.yaml <<EOF
---
- name: Copy Kubernetes key files
hosts: master
gather_facts: False
tasks:
- name: mkdir -p /etc/kubernetes/ssl
shell: mkdir -p /etc/kubernetes/ssl/
- name: copy kubernetes.pem file
copy: src=/opt/k8s/certs/kubernetes.pem dest=/etc/kubernetes/ssl/kubernetes.pem owner=root group=root
- name: copy kubernetes-key.pem file
copy: src=/opt/k8s/certs/kubernetes-key.pem dest=/etc/kubernetes/ssl/kubernetes-key.pem owner=root group=root
EOF
ansible-playbook -i /etc/ansible/roles/k8s/hosts /etc/ansible/roles/k8s/copy_master_k8s-key.yaml
### Issue: if kubectl get cs returns
The connection to the server 127.0.0.1:6443 was refused - did you specify the right host or port?
Workaround: temporarily edit $HOME/.kube/config (vi $HOME/.kube/config) and change
server: https://127.0.0.1:6443
to
server: https://192.168.1.105:6443
This has to be done on every master node; otherwise kube-scheduler may fail to start with an error like "failed to acquire lease kube-system/kube-scheduler".
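Instead of editing the file by hand, the same change can be made with kubectl (a sketch; it assumes the cluster entry is named kubernetes, as created by the kubectl config set-cluster command above):
# Repoint the default kubeconfig at this node's apiserver (adjust the IP per node)
kubectl config set-cluster kubernetes --server=https://192.168.1.105:6443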
# Configure the token file used by kube-apiserver clients
# Create the TLS bootstrapping token
[root@cd-k8s-master-etcd-1 k8s]# head -c 16 /dev/urandom | od -An -t x | tr -d ' '
211dc6d5d7afb8a6e948507597c2c9f8
[root@cd-k8s-master-etcd-1 k8s]# mkdir -p /etc/kubernetes/config
[root@cd-k8s-master-etcd-1 k8s]# cat <<EOF > /etc/kubernetes/config/token.csv
211dc6d5d7afb8a6e948507597c2c9f8,kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF
cat >/etc/ansible/roles/k8s/copy_master_k8s-key.yaml <<EOF
---
- name: Copy token to master
hosts: master
gather_facts: False
tasks:
- name: mkdir -p /etc/kubernetes/config
shell: mkdir -p /etc/kubernetes/config/
- name: copy token file to master
copy: src=/etc/kubernetes/config/token.csv dest=/etc/kubernetes/config/token.csv owner=root group=root
EOF
ansible-playbook -i /etc/ansible/roles/k8s/hosts /etc/ansible/roles/k8s/copy_master_k8s-key.yaml
# Create the kube-apiserver configuration file
cat <<EOF >/root/files/kube-apiserver
KUBE_APISERVER_OPTS="--logtostderr=true \\
--v=4 \\
--etcd-servers=https://192.168.1.61:2379,https://192.168.1.105:2379,https://192.168.1.209:2379 \\
--bind-address=IP \\
--secure-port=6443 \\
--advertise-address=IP \\
--allow-privileged=true \\
--service-cluster-ip-range=10.0.0.0/16 \\
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota,NodeRestriction \\
--authorization-mode=RBAC,Node \\
--enable-bootstrap-token-auth \\
--token-auth-file=/etc/kubernetes/config/token.csv \\
--service-node-port-range=30000-50000 \\
--tls-cert-file=/etc/kubernetes/ssl/kubernetes.pem \\
--tls-private-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \\
--client-ca-file=/etc/kubernetes/ssl/ca.pem \\
--service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \\
--etcd-cafile=/etc/kubernetes/ssl/ca.pem \\
--etcd-certfile=/etc/kubernetes/ssl/kubernetes.pem \\
--etcd-keyfile=/etc/kubernetes/ssl/kubernetes-key.pem"
EOF
# Create the kube-apiserver systemd unit
cat > /root/files/kube-apiserver.service << EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=/etc/kubernetes/config/kube-apiserver
ExecStart=/bin/kube-apiserver \$KUBE_APISERVER_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
cat >/etc/ansible/roles/k8s/copy_master_kube-apiserver.yaml <<EOF
---
- name: Copy kube-apiserver file to master
hosts: master
gather_facts: False
tasks:
- name: mkdir /etc/kubernetes/config/
shell: mkdir -p /etc/kubernetes/config/
- name: copy kube-apiserver config file
copy: src=/root/files/kube-apiserver dest=/etc/kubernetes/config/kube-apiserver owner=root group=root force=yes
- name: replace etcd.service info
shell: IP=\`ip addr|grep "192.168.1."|awk '{print \$2}'|awk -F '/' '{print \$1}'\` && sed -i 's#IP#'\${IP}'#g' /etc/kubernetes/config/kube-apiserver
- name: copy kube-apiserver.service
copy: src=/root/files/kube-apiserver.service dest=/usr/lib/systemd/system/kube-apiserver.service owner=root group=root force=yes mode=755
- name: systemctl reload && add reboot machine && start kube-apiserver.service
shell: systemctl daemon-reload && systemctl enable kube-apiserver && systemctl restart kube-apiserver
EOF
ansible-playbook -i /etc/ansible/roles/k8s/hosts /etc/ansible/roles/k8s/copy_master_kube-apiserver.yaml
# Check the startup status
[root@cd-k8s-master-etcd-1 k8s]# ansible master -i /etc/ansible/roles/k8s/hosts -m shell -a "systemctl status kube-apiserver"|grep "Active"
4. Deploy the kube-scheduler component
cat > /root/files/kube-scheduler <<EOF
KUBE_SCHEDULER_OPTS="--logtostderr=true --v=4 --master=127.0.0.1:8080 --leader-elect"
EOF
# Parameter notes: --master connects to the local apiserver; --leader-elect enables automatic leader election when multiple instances of the component are running (HA).
cat > /root/files/kube-scheduler.service <<EOF
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=-/etc/kubernetes/config/kube-scheduler
ExecStart=/bin/kube-scheduler \$KUBE_SCHEDULER_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
cat >/etc/ansible/roles/k8s/copy_master_kube-scheduler.yaml <<EOF
---
- name: Copy kube-scheduler file to master
hosts: master
gather_facts: False
tasks:
- name: mkdir /etc/kubernetes/config/
shell: mkdir -p /etc/kubernetes/config/
- name: copy kube-scheduler config
copy: src=/root/files/kube-scheduler dest=/etc/kubernetes/config/kube-scheduler owner=root group=root force=yes
- name: copy kube-scheduler.service
copy: src=/root/files/kube-scheduler.service dest=/usr/lib/systemd/system/kube-scheduler.service owner=root group=root force=yes mode=755
- name: systemctl reload && add reboot machine && start kube-apiserver.service
shell: systemctl daemon-reload && systemctl enable kube-scheduler && systemctl start kube-scheduler.service
EOF
ansible-playbook -i /etc/ansible/roles/k8s/hosts /etc/ansible/roles/k8s/copy_master_kube-scheduler.yaml
# Check the kube-scheduler status
[root@cd-k8s-master-etcd-1 k8s]# ansible master -i /etc/ansible/roles/k8s/hosts -m shell -a "systemctl status kube-scheduler"|grep "Active"
5. Deploy the kube-controller-manager component
cat >/root/files/kube-controller-manager <<EOF
KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=true \\
--v=4 \\
--master=127.0.0.1:8080 \\
--leader-elect=true \\
--address=127.0.0.1 \\
--service-cluster-ip-range=10.0.0.0/24 \\
--cluster-name=kubernetes \\
--cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \\
--cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \\
--root-ca-file=/etc/kubernetes/ssl/ca.pem \\
--service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem"
EOF
cat >/root/files/kube-controller-manager.service <<EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=/etc/kubernetes/config/kube-controller-manager
ExecStart=/bin/kube-controller-manager \$KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
cat >/etc/ansible/roles/k8s/copy_master_kube-controller-manager.yaml <<EOF
---
- name: Copy Kube-controller-manager file to master
hosts: master
gather_facts: False
tasks:
- name: mkdir /etc/kubernetes/config/
shell: mkdir -p /etc/kubernetes/config/
- name: copy kube-controller-manager config file
copy: src=/root/files/kube-controller-manager dest=/etc/kubernetes/config/kube-controller-manager owner=root group=root force=yes
- name: copy kube-controller-manager.service
copy: src=/root/files/kube-controller-manager.service dest=/usr/lib/systemd/system/kube-controller-manager.service owner=root group=root force=yes mode=755
- name: systemctl reload && add reboot machine && start kube-controller-manager.service
shell: systemctl daemon-reload && systemctl enable kube-controller-manager && systemctl start kube-controller-manager.service
EOF
ansible-playbook -i /etc/ansible/roles/k8s/hosts /etc/ansible/roles/k8s/copy_master_kube-controller-manager.yaml
# Check the kube-controller-manager status
[root@cd-k8s-master-etcd-1 k8s]# ansible master -i /etc/ansible/roles/k8s/hosts -m shell -a "systemctl status kube-controller-manager" |grep "Active"
6. Master high-availability deployment (Haproxy + KeepAlived)
# Install Haproxy and KeepAlived
cat >/root/files/haproxy.cfg << EOF
global
log /dev/log local0
log /dev/log local1 notice
chroot /var/lib/haproxy
stats socket /var/run/haproxy-admin.sock mode 660 level admin
stats timeout 30s
user haproxy
group haproxy
daemon
nbproc 1
defaults
log global
timeout connect 5000
timeout client 10m
timeout server 10m
listen admin_stats
bind 0.0.0.0:10080
mode http
log 127.0.0.1 local0 err
stats refresh 30s
stats uri /status
stats realm welcome login\ Haproxy
stats auth admin:123456
stats hide-version
stats admin if TRUE
listen kube-master
bind 0.0.0.0:8443
mode tcp
option tcplog
balance roundrobin
server 192.168.1.61 192.168.1.61:6443 check inter 2000 fall 2 rise 2 weight 1
server 192.168.1.105 192.168.1.105:6443 check inter 2000 fall 2 rise 2 weight 1
server 192.168.1.209 192.168.1.209:6443 check inter 2000 fall 2 rise 2 weight 1
EOF
cat >/etc/ansible/roles/k8s/install_ha.yaml <<EOF
---
- name: Install Haproxy && KeepAlive software
hosts: master
gather_facts: False
tasks:
- name: Install Haproxy
shell: yum install -y haproxy
- name: Install KeepAlive
shell: yum install -y keepalived
# - name: backup haproxy config
# shell: mv /etc/haproxy/haproxy.cfg /etc/haproxy/haproxy.cfg.bak
- name: copy haproxy config
copy: src=/root/files/haproxy.cfg dest=/etc/haproxy/haproxy.cfg owner=haproxy group=haproxy force=yes
- name: start haproxy
shell: systemctl enable haproxy && systemctl restart haproxy
# - name: backup KeepAlived config
# shell: cp /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.bak
EOF
ansible-playbook -i /etc/ansible/roles/k8s/hosts /etc/ansible/roles/k8s/install_ha.yaml
# Check the Haproxy status
[root@cd-k8s-master-etcd-1 k8s]# ansible master -i /etc/ansible/roles/k8s/hosts -m shell -a "systemctl status haproxy"|grep "Active"
# Configure KeepAlived
[root@cd-k8s-master-etcd-1 keepalived]# vi /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
router_id master1
}
vrrp_instance VI_1 {
state MASTER
interface ens192
virtual_router_id 51
priority 100
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
192.168.1.3
}
}
Notes:
1) In global_defs keep only router_id (it must differ on every node);
2) Adjust interface (the NIC the VIP is bound to) and virtual_ipaddress (the VIP address and prefix length);
3) On the other nodes, only change state to BACKUP and set priority lower than 100.
The full configuration on every node is as follows:
[root@cd-k8s-master-etcd-1 keepalived]# ansible master -i /etc/ansible/roles/k8s/hosts -m shell -a "cat /etc/keepalived/keepalived.conf"
192.168.1.105 | CHANGED | rc=0 >>
! Configuration File for keepalived
global_defs {
router_id master2
}
vrrp_instance VI_1 {
state BACKUP
interface ens192
virtual_router_id 51
priority 82
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
192.168.1.3
}
}
192.168.1.61 | CHANGED | rc=0 >>
! Configuration File for keepalived
global_defs {
router_id master1
}
vrrp_instance VI_1 {
state MASTER
interface ens192
virtual_router_id 51
priority 100
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
192.168.1.3
}
}
192.168.1.209 | CHANGED | rc=0 >>
! Configuration File for keepalived
global_defs {
router_id master3
}
vrrp_instance VI_1 {
state BACKUP
interface ens192
virtual_router_id 51
priority 83
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
192.168.1.3
}
}
# Start the KeepAlived service
[root@cd-k8s-master-etcd-1 keepalived]# ansible master -i /etc/ansible/roles/k8s/hosts -m shell -a "systemctl enable keepalived && systemctl start keepalived"
# Check the VIP
[root@cd-k8s-master-etcd-1 keepalived]# ansible master -i /etc/ansible/roles/k8s/hosts -m shell -a "ip addr|grep inet"
7. Node deployment
With TLS authentication enabled on the master apiserver, a node's kubelet must present a valid certificate signed by the cluster CA before it can talk to the apiserver. Signing certificates by hand becomes tedious when there are many nodes, which is what the TLS Bootstrapping mechanism solves: the kubelet automatically requests a certificate from the apiserver as a low-privileged user, and the apiserver signs the kubelet's certificate dynamically.
1. Create the kubelet bootstrap kubeconfig file
[root@cd-k8s-master-etcd-1 files]# cat /etc/kubernetes/config/token.csv
211dc6d5d7afb8a6e948507597c2c9f8,kubelet-bootstrap,10001,"system:kubelet-bootstrap"
cat > /root/files/environment.sh << EOF
# Create the kubelet bootstrapping kubeconfig
BOOTSTRAP_TOKEN=211dc6d5d7afb8a6e948507597c2c9f8
KUBE_APISERVER="https://192.168.1.3:8443"
# Set the cluster parameters
kubectl config set-cluster kubernetes \\
--certificate-authority=/etc/kubernetes/ssl/ca.pem \\
--embed-certs=true \\
--server=\${KUBE_APISERVER} \\
--kubeconfig=bootstrap.kubeconfig
# Set the client authentication parameters
kubectl config set-credentials kubelet-bootstrap \\
--token=\${BOOTSTRAP_TOKEN} \\
--kubeconfig=bootstrap.kubeconfig
# Set the context parameters
kubectl config set-context default \\
--cluster=kubernetes \\
--user=kubelet-bootstrap \\
--kubeconfig=bootstrap.kubeconfig
# Set the default context
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
EOF
[root@cd-k8s-master-etcd-1 files]# sh /root/files/environment.sh
2. Create the kubelet.kubeconfig file
cat > /root/files/envkubelet.kubeconfig.sh <<EOF
# Create the kubelet bootstrapping kubeconfig
BOOTSTRAP_TOKEN=211dc6d5d7afb8a6e948507597c2c9f8
KUBE_APISERVER="https://192.168.1.3:8443"
# Set the cluster parameters
kubectl config set-cluster kubernetes \\
--certificate-authority=/etc/kubernetes/ssl/ca.pem \\
--embed-certs=true \\
--server=\${KUBE_APISERVER} \\
--kubeconfig=kubelet.kubeconfig
# Set the client authentication parameters
kubectl config set-credentials kubelet \\
--token=\${BOOTSTRAP_TOKEN} \\
--kubeconfig=kubelet.kubeconfig
# Set the context parameters
kubectl config set-context default \\
--cluster=kubernetes \\
--user=kubelet \\
--kubeconfig=kubelet.kubeconfig
# Set the default context
kubectl config use-context default --kubeconfig=kubelet.kubeconfig
EOF
[root@cd-k8s-master-etcd-1 files]# sh /root/files/envkubelet.kubeconfig.sh
3. Bind the kubelet-bootstrap user to the system cluster role
kubectl create clusterrolebinding kubelet-bootstrap \
--clusterrole=system:node-bootstrapper \
--user=kubelet-bootstrap
4. Create the kubelet configuration template files
cat > /root/files/kubelet << EOF
KUBELET_OPTS="--logtostderr=true \\
--v=4 \\
--hostname-override=HOSTNAME \\
--kubeconfig=/etc/kubernetes/config/kubelet.kubeconfig \\
--bootstrap-kubeconfig=/etc/kubernetes/config/bootstrap.kubeconfig \\
--config=/etc/kubernetes/config/kubelet-config.yaml \\
--cert-dir=/etc/kubernetes/ssl \\
--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0"
EOF
cat > /root/files/bootstrap.kubeconfig << EOF
apiVersion: v1
clusters:
- cluster:
certificate-authority: /etc/kubernetes/ssl/ca.pem
server: https://192.168.1.3:8443
name: kubernetes
contexts:
- context:
cluster: kubernetes
user: kubelet-bootstrap
name: default
current-context: default
kind: Config
preferences: {}
users:
- name: kubelet-bootstrap
user:
token: 211dc6d5d7afb8a6e948507597c2c9f8
EOF
cat > /root/files/kubelet-config.yaml << EOF
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: 0.0.0.0
port: 10250
readOnlyPort: 10255
cgroupDriver: cgroupfs
clusterDNS:
- 10.0.0.2
clusterDomain: cluster.local
failSwapOn: false
authentication:
anonymous:
enabled: true
webhook:
cacheTTL: 2m0s
enabled: true
x509:
clientCAFile: /etc/kubernetes/ssl/ca.pem
authorization:
mode: Webhook
webhook:
cacheAuthorizedTTL: 5m0s
cacheUnauthorizedTTL: 30s
evictionHard:
imagefs.available: 15%
memory.available: 100Mi
nodefs.available: 10%
nodefs.inodesFree: 5%
maxOpenFiles: 1000000
maxPods: 110
EOF
cat > /root/files/kubelet.kubeconfig << EOF
apiVersion: v1
clusters:
- cluster:
certificate-authority: /etc/kubernetes/ssl/ca.pem
server: https://192.168.1.3:8443
name: default-cluster
contexts:
- context:
cluster: default-cluster
namespace: default
user: default-auth
name: default-context
current-context: default-context
kind: Config
preferences: {}
users:
- name: default-auth
user:
client-certificate: /etc/kubernetes/ssl/kubelet-client-current.pem
client-key: /etc/kubernetes/ssl/kubelet-client-current.pem
EOF
cat > /root/files/kubelet.service << EOF
[Unit]
Description=Kubernetes Kubelet
After=docker.service
Requires=docker.service
[Service]
EnvironmentFile=/etc/kubernetes/config/kubelet
ExecStart=/bin/kubelet \$KUBELET_OPTS
Restart=on-failure
KillMode=process
[Install]
WantedBy=multi-user.target
EOF
cat >/etc/ansible/roles/k8s/copy_node-kubelet.yaml <<EOF
---
- name: Copy Node files to nodes
hosts: node
gather_facts: False
tasks:
- name: mkdir /etc/kubernetes/config/
shell: mkdir -p /etc/kubernetes/config/
- name: copy kubelet file
copy: src=/root/files/kubelet dest=/etc/kubernetes/config/kubelet owner=root group=root force=yes
- name: copy bootstrap.kubeconfig file
copy: src=/root/files/bootstrap.kubeconfig dest=/etc/kubernetes/config/bootstrap.kubeconfig owner=root group=root force=yes
- name: copy kubelet-config.yaml file
copy: src=/root/files/kubelet-config.yaml dest=/etc/kubernetes/config/kubelet-config.yaml owner=root group=root force=yes
- name: copy kubelet.kubeconfig file
copy: src=/root/files/kubelet.kubeconfig dest=/etc/kubernetes/config/kubelet.kubeconfig owner=root group=root force=yes
- name: copy kubelet.service file
copy: src=/root/files/kubelet.service dest=/usr/lib/systemd/system/kubelet.service owner=root group=root force=yes mode=755
- name: replace kubelet info
shell: HOSTNAME=\`hostname\` && sed -i 's#HOSTNAME#'\${HOSTNAME}'#g' /etc/kubernetes/config/kubelet
- name: add reboot and start kubelet.service
shell: systemctl enable kubelet.service && systemctl start kubelet.service
EOF
ansible-playbook -i /etc/ansible/roles/k8s/hosts /etc/ansible/roles/k8s/copy_node-kubelet.yaml
[root@cd-k8s-master-etcd-1 k8s]# kubectl get csr
NAME AGE REQUESTOR CONDITION
node-csr-Arp8iLv-iuvwaRnKx0K-EFa1vBXh-byRJzqxqj5jFAs 3s kubelet-bootstrap Pending
node-csr-w8UjzWe9HAz583WTP0iI7kY8bojJaX0s0cEAyPrx4qo 50m kubelet-bootstrap Approved,Issued
node-csr-zqE1LTAyJ3POfd0wUuYdvfhWsyucuEB4I43LP-5jAxI 2s kubelet-bootstrap Pending
[root@cd-k8s-master-etcd-1 k8s]# kubectl certificate approve node-csr-Arp8iLv-iuvwaRnKx0K-EFa1vBXh-byRJzqxqj5jFAs node-csr-w8UjzWe9HAz583WTP0iI7kY8bojJaX0s0cEAyPrx4qo node-csr-zqE1LTAyJ3POfd0wUuYdvfhWsyucuEB4I43LP-5jAxI
certificatesigningrequest.certificates.k8s.io/node-csr-Arp8iLv-iuvwaRnKx0K-EFa1vBXh-byRJzqxqj5jFAs approved
certificatesigningrequest.certificates.k8s.io/node-csr-w8UjzWe9HAz583WTP0iI7kY8bojJaX0s0cEAyPrx4qo approved
certificatesigningrequest.certificates.k8s.io/node-csr-zqE1LTAyJ3POfd0wUuYdvfhWsyucuEB4I43LP-5jAxI approved
[root@cd-k8s-master-etcd-1 k8s]# kubectl get csr
NAME AGE REQUESTOR CONDITION
node-csr-Arp8iLv-iuvwaRnKx0K-EFa1vBXh-byRJzqxqj5jFAs 74s kubelet-bootstrap Approved,Issued
node-csr-w8UjzWe9HAz583WTP0iI7kY8bojJaX0s0cEAyPrx4qo 52m kubelet-bootstrap Approved,Issued
node-csr-zqE1LTAyJ3POfd0wUuYdvfhWsyucuEB4I43LP-5jAxI 73s kubelet-bootstrap Approved,Issued
[root@cd-k8s-master-etcd-1 k8s]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
192.168.1.211 Ready <none> 51m v1.16.10
cd-k8s-node-1 Ready <none> 2m20s v1.16.10
cd-k8s-node-3 Ready <none> 2m20s v1.16.10
5. Create the kube-proxy certificate and distribute it to the nodes
cat > /opt/k8s/certs/kube-proxy-csr.json <<EOF
{
"CN": "system:kube-proxy",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "ShangHai",
"L": "ShangHai",
"O": "k8s",
"OU": "System"
}
]
}
EOF
cd /opt/k8s/certs/
cfssl gencert -ca=/opt/k8s/certs/ca.pem \
-ca-key=/opt/k8s/certs/ca-key.pem \
-config=/opt/k8s/certs/ca-config.json \
-profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
ansible node -i /etc/ansible/roles/k8s/hosts -m copy -a "src=/opt/k8s/certs/kube-proxy.pem dest=/etc/kubernetes/ssl/kube-proxy.pem force=yes"
ansible node -i /etc/ansible/roles/k8s/hosts -m copy -a "src=/opt/k8s/certs/kube-proxy-key.pem dest=/etc/kubernetes/ssl/kube-proxy-key.pem force=yes"
6. Create the kube-proxy configuration files and start kube-proxy
cat > /root/files/kube-proxy.conf << EOF
KUBE_PROXY_OPTS="--logtostderr=false \\
--v=4 \\
--log-dir=/var/log/kubernetes/logs \\
--config=/etc/kubernetes/config/kube-proxy-config.yaml"
EOF
cat > /root/files/kube-proxy.kubeconfig << EOF
apiVersion: v1
clusters:
- cluster:
certificate-authority: /etc/kubernetes/ssl/ca.pem
server: https://192.168.1.3:8443
name: kubernetes
contexts:
- context:
cluster: kubernetes
user: kube-proxy
name: default
current-context: default
kind: Config
preferences: {}
users:
- name: kube-proxy
user:
client-certificate: /etc/kubernetes/ssl/kube-proxy.pem
client-key: /etc/kubernetes/ssl/kube-proxy-key.pem
EOF
cat > /root/files/kube-proxy-config.yaml<< EOF
kind: KubeProxyConfiguration
apiVersion: kubeproxy.config.k8s.io/v1alpha1
address: 0.0.0.0
metricsBindAddress: 0.0.0.0:10249
clientConnection:
kubeconfig: /etc/kubernetes/config/kube-proxy.kubeconfig
hostnameOverride: HOSTNAME
clusterCIDR: 10.0.0.0/24
mode: ipvs
ipvs:
scheduler: "rr"
iptables:
masqueradeAll: true
EOF
cat > /root/files/kube-proxy.service<<EOF
[Unit]
Description=Kubernetes Proxy
After=network.target
[Service]
EnvironmentFile=/etc/kubernetes/config/kube-proxy.conf
ExecStart=/bin/kube-proxy \$KUBE_PROXY_OPTS
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
cat >/etc/ansible/roles/k8s/copy_node-kube-proxy.yaml <<EOF
---
- name: Copy Node files to nodes
hosts: node
gather_facts: False
tasks:
- name: mkdir /etc/kubernetes/config/
shell: mkdir -p /etc/kubernetes/config/
- name: mkdir /var/log/kubernetes/logs
shell: mkdir -p /var/log/kubernetes/logs
- name: copy kube-proxy.conf file
copy: src=/root/files/kube-proxy.conf dest=/etc/kubernetes/config/kube-proxy.conf owner=root group=root force=yes
- name: copy kube-proxy.kubeconfig file
copy: src=/root/files/kube-proxy.kubeconfig dest=/etc/kubernetes/config/kube-proxy.kubeconfig owner=root group=root force=yes
- name: copy kube-proxy-config.yaml file
copy: src=/root/files/kube-proxy-config.yaml dest=/etc/kubernetes/config/kube-proxy-config.yaml owner=root group=root force=yes
- name: copy kube-proxy.service file
copy: src=/root/files/kube-proxy.service dest=/usr/lib/systemd/system/kube-proxy.service owner=root group=root force=yes mode=755
- name: replace kube-proxy-config.yaml info
shell: HOSTNAME=\`hostname\` && sed -i 's#HOSTNAME#'\${HOSTNAME}'#g' /etc/kubernetes/config/kube-proxy-config.yaml
- name: add reboot and start
shell: systemctl daemon-reload && systemctl enable kube-proxy && systemctl restart kube-proxy
EOF
ansible-playbook -i /etc/ansible/roles/k8s/hosts /etc/ansible/roles/k8s/copy_node-kube-proxy.yaml
ansible node -i /etc/ansible/roles/k8s/hosts -m shell -a "systemctl restart kube-proxy"
ansible node -i /etc/ansible/roles/k8s/hosts -m shell -a "yum install -y ipvsadm"
ansible node -i /etc/ansible/roles/k8s/hosts -m shell -a "ipvsadm -Ln"
8. Configure the Flannel network
1. Create the flannel certificate and private key
cd /opt/k8s/certs
cat >flanneld-csr.json<<EOF
{
"CN": "flanneld",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "ShangHai",
"L": "ShangHai",
"O": "k8s",
"OU": "System"
}
]
}
EOF
cfssl gencert -ca=/opt/k8s/certs/ca.pem \
-ca-key=/opt/k8s/certs/ca-key.pem \
-config=/opt/k8s/certs/ca-config.json \
-profile=kubernetes flanneld-csr.json | cfssljson -bare flanneld
cat >/root/files/flanneld.service <<EOF
[Unit]
Description=Flanneld overlay address etcd agent
After=network.target
After=network-online.target
Wants=network-online.target
After=etcd.service
Before=docker.service
[Service]
Type=notify
ExecStart=/usr/bin/flanneld --ip-masq \\
-etcd-cafile=/etc/kubernetes/ssl/ca.pem \\
-etcd-certfile=/etc/kubernetes/ssl/flanneld.pem \\
-etcd-keyfile=/etc/kubernetes/ssl/flanneld-key.pem \\
-etcd-endpoints=https://192.168.1.61:2379,https://192.168.1.105:2379,https://192.168.1.209:2379 \\
-etcd-prefix=/coreos.com/network/
ExecStartPost=/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/subnet.env
Restart=on-failure
[Install]
WantedBy=multi-user.target
RequiredBy=docker.service
EOF
cat >/etc/ansible/roles/k8s/copy_flannel.yaml <<EOF
---
- name: Copy Node files to nodes
hosts: all
gather_facts: False
tasks:
- name: copy flanneld.pem file
copy: src=/opt/k8s/certs/flanneld.pem dest=/etc/kubernetes/ssl/flanneld.pem owner=root group=root force=yes
- name: copy flanneld-key.pem file
copy: src=/opt/k8s/certs/flanneld-key.pem dest=/etc/kubernetes/ssl/flanneld-key.pem owner=root group=root force=yes
- name: copy flanneld file
copy: src=/root/files/flanneld dest=/usr/bin/flanneld owner=root group=root force=yes mode=755
- name: copy mk-docker-opts.sh file
copy: src=/root/files/mk-docker-opts.sh dest=/usr/bin/mk-docker-opts.sh owner=root group=root force=yes mode=755
- name: copy /root/files/flanneld.service file
copy: src=/root/files/flanneld.service dest=/usr/lib/systemd/system/flanneld.service owner=root group=root force=yes mode=755
EOF
ansible-playbook -i /etc/ansible/roles/k8s/hosts /etc/ansible/roles/k8s/copy_flannel.yaml
# For etcd 3.3, run the following:
etcdctl --ca-file=/etc/kubernetes/ssl/ca.pem --cert-file=/etc/kubernetes/ssl/etcd.pem --key-file=/etc/kubernetes/ssl/etcd-key.pem mkdir /coreos.com/network/
etcdctl --ca-file=/etc/kubernetes/ssl/ca.pem --cert-file=/etc/kubernetes/ssl/etcd.pem --key-file=/etc/kubernetes/ssl/etcd-key.pem mk /coreos.com/network/config '{"Network":"10.0.0.0/16","SubnetLen":24,"Backend":{"Type":"vxlan"}}'
ansible all -i /etc/ansible/roles/k8s/hosts -m shell -a "systemctl daemon-reload && systemctl enable flanneld && systemctl start flanneld"
# List the subnets assigned to all cluster hosts
etcdctl --ca-file=/etc/kubernetes/ssl/ca.pem --cert-file=/etc/kubernetes/ssl/etcd.pem --key-file=/etc/kubernetes/ssl/etcd-key.pem ls /coreos.com/network/subnets
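Since the docker unit configured below reads /run/flannel/subnet.env, it is also worth checking that the file exists on every host once flanneld is up:
ansible all -i /etc/ansible/roles/k8s/hosts -m shell -a "cat /run/flannel/subnet.env"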
2. Configure Docker to start with the flannel-assigned subnet
cat > /root/files/docker.service << EOF
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
BindsTo=containerd.service
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket
[Service]
Type=notify
# the default is not to use systemd for cgroups because the delegate issues still
# exists and systemd currently does not support the cgroup feature set required
# for containers run by docker
#ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
EnvironmentFile=/run/flannel/subnet.env
ExecStart=/usr/bin/dockerd \$DOCKER_NETWORK_OPTIONS
ExecReload=/bin/kill -s HUP \$MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
# Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229.
# Both the old, and new location are accepted by systemd 229 and up, so using the old location
# to make them work for either version of systemd.
StartLimitBurst=3
# Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230.
# Both the old, and new name are accepted by systemd 230 and up, so using the old name to make
# this option work for either version of systemd.
StartLimitInterval=60s
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Comment TasksMax if your systemd version does not support it.
# Only systemd 226 and above support this option.
TasksMax=infinity
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
[Install]
WantedBy=multi-user.target
EOF
ansible all -i /etc/ansible/roles/k8s/hosts -m copy -a "src=/root/files/docker.service dest=/usr/lib/systemd/system/docker.service mode=755"
ansible all -i /etc/ansible/roles/k8s/hosts -m shell -a "systemctl daemon-reload && systemctl restart docker.service"
3. Create a pod to test the cluster
kubectl run nginx --image=nginx:1.16.0
kubectl expose deployment nginx --port 80 --type LoadBalancer
kubectl get pods -o wide
kubectl get svc
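To confirm the service actually answers, fetch the NodePort that was assigned and curl one of the nodes on it (a quick check; the node IP below is just one of the nodes from the table above):
NODE_PORT=$(kubectl get svc nginx -o jsonpath='{.spec.ports[0].nodePort}')
curl http://192.168.1.107:${NODE_PORT}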
4. Scale the application with the scale command
kubectl scale deployments/nginx --replicas=4
kubectl get pods -o wide
9. Deploy Dashboard v2.0 (beta5)
1. Download and modify the Dashboard deployment manifest
mkdir -p $HOME/dashboard-certs && cd $HOME/dashboard-certs
wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-beta5/aio/deploy/recommended.yaml
### Modify recommended.yaml as follows:
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
type: NodePort    # added
ports:
- port: 443
targetPort: 8443
nodePort: 30001   # added
selector:
k8s-app: kubernetes-dashboard
# The auto-generated certificate is rejected by many browsers, so we create our own and comment out the kubernetes-dashboard-certs Secret declaration:
#apiVersion: v1
#kind: Secret
#metadata:
# labels:
# k8s-app: kubernetes-dashboard
# name: kubernetes-dashboard-certs
# namespace: kubernetes-dashboard
#type: Opaque
2. Create certificates
mkdir -p $HOME/dashboard-certs && cd $HOME/dashboard-certs
# Create the key file
openssl genrsa -out dashboard.key 2048
# Certificate signing request
openssl req -days 36000 -new -out dashboard.csr -key dashboard.key -subj '/CN=dashboard-cert'
# Self-signed certificate
openssl x509 -req -in dashboard.csr -signkey dashboard.key -out dashboard.crt
# Create the namespace
kubectl create namespace kubernetes-dashboard
kubectl create clusterrolebinding system:anonymous --clusterrole=cluster-admin --user=system:anonymous
# Create the kubernetes-dashboard-certs Secret
kubectl create secret generic kubernetes-dashboard-certs --from-file=dashboard.key --from-file=dashboard.crt -n kubernetes-dashboard
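You can confirm that the Secret was created in the target namespace before applying the manifest:
kubectl get secret kubernetes-dashboard-certs -n kubernetes-dashboard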
3. Install the Dashboard
# Install
kubectl create -f ./recommended.yaml
# Check the result
kubectl get pods -A -o wide
kubectl get service -n kubernetes-dashboard -o wide
4. Create a service account
cat > ./dashboard-admin.yaml <<EOF
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
name: dashboard-admin
namespace: kubernetes-dashboard
EOF
kubectl apply -f dashboard-admin.yaml
5. Create a cluster role binding
cat >./dashboard-admin-bind-cluster-role.yaml <<EOF
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: dashboard-admin-bind-cluster-role
labels:
k8s-app: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: dashboard-admin
namespace: kubernetes-dashboard
EOF
kubectl apply -f dashboard-admin-bind-cluster-role.yaml
6. Get the login token
kubectl -n kubernetes-dashboard describe secret $(kubectl -n kubernetes-dashboard get secret | grep dashboard-admin-token | awk '{print $1}')
7. Check the Dashboard status
kubectl get pods -o wide -A
8. Log in to the Dashboard to verify the service
kubectl cluster-info
Open the Dashboard NodePort URL in Firefox, for example:
https://192.168.1.211:30001/#/overview?namespace=default
10. Install CoreDNS
1. Download the files
mkdir /opt/coredns && cd /opt/coredns/
wget https://raw.githubusercontent.com/coredns/deployment/master/kubernetes/deploy.sh
wget https://raw.githubusercontent.com/coredns/deployment/master/kubernetes/coredns.yaml.sed
chmod +x deploy.sh
# Check the service network segment
kubectl get svc
2. Modify the deployment file
Replace the $DNS_DOMAIN and $DNS_SERVER_IP variables with their actual values and change the image reference.
Here the deploy.sh script is used to do this directly:
./deploy.sh -s -r 10.0.0.0/16 -i 10.0.0.2 -d cluster.local > coredns.yaml
Note: the network segment is 10.0.0.0/16 (the same as the service-cluster-ip-range defined for the apiserver, not the clusterCIDR in the kube-proxy config), and the DNS address is set to 10.0.0.2.
./deploy.sh -i 10.0.0.2 -d cluster.local > coredns.yaml
The relevant parts of the generated coredns.yaml end up as follows:
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
data:
  Corefile: |
    .:53 {
        errors
        health {
            lameduck 5s
        }
        ready
        kubernetes cluster.local in-addr.arpa ip6.arpa {
            pods insecure
            upstream
            fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        forward . /etc/resolv.conf
        cache 30
        loop
        reload
        loadbalance
    }
.......
        image: harbor.ttsingops.com/coredns/coredns:1.5.0
.......
kubectl apply -f coredns.yaml
kubectl get svc,pod -n kube-system
3. Modify the kubelet DNS parameters
This must be done on all node machines.
ansible node -i /etc/ansible/roles/k8s/hosts -m lineinfile -a 'dest=/etc/kubernetes/config/kubelet regexp="pause-amd64" line="--cluster-dns=10.0.0.2 \
--cluster-domain=cluster.local. \
--resolv-conf=/etc/resolv.conf \
--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0\"
" '
ansible node -i /etc/ansible/roles/k8s/hosts -m shell -a 'systemctl daemon-reload && systemctl restart kubelet && systemctl status kubelet'
4. Verify CoreDNS name resolution
kubectl run -it busybox --image=busybox:1.28 --restart=Never -- sh
# Run on cd-k8s-node-1
docker exec -it busybox sh
kubectl get pods -A -o wide
/ # nslookup kubernetes.default
/ # nslookup www.baidu.com
/ # cat /etc/resolv.conf
5. Install metrics-server
mkdir -p /root/metrics-server && cd /root/metrics-server
wget https://github.com/kubernetes-sigs/metrics-server/releases/download/v0.3.6/components.yaml
Change
imagePullPolicy: IfNotPresent
args:
- --cert-dir=/tmp
- --secure-port=4443
to the following:
imagePullPolicy: IfNotPresent
args:
- --cert-dir=/tmp
- --kubelet-insecure-tls
- --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
- --secure-port=4443
6. Pull the image on the nodes
ansible node -i /etc/ansible/roles/k8s/hosts -m shell -a 'docker pull bluersw/metrics-server-amd64:v0.3.6'
ansible node -i /etc/ansible/roles/k8s/hosts -m shell -a 'docker tag bluersw/metrics-server-amd64:v0.3.6 k8s.gcr.io/metrics-server-amd64:v0.3.6'
ansible node -i /etc/ansible/roles/k8s/hosts -m shell -a 'docker images'
kubectl create -f components.yaml
kubectl get pods --all-namespaces
# Verify
kubectl top node
Open the Dashboard to check whether monitoring metrics now appear.
If you see the following error:
[root@cd-k8s-master-etcd-1 ~]# kubectl top node
Error from server (NotFound): the server could not find the requested resource (get services http:heapster:)
then edit the kube-apiserver configuration file /etc/kubernetes/config/kube-apiserver on every master node
and add the following flag:
--enable-aggregator-routing=true
Finally, restart kube-apiserver.
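One way to apply this on all masters with Ansible (a sketch; it assumes --etcd-keyfile is still the last flag in the file, as in the configuration created earlier, and should only be run once):
ansible master -i /etc/ansible/roles/k8s/hosts -m shell -a "sed -i 's#kubernetes-key.pem\"#kubernetes-key.pem --enable-aggregator-routing=true\"#' /etc/kubernetes/config/kube-apiserver && systemctl restart kube-apiserver"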