Kubernetes v1.15.4 Deployment Manual


Configuration Requirements

 

The following configuration on Alibaba Cloud is recommended (your own virtual machines or a private cloud also work):

  • 3 ECS instances with 2 cores and 4 GB RAM (burstable t5 instance ecs.t5-c1m2.large or equivalent; about 0.4 CNY/hour each, no charge while stopped)

  • CentOS 7.6

 

Software versions after installation:

  • Kubernetes v1.15.4

    • Calico 3.8.2

    • nginx-ingress 1.5.3

  • Docker 18.09.7

 

Check CentOS / hostname

# Run on both master and worker nodes

cat /etc/redhat-release

 

# The output of hostname here will be this machine's node name in the Kubernetes cluster

# localhost cannot be used as a node name

hostname

 

# Use the lscpu command to verify the CPU information

# Architecture: x86_64    this guide does not support the ARM architecture

# CPU(s): 2               the number of CPU cores must not be lower than 2

lscpu

 

Modify the hostname

# Change the hostname

hostnamectl set-hostname your-new-host-name

# Check the result

hostnamectl status

# Add a hosts entry resolving the hostname

echo "127.0.0.1 $(hostname)" >> /etc/hosts

 

Install Docker / kubelet

Run the following script as root on all nodes to install:

  • docker

  • nfs-utils

  • kubectl / kubeadm / kubelet

 

#!/bin/bash

# Run on both master and worker nodes

# Install docker
# Reference documentation:
# https://docs.docker.com/install/linux/docker-ce/centos/
# https://docs.docker.com/install/linux/linux-postinstall/

# Remove old versions
yum remove -y docker \
docker-client \
docker-client-latest \
docker-common \
docker-latest \
docker-latest-logrotate \
docker-logrotate \
docker-selinux \
docker-engine-selinux \
docker-engine

# Set up the yum repository
yum install -y yum-utils \
device-mapper-persistent-data \
lvm2
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

# Install and start docker
yum install -y docker-ce-18.09.7 docker-ce-cli-18.09.7 containerd.io
systemctl enable docker
systemctl start docker

# Install nfs-utils
# nfs-utils must be installed before NFS network storage can be mounted
yum install -y nfs-utils

# Disable the firewall
systemctl stop firewalld
systemctl disable firewalld

# Disable SELinux
setenforce 0
sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config

# Disable swap
swapoff -a
yes | cp /etc/fstab /etc/fstab_bak
cat /etc/fstab_bak |grep -v swap > /etc/fstab

# Modify /etc/sysctl.conf
# If these settings already exist, update them in place
sed -i "s#^net.ipv4.ip_forward.*#net.ipv4.ip_forward=1#g"  /etc/sysctl.conf
sed -i "s#^net.bridge.bridge-nf-call-ip6tables.*#net.bridge.bridge-nf-call-ip6tables=1#g"  /etc/sysctl.conf
sed -i "s#^net.bridge.bridge-nf-call-iptables.*#net.bridge.bridge-nf-call-iptables=1#g"  /etc/sysctl.conf
# If they are missing, append them
echo "net.ipv4.ip_forward = 1" >> /etc/sysctl.conf
echo "net.bridge.bridge-nf-call-ip6tables = 1" >> /etc/sysctl.conf
echo "net.bridge.bridge-nf-call-iptables = 1" >> /etc/sysctl.conf
# Apply the settings
sysctl -p

# Configure the Kubernetes yum repository
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
       http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

# Remove old versions
yum remove -y kubelet kubeadm kubectl

# Install kubelet, kubeadm and kubectl
yum install -y kubelet-1.15.4 kubeadm-1.15.4 kubectl-1.15.4

# Change the docker cgroup driver to systemd
# # In /usr/lib/systemd/system/docker.service, change the line ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
# # to ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock --exec-opt native.cgroupdriver=systemd
# Without this change, the following warning may appear when adding worker nodes:
# [WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd".
# Please follow the guide at https://kubernetes.io/docs/setup/cri/
sed -i "s#^ExecStart=/usr/bin/dockerd.*#ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock --exec-opt native.cgroupdriver=systemd#g" /usr/lib/systemd/system/docker.service

# Configure a docker registry mirror to improve image download speed and reliability
# If your access to https://hub.docker.com is already fast and stable, this step can be skipped
curl -sSL https://get.daocloud.io/daotools/set_mirror.sh | sh -s http://f1361db2.m.daocloud.io

# Restart docker and start kubelet
systemctl daemon-reload
systemctl restart docker
systemctl enable kubelet && systemctl start kubelet

docker version


Initialize the master node

 

# Run only on the master node

 

# Replace x.x.x.x with the master node's actual IP (use the internal/private IP)

# export only takes effect in the current shell session; if you open a new shell window and want to continue the installation, re-run these export commands

export MASTER_IP=x.x.x.x

 

# Replace apiserver.master with the DNS name you want for the API server (using the master's hostname as APISERVER_NAME is not recommended)

export APISERVER_NAME=apiserver.master

 

# The subnet for Kubernetes pods; it is created by Kubernetes after installation and does not need to exist in your physical network beforehand

export POD_SUBNET=10.100.0.1/20

echo "${MASTER_IP} ${APISERVER_NAME}" >> /etc/hosts

 

 

#!/bin/bash

# Run only on the master node

# Full list of configuration options: https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2
rm -f ./kubeadm-config.yaml
cat <<EOF > ./kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: v1.15.4
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
controlPlaneEndpoint: "${APISERVER_NAME}:6443"
networking:
  serviceSubnet: "10.96.0.0/16"
  podSubnet: "${POD_SUBNET}"
  dnsDomain: "cluster.local"
EOF

# kubeadm init
# Depending on your server's network speed, this can take 3-10 minutes
kubeadm init --config=kubeadm-config.yaml --upload-certs

# Configure kubectl
rm -rf /root/.kube/
mkdir /root/.kube/
cp -i /etc/kubernetes/admin.conf /root/.kube/config

# Install the Calico network plugin
# Reference: https://docs.projectcalico.org/v3.8/getting-started/kubernetes/
rm -f calico.yaml
wget https://docs.projectcalico.org/v3.8/manifests/calico.yaml
sed -i "s#192\.168\.0\.0/16#${POD_SUBNET}#" calico.yaml
kubectl apply -f calico.yaml

 

 

Check the master initialization result

 

# Run only on the master node

 

# Run the following command and wait 3-10 minutes until all pods are in the Running state

watch kubectl get pod -n kube-system -o wide

 

# View the master node initialization result

kubectl get nodes -o wide

 

 

 

Initialize the worker nodes

Get the join command parameters

Run on the master node

# Run only on the master node

kubeadm token create --print-join-command

 

Initialize the workers

Run on all worker nodes

# Run only on the worker nodes

# Replace ${MASTER_IP} with the master node's actual IP

# Replace ${APISERVER_NAME} with the APISERVER_NAME used when initializing the master node

# The values may come out empty because the worker node does not have these two environment variables set

# In that case, add "<master-ip> <apiserver-name>" to /etc/hosts manually

echo "${MASTER_IP} ${APISERVER_NAME}" >> /etc/hosts

 

# Replace with the output of the kubeadm token create command on the master node

kubeadm join apiserver.master:6443 --token mpfjma.4vjjg8flqihor4vt --discovery-token-ca-cert-hash sha256:6f7a8e40a810323672de5eee6f4d19aa2dbdb38411845a1bf5dd63485c43d303

 

 

Check the result

Run on the master node

 

kubectl get nodes

 

NAME              STATUS   ROLES    AGE     VERSION
demo-master-a-1   Ready    master   5m3s    v1.15.4
demo-worker-a-1   Ready    <none>   2m26s   v1.15.4
demo-worker-a-2   Ready    <none>   3m56s   v1.15.4

 

Use an Ingress Controller

Install the Ingress Controller

Edit mandatory.yaml

Replace the controller image with the Aliyun mirror (a scripted sketch follows the link below):

https://github.com/kubernetes/ingress-nginx/blob/master/deploy/static/mandatory.yaml
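For reference, a minimal sketch of how the download and image swap could be scripted; it assumes the upstream manifest still references the quay.io image for controller version 0.26.1, so check the image line in the downloaded file before running the sed:

# Download the upstream manifest and point the controller image at the Aliyun mirror
# The quay.io image name below is an assumption; verify it in the downloaded file first
wget -O mandatory.yaml https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/static/mandatory.yaml
sed -i "s#quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.26.1#registry.aliyuncs.com/google_containers/nginx-ingress-controller:0.26.1#g" mandatory.yaml

The resulting file should match the manifest below.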

apiVersion: v1
kind: Namespace
metadata:
  name: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx

---

kind: ConfigMap
apiVersion: v1
metadata:
  name: nginx-configuration
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx

---
kind: ConfigMap
apiVersion: v1
metadata:
  name: tcp-services
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx

---
kind: ConfigMap
apiVersion: v1
metadata:
  name: udp-services
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx

---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nginx-ingress-serviceaccount
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx

---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: nginx-ingress-clusterrole
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
rules:
  - apiGroups:
      - ""
    resources:
      - configmaps
      - endpoints
      - nodes
      - pods
      - secrets
    verbs:
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - services
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - events
    verbs:
      - create
      - patch
  - apiGroups:
      - "extensions"
      - "networking.k8s.io"
    resources:
      - ingresses
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - "extensions"
      - "networking.k8s.io"
    resources:
      - ingresses/status
    verbs:
      - update

---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
  name: nginx-ingress-role
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
rules:
  - apiGroups:
      - ""
    resources:
      - configmaps
      - pods
      - secrets
      - namespaces
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - configmaps
    resourceNames:
      # Defaults to "<election-id>-<ingress-class>"
      # Here: "<ingress-controller-leader>-<nginx>"
      # This has to be adapted if you change either parameter
      # when launching the nginx-ingress-controller.
      - "ingress-controller-leader-nginx"
    verbs:
      - get
      - update
  - apiGroups:
      - ""
    resources:
      - configmaps
    verbs:
      - create
  - apiGroups:
      - ""
    resources:
      - endpoints
    verbs:
      - get

---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
  name: nginx-ingress-role-nisa-binding
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: nginx-ingress-role
subjects:
  - kind: ServiceAccount
    name: nginx-ingress-serviceaccount
    namespace: ingress-nginx

---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: nginx-ingress-clusterrole-nisa-binding
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: nginx-ingress-clusterrole
subjects:
  - kind: ServiceAccount
    name: nginx-ingress-serviceaccount
    namespace: ingress-nginx

---

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-ingress-controller
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: ingress-nginx
      app.kubernetes.io/part-of: ingress-nginx
  template:
    metadata:
      labels:
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/part-of: ingress-nginx
      annotations:
        prometheus.io/port: "10254"
        prometheus.io/scrape: "true"
    spec:
      # wait up to five minutes for the drain of connections
      terminationGracePeriodSeconds: 300
      serviceAccountName: nginx-ingress-serviceaccount
      nodeSelector:
        kubernetes.io/os: linux
      containers:
        - name: nginx-ingress-controller
          image: registry.aliyuncs.com/google_containers/nginx-ingress-controller:0.26.1
          args:
            - /nginx-ingress-controller
            - --configmap=$(POD_NAMESPACE)/nginx-configuration
            - --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services
            - --udp-services-configmap=$(POD_NAMESPACE)/udp-services
            - --publish-service=$(POD_NAMESPACE)/ingress-nginx
            - --annotations-prefix=nginx.ingress.kubernetes.io
          securityContext:
            allowPrivilegeEscalation: true
            capabilities:
              drop:
                - ALL
              add:
                - NET_BIND_SERVICE
            # www-data -> 33
            runAsUser: 33
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          ports:
            - name: http
              containerPort: 80
            - name: https
              containerPort: 443
          livenessProbe:
            failureThreshold: 3
            httpGet:
              path: /healthz
              port: 10254
              scheme: HTTP
            initialDelaySeconds: 10
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 10
          readinessProbe:
            failureThreshold: 3
            httpGet:
              path: /healthz
              port: 10254
              scheme: HTTP
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 10
          lifecycle:
            preStop:
              exec:
                command:
                  - /wait-shutdown

---

 

Install nginx-ingress-controller

kubectl apply -f mandatory.yaml
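To confirm the controller started, check the pods in the ingress-nginx namespace (pulling the image can take a few minutes):

# Run only on the master node
# Wait until the nginx-ingress-controller pod reaches the Running state
kubectl get pods -n ingress-nginx -o wide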

 

Expose nginx-ingress ports 80 and 443

Edit ingress-service.yaml

https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/static/provider/baremetal/service-nodeport.yaml

apiVersion: v1
kind: Service
metadata:
  name: ingress-nginx
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
spec:
  type: NodePort
  ports:
    - name: http
      port: 80
      targetPort: 80
      protocol: TCP
      nodePort: 80   # expose HTTP externally on port 80
    - name: https
      port: 443
      targetPort: 443
      protocol: TCP
      nodePort: 443  # expose HTTPS externally on port 443
  selector:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
 
---

 

Modify the NodePort port range

1. Edit kube-apiserver.yaml

vim /etc/kubernetes/manifests/kube-apiserver.yaml

2. Find the line containing --service-cluster-ip-range and add the following line directly below it:

- --service-node-port-range=1-65535

3. After changing the configuration, restart the kubelet:

systemctl daemon-reload

systemctl restart kubelet

systemctl status kubelet
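kube-apiserver runs as a static pod, so restarting the kubelet recreates it with the new flag. A quick check that the flag was picked up (a sketch; kubeadm labels the apiserver pod with component=kube-apiserver):

# Run only on the master node
kubectl -n kube-system get pod -l component=kube-apiserver -o yaml | grep service-node-port-range
# Expected output: - --service-node-port-range=1-65535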

 

Start the nginx-ingress Service

kubectl apply -f ingress-service.yaml
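Check that the Service exists with the expected NodePorts:

# Run only on the master node
kubectl get svc -n ingress-nginx
# The ingress-nginx Service should be of type NodePort, mapping 80:80/TCP and 443:443/TCP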

 

Configure L7 routing rules in an Ingress

Edit nfys-ingress.yaml

apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: ingress-for-nginx  # name of the Ingress, used only for identification
  annotations:
    kubernetes.io/ingress.class: "nginx"
spec:
  rules:                      # L7 routing rules defined in the Ingress
  - http:
#    host: local.test.kinglian.cn   # route by virtual hostname
      paths:                  # route by path
      - path: /
        backend:
          serviceName: nginx-service  # backend Service: the previously created nginx-service
          servicePort: 80
      - path: /admin
        backend:
          serviceName: uaas-service  # backend Service: the previously created uaas-service
          servicePort: 2019
      - path: /dims
        backend:
          serviceName: dims-service  # backend Service: the previously created dims-service
          servicePort: 2021

---

Apply the routing rules

kubectl apply -f nfys-ingress.yaml
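To verify the routing rules, send requests to any cluster node through the NodePort Service exposed above. A sketch, assuming the backend Services (nginx-service, uaas-service, dims-service) are already deployed; replace the placeholder IP with a real node IP:

# NODE_IP is a placeholder for the IP of any cluster node
NODE_IP=x.x.x.x
curl -i http://${NODE_IP}/        # routed to nginx-service:80
curl -i http://${NODE_IP}/admin   # routed to uaas-service:2019
curl -i http://${NODE_IP}/dims    # routed to dims-service:2021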

Configure Harbor registry login credentials

1. First, log in to Harbor with docker

Log in with the username and password registered on Harbor; the account needs pull permission on the target project, otherwise the repository cannot be accessed. Example: docker login hub.yxtc.com:8081. After logging in, a ~/.docker/config.json file is generated with content like the following:

{
        "auths": {
                "hub.yxtc.com:8081": {
                        "auth": "Y3I3Olh1MTIzNDU2MjU="
                }
        }
}     


Here hub.yxtc.com:8081 is the address of the Harbor server.

 

2. Base64-encode config.json

The command is:

cat ~/.docker/config.json |base64 -w 0
ewoJImF1dGhzIjogewoJCSJyZWdpc3RyeS5raW5nbGlhbi5jbiI6IHsKCQkJImF1dGgiOiAiZW1oaGJtZHRhRHBhYldneE1Ea3pORFE1TWpFPSIKCQl9Cgl9LAoJIkh0dHBIZWFkZXJzIjogewoJCSJVc2VyLUFnZW50IjogIkRvY2tlci1DbGllbnQvMTguMDkuNyAobGludXgpIgoJfQp9

 

3. Create the secret.yaml file

File content:

apiVersion: v1
kind: Secret
metadata:
  name: mysecret # referenced later by imagePullSecrets
data:
  .dockerconfigjson: ewoJImF1dGhzIjogewoJCSJyZWdpc3RyeS5raW5nbGlhbi5jbiI6IHsKCQkJImF1dGgiOiAiZW1oaGJtZHRhRHBhYldneE1Ea3pORFE1TWpFPSIKCQl9Cgl9LAoJIkh0dHBIZWFkZXJzIjogewoJCSJVc2VyLUFnZW50IjogIkRvY2tlci1DbGllbnQvMTguMDkuNyAobGludXgpIgoJfQp9
type: kubernetes.io/dockerconfigjson


 

4. Create the secret

The command is:

kubectl create -f secret.yaml

 

5. Create a pod

Specify the secret with imagePullSecrets; the relevant fragment of the pod's YAML looks like this:

  imagePullSecrets:
    - name: mysecret  
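As an alternative to steps 2-4, kubectl can build the same kind of secret directly from the registry credentials. A sketch with placeholder username and password, using the Harbor address from the example above:

# Placeholder credentials; use the account registered on Harbor
kubectl create secret docker-registry mysecret \
  --docker-server=hub.yxtc.com:8081 \
  --docker-username=<your-username> \
  --docker-password=<your-password>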

  

Deploy a project

After deploying as described below:

1. One container will run

2. No ports will be exposed

3. /opt inside the container will be mounted on NFS storage corresponding to /nfs-share/logs on the NFS server

4. Requests must reach it through the Ingress

 

Edit uaas.yaml

apiVersion: extensions/v1beta1     # depends on the cluster version; run kubectl api-versions to list the versions supported by the current cluster
kind: Deployment        # the type of this object; here we use a Deployment
metadata:               # metadata: basic attributes and information of the Deployment
  name: uaas-deployment        # name of the Deployment
  labels:           # labels can flexibly select one or more resources; keys and values are user-defined and multiple pairs may be set
    app: uaas  # set a label with key app and value uaas on this Deployment
spec:           # the specification of this Deployment, i.e. the state you want it to have in the cluster
  replicas: 1   # create one instance of the application
  selector:         # label selector, works together with the labels above
    matchLabels: # select resources carrying the label app: uaas
      app: uaas
  template:         # template for the Pods to select or create
    metadata:   # metadata of the Pod
      labels:   # labels of the Pod; the selector above matches Pods carrying app: uaas
        app: uaas
    spec:           # desired behaviour of the Pod (i.e. what is deployed in the Pod)
      containers:       # containers to create, the same kind of container as in docker
      - name: uaas     # name of the container
        image: registry.demo.cn/prod/nfys-uaas:20191108      # image used to create the container
        imagePullPolicy: Always # Always: always pull / IfNotPresent: use the local image if present (default) / Never: only use the local image
        ports:
        - containerPort: 2019  # port exposed by the image
        volumeMounts:
        - name: uaas-persistent-storage
          mountPath: /opt # mount path (inside the container)
      volumes:
      - name: uaas-persistent-storage
        nfs:
          path: /nfs-share/logs # directory exported by the NFS server
          server: 192.168.1.61 # nfs-server
      imagePullSecrets:
      - name: nfys-secret
---

apiVersion: v1
kind: Service
metadata:
  name: uaas-service   # name of the Service
  labels:       # the Service's own labels
    app: uaas  # set a label with key app and value uaas on this Service
spec:       # definition of the Service: how it selects Pods and how it is accessed
  selector:         # label selector
    app: uaas  # select Pods carrying the label app: uaas
  ports:
  - name: uaas-port    # name of the port
    protocol: TCP           # protocol, TCP/UDP
    port: 2019            # other pods in the cluster can reach the Service on this port
    targetPort: 2019      # requests are forwarded to this port on the matched Pods (the container port)


Deploy the project with uaas.yaml

kubectl apply -f uaas.yaml
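Check that the Deployment, Pod and Service came up:

kubectl get deployment uaas-deployment
kubectl get pods -l app=uaas -o wide
kubectl get service uaas-service
# For troubleshooting image pulls or the NFS mount:
kubectl describe pod -l app=uaas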

 

Storage: Volumes

 

Pod health check mechanisms

LivenessProbe:

Used to determine whether the container is alive, i.e. whether the Pod is in the Running state. If the LivenessProbe detects that the container is unhealthy, the kubelet kills the container and decides whether to restart it according to the container's restart policy. If a container does not define a LivenessProbe, the kubelet treats the probe as always succeeding.

 

ReadinessProbe:

Used to determine whether the container has finished starting up, i.e. whether its Ready condition is True and it can accept requests. If the ReadinessProbe fails, the container's Ready condition becomes False and the controller removes the Pod's endpoint from the corresponding Service's endpoint list, so no further requests are routed to the Pod until a later probe succeeds.
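As an illustration, this is how the two probes could be added to the uaas container defined earlier. The /health endpoint and the timing values are assumptions; adjust them to what the application actually exposes:

        livenessProbe:              # container is killed and restarted per the restart policy if this keeps failing
          httpGet:
            path: /health           # hypothetical health endpoint of the application
            port: 2019
          initialDelaySeconds: 30
          periodSeconds: 10
        readinessProbe:             # Pod is removed from the Service endpoints while this fails
          httpGet:
            path: /health           # hypothetical health endpoint of the application
            port: 2019
          initialDelaySeconds: 10
          periodSeconds: 10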
