Practicing k8s with 炎炎鹽 --- Kubernetes 1.16.10 binary high-availability cluster deployment: the master nodes

Now we start deploying the k8s components on the master nodes; let's keep going!

  • 1. Deploy kube-apiserver

    cd /opt/k8s/work/
    wget https://dl.k8s.io/v1.16.10/kubernetes-server-linux-amd64.tar.gz
    tar -xzvf kubernetes-server-linux-amd64.tar.gz
    cd kubernetes
    tar -xzvf  kubernetes-src.tar.gz
    cp -f server/bin/{apiextensions-apiserver,kube-apiserver,kube-controller-manager,kube-proxy,kube-scheduler,kubeadm,kubectl,kubelet,mounter} /opt/k8s/bin/
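
    A quick sanity check that the binaries landed where the systemd units below expect them; /opt/k8s/bin is the install prefix used throughout this series, and each command should report v1.16.10:

    chmod +x /opt/k8s/bin/*
    /opt/k8s/bin/kube-apiserver --version
    /opt/k8s/bin/kube-controller-manager --version
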
  • 2. Create the certificates and related configuration files

1. Create the kubernetes certificate

cd /opt/k8s/work
cat > kubernetes-csr.json <<EOF
{
  "CN": "kubernetes",
  "hosts": [
    "127.0.0.1",
    "10.13.33.29",
    "10.13.33.38",
    "10.13.33.40",
    "10.13.33.31",
    "10.13.33.170",
    "10.254.0.1",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local."
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "4Paradigm"
    }
  ]
}
EOF
#All of the cluster node IPs and the VIP need to be added to the hosts list
#If you add entries, mind the commas: every entry except the last needs a trailing comma, otherwise the next step will fail

cfssl gencert -ca=/opt/k8s/work/ca.pem \
      -ca-key=/opt/k8s/work/ca-key.pem \
      -config=/opt/k8s/work/ca-config.json \
      -profile=kubernetes kubernetes-csr.json | cfssljson -bare kubernetes
ls kubernetes*pem
cp kubernetes*.pem /etc/kubernetes/cert/           ##Distribute the certificates to all master nodes
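
The ##distribute comments in this article mean copying the generated files to the same path on every master node; a minimal sketch of that step, assuming root SSH access to the three master IPs used above:

for node_ip in 10.13.33.29 10.13.33.38 10.13.33.40; do
    ssh root@${node_ip} "mkdir -p /etc/kubernetes/cert"
    scp kubernetes*.pem root@${node_ip}:/etc/kubernetes/cert/
done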

2. Create the encryption configuration file
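
The heredoc below substitutes ${ENCRYPTION_KEY} when it is expanded, so the variable must already be set in the current shell or the secret field will be written out empty; a common way to generate it (32 random bytes, base64-encoded):

export ENCRYPTION_KEY=$(head -c 32 /dev/urandom | base64)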

cat > encryption-config.yaml <<EOF
kind: EncryptionConfig
apiVersion: v1
resources:
  - resources:
      - secrets
    providers:
      - aescbc:
          keys:
            - name: key1
              secret: ${ENCRYPTION_KEY}
      - identity: {}
EOF

cp encryption-config.yaml /etc/kubernetes/           ##Distribute the file to all master nodes

3. Create the audit policy file

cat > audit-policy.yaml <<EOF
apiVersion: audit.k8s.io/v1beta1
kind: Policy
rules:
  # The following requests were manually identified as high-volume and low-risk, so drop them.
  - level: None
    resources:
      - group: ""
        resources:
          - endpoints
          - services
          - services/status
    users:
      - 'system:kube-proxy'
    verbs:
      - watch
  - level: None
    resources:
      - group: ""
        resources:
          - nodes
          - nodes/status
    userGroups:
      - 'system:nodes'
    verbs:
      - get
  - level: None
    namespaces:
      - kube-system
    resources:
      - group: ""
        resources:
          - endpoints
    users:
      - 'system:kube-controller-manager'
      - 'system:kube-scheduler'
      - 'system:serviceaccount:kube-system:endpoint-controller'
    verbs:
      - get
      - update
  - level: None
    resources:
      - group: ""
        resources:
          - namespaces
          - namespaces/status
          - namespaces/finalize
    users:
      - 'system:apiserver'
    verbs:
      - get
  # Don't log HPA fetching metrics.
  - level: None
    resources:
      - group: metrics.k8s.io
    users:
      - 'system:kube-controller-manager'
    verbs:
      - get
      - list
  # Don't log these read-only URLs.
  - level: None
    nonResourceURLs:
      - '/healthz*'
      - /version
      - '/swagger*'
  # Don't log events requests.
  - level: None
    resources:
      - group: ""
        resources:
          - events
  # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes
  - level: Request
    omitStages:
      - RequestReceived
    resources:
      - group: ""
        resources:
          - nodes/status
          - pods/status
    users:
      - kubelet
      - 'system:node-problem-detector'
      - 'system:serviceaccount:kube-system:node-problem-detector'
    verbs:
      - update
      - patch
  - level: Request
    omitStages:
      - RequestReceived
    resources:
      - group: ""
        resources:
          - nodes/status
          - pods/status
    userGroups:
      - 'system:nodes'
    verbs:
      - update
      - patch
  # deletecollection calls can be large, don't log responses for expected namespace deletions
  - level: Request
    omitStages:
      - RequestReceived
    users:
      - 'system:serviceaccount:kube-system:namespace-controller'
    verbs:
      - deletecollection
  # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data,
  # so only log at the Metadata level.
  - level: Metadata
    omitStages:
      - RequestReceived
    resources:
      - group: ""
        resources:
          - secrets
          - configmaps
      - group: authentication.k8s.io
        resources:
          - tokenreviews
  # Get responses can be large; skip them.
  - level: Request
    omitStages:
      - RequestReceived
    resources:
      - group: ""
      - group: admissionregistration.k8s.io
      - group: apiextensions.k8s.io
      - group: apiregistration.k8s.io
      - group: apps
      - group: authentication.k8s.io
      - group: authorization.k8s.io
      - group: autoscaling
      - group: batch
      - group: certificates.k8s.io
      - group: extensions
      - group: metrics.k8s.io
      - group: networking.k8s.io
      - group: policy
      - group: rbac.authorization.k8s.io
      - group: scheduling.k8s.io
      - group: settings.k8s.io
      - group: storage.k8s.io
    verbs:
      - get
      - list
      - watch
  # Default level for known APIs
  - level: RequestResponse
    omitStages:
      - RequestReceived
    resources:
      - group: ""
      - group: admissionregistration.k8s.io
      - group: apiextensions.k8s.io
      - group: apiregistration.k8s.io
      - group: apps
      - group: authentication.k8s.io
      - group: authorization.k8s.io
      - group: autoscaling
      - group: batch
      - group: certificates.k8s.io
      - group: extensions
      - group: metrics.k8s.io
      - group: networking.k8s.io
      - group: policy
      - group: rbac.authorization.k8s.io
      - group: scheduling.k8s.io
      - group: settings.k8s.io
      - group: storage.k8s.io
  # Default level for all other requests.
  - level: Metadata
    omitStages:
      - RequestReceived
EOF

cp audit-policy.yaml /etc/kubernetes/audit-policy.yaml    ##Distribute to all master nodes

4. Create the proxy-client certificate signing request

cat > proxy-client-csr.json <<EOF
{
  "CN": "aggregator",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "4Paradigm"
    }
  ]
}
EOF

##Generate the certificate and private key
cfssl gencert -ca=/etc/kubernetes/cert/ca.pem \
  -ca-key=/etc/kubernetes/cert/ca-key.pem  \
  -config=/etc/kubernetes/cert/ca-config.json  \
  -profile=kubernetes proxy-client-csr.json | cfssljson -bare proxy-client

ls proxy-client*.pem

cp proxy-client*.pem /etc/kubernetes/cert/         ##Distribute to all master nodes
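
Optionally, confirm that the new certificate's CN is aggregator, which has to match --requestheader-allowed-names in the kube-apiserver unit below (cfssl-certinfo ships with the cfssl toolkit already used in this series):

cfssl-certinfo -cert /etc/kubernetes/cert/proxy-client.pem
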
  • 3. Start kube-apiserver
    1. Create kube-apiserver.service (replace --advertise-address and --bind-address with this node's IP, point --etcd-servers at your etcd endpoints, and adjust --service-cluster-ip-range and --service-node-port-range for your environment)
    cat > /etc/systemd/system/kube-apiserver.service <<EOF
    [Unit]
    Description=Kubernetes API Server
    Documentation=https://github.com/GoogleCloudPlatform/kubernetes
    After=network.target
    [Service]
    WorkingDirectory=/data/k8s/k8s/kube-apiserver
    ExecStart=/opt/k8s/bin/kube-apiserver \
       --enable-admission-plugins=MutatingAdmissionWebhook,ValidatingAdmissionWebhook,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota,NodeRestriction \
       --advertise-address=10.13.33.38 \
       --default-not-ready-toleration-seconds=360 \
       --default-unreachable-toleration-seconds=360 \
       --feature-gates=DynamicAuditing=true \
       --max-mutating-requests-inflight=2000 \
       --max-requests-inflight=4000 \
       --default-watch-cache-size=200 \
       --delete-collection-workers=2 \
       --encryption-provider-config=/etc/kubernetes/encryption-config.yaml \
       --etcd-cafile=/etc/kubernetes/cert/ca.pem \
       --etcd-certfile=/etc/kubernetes/cert/kubernetes.pem \
       --etcd-keyfile=/etc/kubernetes/cert/kubernetes-key.pem \
       --etcd-servers=https://10.13.33.38:2379,https://10.13.33.29:2379,https://10.13.33.40:2379 \
       --bind-address=10.13.33.38 \
       --insecure-bind-address=127.0.0.1 \
       --secure-port=6443 \
       --tls-cert-file=/etc/kubernetes/cert/kubernetes.pem \
       --tls-private-key-file=/etc/kubernetes/cert/kubernetes-key.pem \
       --insecure-port=0 \
       --audit-dynamic-configuration \
       --audit-log-maxage=15 \
       --audit-log-maxbackup=3 \
       --audit-log-maxsize=100 \
       --audit-log-truncate-enabled \
       --audit-log-path=/data/k8s/k8s/kube-apiserver/audit.log \
       --audit-policy-file=/etc/kubernetes/audit-policy.yaml \
       --profiling \
       --anonymous-auth=false \
       --client-ca-file=/etc/kubernetes/cert/ca.pem \
       --enable-bootstrap-token-auth \
       --requestheader-allowed-names="aggregator" \
       --requestheader-client-ca-file=/etc/kubernetes/cert/ca.pem \
       --requestheader-extra-headers-prefix="X-Remote-Extra-" \
       --requestheader-group-headers=X-Remote-Group \
       --requestheader-username-headers=X-Remote-User \
       --service-account-key-file=/etc/kubernetes/cert/ca.pem \
       --authorization-mode=Node,RBAC \
       --runtime-config=api/all=true \
       --allow-privileged=true \
       --apiserver-count=3 \
       --event-ttl=168h \
       --kubelet-certificate-authority=/etc/kubernetes/cert/ca.pem \
       --kubelet-client-certificate=/etc/kubernetes/cert/kubernetes.pem \
       --kubelet-client-key=/etc/kubernetes/cert/kubernetes-key.pem \
       --kubelet-https=true \
       --kubelet-timeout=10s \
       --proxy-client-cert-file=/etc/kubernetes/cert/proxy-client.pem \
       --proxy-client-key-file=/etc/kubernetes/cert/proxy-client-key.pem \
       --service-cluster-ip-range=10.254.0.0/16 \
       --service-node-port-range=30000-40000 \
       --logtostderr=false \
       --v=2 \
       --log-dir=/data/k8s/k8s/kube-apiserver \
       --basic-auth-file=/etc/kubernetes/basic_auth_file
    Restart=on-failure
    RestartSec=10
    Type=notify
    LimitNOFILE=65536
    [Install]
    WantedBy=multi-user.target
EOF
##Distribute the unit file to all master nodes (adjust --advertise-address and --bind-address on each node)
cp kube-apiserver.service /etc/systemd/system/kube-apiserver.service
##Create the working directory
mkdir -p /data/k8s/k8s/kube-apiserver
##Start kube-apiserver
systemctl daemon-reload && systemctl enable kube-apiserver && systemctl restart kube-apiserver
##Check the ports kube-apiserver is listening on
netstat -lntup|grep kube
##Make sure the service status is active (running); otherwise inspect the logs to find out why
systemctl status kube-apiserver
journalctl -fu kube-apiserver
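
One way to confirm kube-apiserver is actually writing cluster state is to list a few keys from etcd; a sketch assuming etcdctl v3 is installed, reusing the kubernetes certificate created above (the same one the apiserver uses as its etcd client certificate):

ETCDCTL_API=3 etcdctl \
    --endpoints=https://10.13.33.38:2379 \
    --cacert=/etc/kubernetes/cert/ca.pem \
    --cert=/etc/kubernetes/cert/kubernetes.pem \
    --key=/etc/kubernetes/cert/kubernetes-key.pem \
    get /registry/ --prefix --keys-only | head
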
  • 4. Deploy kube-controller-manager

一、製做證書api

cd /opt/k8s/work

cat > kube-controller-manager-csr.json <<EOF
{
    "CN": "system:kube-controller-manager",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "hosts": [
      "127.0.0.1",
      "10.13.33.29",
      "10.13.33.38",
      "10.13.33.40"
    ],
    "names": [
      {
        "C": "CN",
        "ST": "BeiJing",
        "L": "BeiJing",
        "O": "system:kube-controller-manager",
        "OU": "4Paradigm"
      }
    ]
}
EOF

###The IP addresses here are the master node IPs

##Generate the certificate and private key
cfssl gencert -ca=/opt/k8s/work/ca.pem \
  -ca-key=/opt/k8s/work/ca-key.pem \
  -config=/opt/k8s/work/ca-config.json \
  -profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager
ls kube-controller-manager*pem

cp kube-controller-manager*.pem /etc/kubernetes/cert/        ##Distribute to all master nodes

##Create and distribute the kubeconfig file (--server points at the VIP 10.13.33.170:8443 in front of the apiservers)
kubectl config set-cluster kubernetes \
  --certificate-authority=/opt/k8s/work/ca.pem \
  --embed-certs=true \
  --server=https://10.13.33.170:8443 \
  --kubeconfig=kube-controller-manager.kubeconfig
kubectl config set-credentials system:kube-controller-manager \
  --client-certificate=kube-controller-manager.pem \
  --client-key=kube-controller-manager-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-controller-manager.kubeconfig
kubectl config set-context system:kube-controller-manager \
  --cluster=kubernetes \
  --user=system:kube-controller-manager \
  --kubeconfig=kube-controller-manager.kubeconfig
kubectl config use-context system:kube-controller-manager --kubeconfig=kube-controller-manager.kubeconfig

cp kube-controller-manager.kubeconfig /etc/kubernetes/                       ##Distribute to all master nodes
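
Before distributing it, you can verify that the kubeconfig embeds the expected cluster address (the VIP) and client certificate user:

kubectl config view --kubeconfig=kube-controller-manager.kubeconfig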

2. Start kube-controller-manager
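
The unit file below expands ${K8S_DIR} and ${SERVICE_CIDR} from the current shell when the heredoc is written; a minimal sketch, assuming the working directory and service CIDR used elsewhere in this article:

export K8S_DIR=/data/k8s/k8s
export SERVICE_CIDR=10.254.0.0/16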

##Create the kube-controller-manager unit file
cat > /etc/systemd/system/kube-controller-manager.service <<EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
[Service]
WorkingDirectory=${K8S_DIR}/kube-controller-manager
ExecStart=/opt/k8s/bin/kube-controller-manager \\
  --profiling \\
  --cluster-name=kubernetes \\
  --controllers=*,bootstrapsigner,tokencleaner \\
  --kube-api-qps=1000 \\
  --kube-api-burst=2000 \\
  --leader-elect \\
  --use-service-account-credentials \\
  --concurrent-service-syncs=2 \\
  --bind-address=0.0.0.0 \\
  --tls-cert-file=/etc/kubernetes/cert/kube-controller-manager.pem \\
  --tls-private-key-file=/etc/kubernetes/cert/kube-controller-manager-key.pem \\
  --authentication-kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \\
  --client-ca-file=/etc/kubernetes/cert/ca.pem \\
  --requestheader-allowed-names="" \\
  --requestheader-client-ca-file=/etc/kubernetes/cert/ca.pem \\
  --requestheader-extra-headers-prefix="X-Remote-Extra-" \\
  --requestheader-group-headers=X-Remote-Group \\
  --requestheader-username-headers=X-Remote-User \\
  --authorization-kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \\
  --cluster-signing-cert-file=/etc/kubernetes/cert/ca.pem \\
  --cluster-signing-key-file=/etc/kubernetes/cert/ca-key.pem \\
  --experimental-cluster-signing-duration=876000h \\
  --horizontal-pod-autoscaler-sync-period=10s \\
  --concurrent-deployment-syncs=10 \\
  --concurrent-gc-syncs=30 \\
  --node-cidr-mask-size=24 \\
  --service-cluster-ip-range=${SERVICE_CIDR} \\
  --pod-eviction-timeout=6m \\
  --terminated-pod-gc-threshold=10000 \\
  --root-ca-file=/etc/kubernetes/cert/ca.pem \\
  --service-account-private-key-file=/etc/kubernetes/cert/ca-key.pem \\
  --kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \\
  --logtostderr=true \\
  --v=2
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF

mkdir -p /data/k8s/k8s/kube-controller-manager

##Start kube-controller-manager
systemctl daemon-reload && systemctl enable kube-controller-manager && systemctl restart kube-controller-manager

systemctl status kube-controller-manager
netstat -lnpt | grep kube-cont
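
With --leader-elect enabled on all masters, only one kube-controller-manager instance is active at a time; assuming kubectl is already configured with an admin kubeconfig, the current leader can be read from the control-plane.alpha.kubernetes.io/leader annotation on the kube-controller-manager endpoints object:

kubectl get endpoints kube-controller-manager --namespace=kube-system -o yaml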