Service | Version |
---|---|
kubernetes | v1.12.4 |
CentOS 7.6 | CentOS Linux release 7.6.1810 (Core) |
Docker | v18.06 |
etcd | v3.3.11 |
calico | 3.1.4 |
IP | Role | Installed software |
---|---|---|
192.168.2.101 | k8s master | etcd,kube-apiserver,kube-controller-manager,kube-scheduler |
192.168.2.102 | k8s master | etcd,kube-apiserver,kube-controller-manager,kube-scheduler |
192.168.2.103 | k8s master | etcd,kube-apiserver,kube-controller-manager,kube-scheduler |
192.168.2.111 | k8s node01 | docker,kubelet,kube-proxy |
192.168.2.112 | k8s node02 | docker,kubelet,kube-proxy |
192.168.2.113 | k8s node03 | docker,kubelet,kube-proxy |
ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
echo 'Asia/Shanghai' >/etc/timezone
ntpdate time.windows.com
yum -y install vim tree wget lrzsz
cat << EOF >> /etc/profile
###########################
export PS1='\[\e[32;1m\][\u@\h \W]\$ \[\e[0m\]'
export HISTTIMEFORMAT="`whoami`_%F %T :"
alias grep='grep --color=auto'
alias egrep='egrep --color=auto'
EOF
mkdir /etc/yum.repos.d/bak
cp /etc/yum.repos.d/*.repo /etc/yum.repos.d/bak/
curl -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
# Remove any previously installed Docker
yum remove docker \
           docker-client \
           docker-client-latest \
           docker-common \
           docker-latest \
           docker-latest-logrotate \
           docker-logrotate \
           docker-selinux \
           docker-engine-selinux \
           docker-engine

# Install the docker-ce prerequisites
yum install -y yum-utils device-mapper-persistent-data lvm2

# Add the official docker-ce repository
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo

# Or add the Aliyun docker-ce mirror
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
yum makecache
yum update -y
vi /etc/selinux/config
SELINUX=permissive

setenforce 0
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF

sysctl --system
swapoff -a

# Disable the swap entry in fstab
vi /etc/fstab
#/dev/mapper/centos-swap swap                    swap    defaults        0 0

# Confirm that swap is disabled
cat /proc/swaps
Filename                                Type            Size    Used    Priority
ulimit -SHn 65535
cat << EOF >> /etc/hosts
192.168.2.100 k8s-master-lb
192.168.2.101 k8s-master01
192.168.2.102 k8s-master02
192.168.2.103 k8s-master03
192.168.2.111 k8s-node01
192.168.2.112 k8s-node02
192.168.2.113 k8s-node03
EOF
[root@k8s-master01 ~]# more /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.2.100 k8s-master-lb
192.168.2.101 k8s-master01
192.168.2.102 k8s-master02
192.168.2.103 k8s-master03
192.168.2.111 k8s-node01
192.168.2.112 k8s-node02
192.168.2.113 k8s-node03
modprobe ip_vs
modprobe ip_vs_rr
modprobe ip_vs_wrr
modprobe ip_vs_sh
modprobe nf_conntrack_ipv4
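These modprobe commands do not survive the reboot performed below. A common way to make them persistent on CentOS 7 is a modules file under /etc/sysconfig/modules/ (a sketch; the file name is just a convention):

cat > /etc/sysconfig/modules/ipvs.modules << EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules
bash /etc/sysconfig/modules/ipvs.modules
# Verify the modules are loaded
lsmod | grep -e ip_vs -e nf_conntrack_ipv4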
systemctl disable firewalld
systemctl stop firewalld
reboot
ssh-keygen -t rsa
for i in k8s-master01 k8s-master02 k8s-master03 k8s-node01 k8s-node02 k8s-node03;do ssh-copy-id -i .ssh/id_rsa.pub $i;done
mkdir /opt/ssl
cd /opt/ssl
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
chmod +x *
mv cfssl_linux-amd64 /usr/local/bin/cfssl
mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
mv cfssl-certinfo_linux-amd64 /usr/bin/cfssl-certinfo
cat > ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
        "expiry": "87600h",
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ]
      }
    }
  }
}
EOF
cat > ca-csr.json << EOF
{
  "CN": "Kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "Beijing",
      "ST": "Beijing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
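Optionally sanity-check the generated CA with the cfssl-certinfo binary installed above, or with openssl:

cfssl-certinfo -cert ca.pem | head
openssl x509 -in ca.pem -noout -subject -dates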
cat > etcd-csr.json << EOF
{
  "CN": "etcd",
  "hosts": [
    "127.0.0.1",
    "192.168.2.101",
    "192.168.2.102",
    "192.168.2.103"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes etcd-csr.json | cfssljson -bare etcd
# ls *.pem
ca-key.pem  ca.pem  etcd-key.pem  etcd.pem
mkdir /etc/kubernetes/etcd/{bin,cfg,ssl} -p
cp ca-key.pem ca.pem etcd-key.pem etcd.pem /etc/kubernetes/etcd/ssl/
wget https://github.com/etcd-io/etcd/releases/download/v3.3.11/etcd-v3.3.11-linux-amd64.tar.gz
tar zxvf etcd-v3.3.11-linux-amd64.tar.gz
mv etcd-v3.3.11-linux-amd64/{etcd,etcdctl} /etc/kubernetes/etcd/bin
The deployment steps below are identical on all three planned etcd nodes; the only difference is that the server IPs in each node's etcd configuration file must be the node's own.
For example, on k8s-master01:
cat > /etc/kubernetes/etcd/cfg/etcd << EOF
#[Member]
ETCD_NAME="k8s-master01"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.2.101:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.2.101:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.2.101:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.2.101:2379"
ETCD_INITIAL_CLUSTER="k8s-master01=https://192.168.2.101:2380,k8s-master02=https://192.168.2.102:2380,k8s-master03=https://192.168.2.103:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF
# cat /usr/lib/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
EnvironmentFile=/etc/kubernetes/etcd/cfg/etcd
ExecStart=/etc/kubernetes/etcd/bin/etcd \
  --name=${ETCD_NAME} \
  --data-dir=${ETCD_DATA_DIR} \
  --listen-peer-urls=${ETCD_LISTEN_PEER_URLS} \
  --listen-client-urls=${ETCD_LISTEN_CLIENT_URLS},http://127.0.0.1:2379 \
  --advertise-client-urls=${ETCD_ADVERTISE_CLIENT_URLS} \
  --initial-advertise-peer-urls=${ETCD_INITIAL_ADVERTISE_PEER_URLS} \
  --initial-cluster=${ETCD_INITIAL_CLUSTER} \
  --initial-cluster-token=${ETCD_INITIAL_CLUSTER_TOKEN} \
  --initial-cluster-state=new \
  --cert-file=/etc/kubernetes/etcd/ssl/etcd.pem \
  --key-file=/etc/kubernetes/etcd/ssl/etcd-key.pem \
  --peer-cert-file=/etc/kubernetes/etcd/ssl/etcd.pem \
  --peer-key-file=/etc/kubernetes/etcd/ssl/etcd-key.pem \
  --trusted-ca-file=/etc/kubernetes/etcd/ssl/ca.pem \
  --peer-trusted-ca-file=/etc/kubernetes/etcd/ssl/ca.pem
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
USER=root
CONTROL_PLANE_IPS="k8s-master02 k8s-master03 k8s-node01 k8s-node02 k8s-node03"
for host in $CONTROL_PLANE_IPS; do
    ssh "${USER}"@$host "mkdir -p /etc/kubernetes/"
    scp -r /etc/kubernetes/etcd/ "${USER}"@$host:/etc/kubernetes/
    scp -r /usr/lib/systemd/system/etcd.service "${USER}"@$host:/usr/lib/systemd/system/etcd.service
done
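The loop copies k8s-master01's configuration verbatim, so, as noted above, ETCD_NAME and the listen/advertise addresses still have to be changed on the other two members. A minimal sketch (names and IPs from the planning table; ETCD_INITIAL_CLUSTER deliberately stays unchanged, since it must keep all three members):

declare -A NODE_IP=( [k8s-master02]=192.168.2.102 [k8s-master03]=192.168.2.103 )
for host in "${!NODE_IP[@]}"; do
    # Rewrite ETCD_NAME and the node's own URL lines only
    ssh root@$host "sed -i \
        -e 's/^ETCD_NAME=.*/ETCD_NAME=\"$host\"/' \
        -e '/^ETCD_\(LISTEN\|ADVERTISE\|INITIAL_ADVERTISE\)_/s/192.168.2.101/${NODE_IP[$host]}/' \
        /etc/kubernetes/etcd/cfg/etcd"
done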
systemctl start etcd
systemctl enable etcd
# /etc/kubernetes/etcd/bin/etcdctl \
    --ca-file=/etc/kubernetes/etcd/ssl/ca.pem \
    --cert-file=/etc/kubernetes/etcd/ssl/etcd.pem \
    --key-file=/etc/kubernetes/etcd/ssl/etcd-key.pem \
    --endpoints="https://192.168.2.101:2379,https://192.168.2.102:2379,https://192.168.2.103:2379" \
    cluster-health

# Result
member ad9328796634e0d0 is healthy: got healthy result from https://192.168.2.101:2379
member f03f45bbcae9634b is healthy: got healthy result from https://192.168.2.103:2379
member fddf9c47e41c5ec2 is healthy: got healthy result from https://192.168.2.102:2379
cluster is healthy
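The member list can be checked with the same v2-API flags:

/etc/kubernetes/etcd/bin/etcdctl \
    --ca-file=/etc/kubernetes/etcd/ssl/ca.pem \
    --cert-file=/etc/kubernetes/etcd/ssl/etcd.pem \
    --key-file=/etc/kubernetes/etcd/ssl/etcd-key.pem \
    --endpoints="https://192.168.2.101:2379" \
    member list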
yum install -y keepalived haproxy
cp /etc/haproxy/haproxy.cfg{,.bak}

cat > /etc/haproxy/haproxy.cfg << EOF
global
    log /dev/log    local0
    log /dev/log    local1 notice
    chroot /var/lib/haproxy
    stats socket /var/run/haproxy-admin.sock mode 660 level admin
    stats timeout 30s
    user haproxy
    group haproxy
    daemon
    nbproc 1

defaults
    log     global
    timeout connect 5000
    timeout client  10m
    timeout server  10m

listen admin_stats
    bind 0.0.0.0:10080
    mode http
    log 127.0.0.1 local0 err
    stats refresh 30s
    stats uri /status
    stats realm welcome login\ Haproxy
    stats auth admin:123456
    stats hide-version
    stats admin if TRUE

listen kube-master
    bind 0.0.0.0:8443
    mode tcp
    option tcplog
    balance roundrobin
    server 192.168.2.101 192.168.2.101:6443 check inter 2000 fall 2 rise 2 weight 1
    server 192.168.2.102 192.168.2.102:6443 check inter 2000 fall 2 rise 2 weight 1
    server 192.168.2.103 192.168.2.103:6443 check inter 2000 fall 2 rise 2 weight 1
EOF
On the MASTER node:

cp /etc/keepalived/keepalived.conf{,.bak}

cat > /etc/keepalived/keepalived.conf << EOF
global_defs {
    router_id lb-master-100
}

vrrp_script check-haproxy {
    script "killall -0 haproxy"
    interval 3
}

vrrp_instance VI-kube-master {
    state MASTER
    priority 120
    dont_track_primary
    interface ens160
    virtual_router_id 68
    advert_int 3
    track_script {
        check-haproxy
    }
    virtual_ipaddress {
        192.168.2.100    # VIP; the kube-apiserver is reached through this address
    }
}
EOF
On the BACKUP node (lower priority):

cp /etc/keepalived/keepalived.conf{,.bak}

cat > /etc/keepalived/keepalived.conf << EOF
global_defs {
    router_id lb-master-100
}

vrrp_script check-haproxy {
    script "killall -0 haproxy"
    interval 3
}

vrrp_instance VI-kube-master {
    state BACKUP
    priority 110
    dont_track_primary
    interface ens160
    virtual_router_id 68
    advert_int 3
    track_script {
        check-haproxy
    }
    virtual_ipaddress {
        192.168.2.100    # VIP; the kube-apiserver is reached through this address
    }
}
EOF
# haproxy
systemctl enable haproxy
systemctl start haproxy

# keepalived
systemctl enable keepalived
systemctl start keepalived
systemctl status haproxy|grep Active
systemctl status keepalived|grep Active
# ip addr show | grep 192.168.2.100
    inet 192.168.2.100/32 scope global ens160
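To verify failover, stop haproxy on the node that currently holds the VIP; keepalived's check script should fail and the VIP should move to the backup within a few advert intervals (a quick sketch):

# On the node holding the VIP
systemctl stop haproxy

# On the backup node, the address should appear shortly
ip addr show | grep 192.168.2.100

# Restore the original state
systemctl start haproxy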
mkdir -p /etc/kubernetes/server/{bin,cfg,ssl}

wget https://dl.k8s.io/v1.12.4/kubernetes-server-linux-amd64.tar.gz
# If that mirror fails, use the upstream link (hosted outside China):
# wget https://storage.googleapis.com/kubernetes-release/release/v1.12.4/kubernetes-server-linux-amd64.tar.gz

tar zxvf kubernetes-server-linux-amd64.tar.gz
cd kubernetes/server/bin
cp kube-apiserver kube-scheduler kube-controller-manager kubectl /etc/kubernetes/server/bin/
cat > admin-csr.json <<EOF
{
  "CN": "admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin
cp ca-key.pem ca.pem admin.pem admin-key.pem /etc/kubernetes/server/ssl/
kubectl config set-cluster kubernetes \
    --certificate-authority=/etc/kubernetes/server/ssl/ca.pem \
    --embed-certs=true \
    --server=https://192.168.2.100:8443 \
    --kubeconfig=kubectl.kubeconfig

# Set client credentials
kubectl config set-credentials admin \
    --client-certificate=/etc/kubernetes/server/ssl/admin.pem \
    --client-key=/etc/kubernetes/server/ssl/admin-key.pem \
    --embed-certs=true \
    --kubeconfig=kubectl.kubeconfig

# Set the context
kubectl config set-context kubernetes \
    --cluster=kubernetes \
    --user=admin \
    --kubeconfig=kubectl.kubeconfig

# Set the default context
kubectl config use-context kubernetes --kubeconfig=kubectl.kubeconfig
mkdir -p ~/.kube
cp kubectl.kubeconfig ~/.kube/config
for i in k8s-master02 k8s-master03;do scp -r ~/.kube/ $i:~/;done
cat > apiserver-csr.json <<EOF
{
  "CN": "kubernetes",
  "hosts": [
    "127.0.0.1",
    "192.168.2.101",
    "192.168.2.102",
    "192.168.2.103",
    "192.168.2.100",
    "10.254.0.1",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
If you use a domain other than cluster.local, e.g. bqding.com, change the last two entries in the hosts list to kubernetes.default.svc.bqding and kubernetes.default.svc.bqding.com.
Generate the certificate and private key:
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes apiserver-csr.json | cfssljson -bare apiserver
cp apiserver*.pem /etc/kubernetes/server/ssl/
cat > /etc/kubernetes/server/cfg/encryption-config.yaml <<EOF
kind: EncryptionConfig
apiVersion: v1
resources:
  - resources:
      - secrets
    providers:
      - aescbc:
          keys:
            - name: key1
              secret: $(head -c 32 /dev/urandom | base64)
      - identity: {}
EOF
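Once kube-apiserver is up (later in this section), you can confirm that secrets really are encrypted at rest: the value stored in etcd should carry the k8s:enc:aescbc:v1:key1 prefix instead of plaintext. A sketch, using a throwaway secret named enc-test:

kubectl create secret generic enc-test --from-literal=foo=bar
ETCDCTL_API=3 /etc/kubernetes/etcd/bin/etcdctl \
    --cacert=/etc/kubernetes/etcd/ssl/ca.pem \
    --cert=/etc/kubernetes/etcd/ssl/etcd.pem \
    --key=/etc/kubernetes/etcd/ssl/etcd-key.pem \
    --endpoints=https://192.168.2.101:2379 \
    get /registry/secrets/default/enc-test | hexdump -C | head
kubectl delete secret enc-test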
# cat /etc/kubernetes/server/cfg/kube-apiserver
KUBE_APISERVER_OPTS="--enable-admission-plugins=Initializers,NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \
  --anonymous-auth=false \
  --experimental-encryption-provider-config=/etc/kubernetes/server/cfg/encryption-config.yaml \
  --advertise-address=192.168.2.101 \
  --bind-address=192.168.2.101 \
  --insecure-port=0 \
  --authorization-mode=Node,RBAC \
  --runtime-config=api/all \
  --enable-bootstrap-token-auth \
  --service-cluster-ip-range=10.254.0.0/16 \
  --service-node-port-range=30000-32700 \
  --tls-cert-file=/etc/kubernetes/server/ssl/apiserver.pem \
  --tls-private-key-file=/etc/kubernetes/server/ssl/apiserver-key.pem \
  --client-ca-file=/etc/kubernetes/server/ssl/ca.pem \
  --kubelet-client-certificate=/etc/kubernetes/server/ssl/apiserver.pem \
  --kubelet-client-key=/etc/kubernetes/server/ssl/apiserver-key.pem \
  --service-account-key-file=/etc/kubernetes/server/ssl/ca-key.pem \
  --etcd-cafile=/etc/kubernetes/etcd/ssl/ca.pem \
  --etcd-certfile=/etc/kubernetes/etcd/ssl/etcd.pem \
  --etcd-keyfile=/etc/kubernetes/etcd/ssl/etcd-key.pem \
  --etcd-servers=https://192.168.2.101:2379,https://192.168.2.102:2379,https://192.168.2.103:2379 \
  --enable-swagger-ui=true \
  --allow-privileged=true \
  --apiserver-count=3 \
  --audit-log-maxage=30 \
  --audit-log-maxbackup=3 \
  --audit-log-maxsize=100 \
  --audit-log-path=/var/log/kube-apiserver-audit.log \
  --event-ttl=1h \
  --alsologtostderr=true \
  --logtostderr=false \
  --log-dir=/var/log/kubernetes \
  --v=2"
# cat /usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
EnvironmentFile=-/etc/kubernetes/server/cfg/kube-apiserver
ExecStart=/etc/kubernetes/server/bin/kube-apiserver $KUBE_APISERVER_OPTS
Restart=on-failure
RestartSec=5
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
mkdir -p /var/log/kubernetes
USER=root
for host in k8s-master02 k8s-master03;do
    ssh "${USER}"@$host "mkdir -p /var/log/kubernetes"
    scp -r /etc/kubernetes/server/ "${USER}"@$host:/etc/kubernetes/
    scp /usr/lib/systemd/system/kube-apiserver.service "${USER}"@$host:/usr/lib/systemd/system/kube-apiserver.service
done

# On k8s-master02 and k8s-master03, change --advertise-address and --bind-address
# in /etc/kubernetes/server/cfg/kube-apiserver to each master's own IP.
systemctl daemon-reload
systemctl enable kube-apiserver
systemctl start kube-apiserver
# netstat -ptln | grep kube-apiserve
tcp        0      0 192.168.2.101:6443      0.0.0.0:*               LISTEN      15786/kube-apiserve
# kubectl cluster-info
Kubernetes master is running at https://192.168.2.100:8443

To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
kubectl create clusterrolebinding kube-apiserver:kubelet-apis --clusterrole=system:kubelet-api-admin --user kubernetes
cat > kube-controller-manager-csr.json << EOF
{
  "CN": "system:kube-controller-manager",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "hosts": [
    "127.0.0.1",
    "192.168.2.101",
    "192.168.2.102",
    "192.168.2.103"
  ],
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "system:kube-controller-manager",
      "OU": "System"
    }
  ]
}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager
cp kube-controller-manager*.pem /etc/kubernetes/server/ssl/
kubectl config set-cluster kubernetes \
    --certificate-authority=/etc/kubernetes/server/ssl/ca.pem \
    --embed-certs=true \
    --server=https://192.168.2.100:8443 \
    --kubeconfig=kube-controller-manager.kubeconfig

kubectl config set-credentials system:kube-controller-manager \
    --client-certificate=/etc/kubernetes/server/ssl/kube-controller-manager.pem \
    --client-key=/etc/kubernetes/server/ssl/kube-controller-manager-key.pem \
    --embed-certs=true \
    --kubeconfig=kube-controller-manager.kubeconfig

kubectl config set-context system:kube-controller-manager \
    --cluster=kubernetes \
    --user=system:kube-controller-manager \
    --kubeconfig=kube-controller-manager.kubeconfig

kubectl config use-context system:kube-controller-manager --kubeconfig=kube-controller-manager.kubeconfig
# cat /etc/kubernetes/server/cfg/kube-controller-manager
KUBE_CONTROLLER_MANAGER_OPTS="--port=0 \
  --secure-port=10252 \
  --bind-address=127.0.0.1 \
  --kubeconfig=/etc/kubernetes/server/cfg/kube-controller-manager.kubeconfig \
  --authentication-kubeconfig=/etc/kubernetes/server/cfg/kube-controller-manager.kubeconfig \
  --service-cluster-ip-range=10.254.0.0/16 \
  --cluster-name=kubernetes \
  --cluster-signing-cert-file=/etc/kubernetes/server/ssl/ca.pem \
  --cluster-signing-key-file=/etc/kubernetes/server/ssl/ca-key.pem \
  --experimental-cluster-signing-duration=8760h \
  --root-ca-file=/etc/kubernetes/server/ssl/ca.pem \
  --service-account-private-key-file=/etc/kubernetes/server/ssl/ca-key.pem \
  --leader-elect=true \
  --feature-gates=RotateKubeletServerCertificate=true \
  --controllers=*,bootstrapsigner,tokencleaner \
  --horizontal-pod-autoscaler-use-rest-clients=true \
  --horizontal-pod-autoscaler-sync-period=10s \
  --tls-cert-file=/etc/kubernetes/server/ssl/kube-controller-manager.pem \
  --tls-private-key-file=/etc/kubernetes/server/ssl/kube-controller-manager-key.pem \
  --use-service-account-credentials=true \
  --alsologtostderr=true \
  --logtostderr=false \
  --log-dir=/var/log/kubernetes \
  --v=2"
# cat /usr/lib/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/etc/kubernetes/server/cfg/kube-controller-manager
ExecStart=/etc/kubernetes/server/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
USER=root
for host in k8s-master02 k8s-master03;do
    ssh "${USER}"@$host "mkdir -p /var/log/kubernetes"
    scp /etc/kubernetes/server/ssl/kube-controller-manager*.pem "${USER}"@$host:/etc/kubernetes/server/ssl/
    scp /usr/lib/systemd/system/kube-controller-manager.service "${USER}"@$host:/usr/lib/systemd/system/kube-controller-manager.service
    scp /etc/kubernetes/server/cfg/kube-controller-manager "${USER}"@$host:/etc/kubernetes/server/cfg/kube-controller-manager
    scp /etc/kubernetes/server/cfg/kube-controller-manager.kubeconfig "${USER}"@$host:/etc/kubernetes/server/cfg/kube-controller-manager.kubeconfig
done
systemctl daemon-reload
systemctl enable kube-controller-manager
systemctl start kube-controller-manager
# netstat -lnpt|grep kube-controlle
tcp        0      0 127.0.0.1:10252         0.0.0.0:*               LISTEN      3090/kube-controlle
# kubectl get endpoints kube-controller-manager --namespace=kube-system -o yaml
apiVersion: v1
kind: Endpoints
metadata:
  annotations:
    control-plane.alpha.kubernetes.io/leader: '{"holderIdentity":"k8s-master01_28c03ae9-18a9-11e9-a6d8-000c2927a0d0","leaseDurationSeconds":15,"acquireTime":"2019-01-15T09:37:38Z","renewTime":"2019-01-15T09:42:06Z","leaderTransitions":1}'
  creationTimestamp: 2019-01-15T09:37:14Z
  name: kube-controller-manager
  namespace: kube-system
  resourceVersion: "2413"
  selfLink: /api/v1/namespaces/kube-system/endpoints/kube-controller-manager
  uid: 24132473-18a9-11e9-936a-000c2927a0d0
# cat > kube-scheduler-csr.json << EOF
{
  "CN": "system:kube-scheduler",
  "hosts": [
    "127.0.0.1",
    "192.168.2.101",
    "192.168.2.102",
    "192.168.2.103"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "system:kube-scheduler",
      "OU": "System"
    }
  ]
}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-scheduler-csr.json | cfssljson -bare kube-scheduler
cp kube-scheduler*.pem /etc/kubernetes/server/ssl/
kubectl config set-cluster kubernetes \
    --certificate-authority=/etc/kubernetes/server/ssl/ca.pem \
    --embed-certs=true \
    --server=https://192.168.2.100:8443 \
    --kubeconfig=kube-scheduler.kubeconfig

kubectl config set-credentials system:kube-scheduler \
    --client-certificate=/etc/kubernetes/server/ssl/kube-scheduler.pem \
    --client-key=/etc/kubernetes/server/ssl/kube-scheduler-key.pem \
    --embed-certs=true \
    --kubeconfig=kube-scheduler.kubeconfig

kubectl config set-context system:kube-scheduler \
    --cluster=kubernetes \
    --user=system:kube-scheduler \
    --kubeconfig=kube-scheduler.kubeconfig

kubectl config use-context system:kube-scheduler --kubeconfig=kube-scheduler.kubeconfig
# cat /etc/kubernetes/server/cfg/kube-scheduler
KUBE_SCHEDULER_OPTS="--address=127.0.0.1 \
  --kubeconfig=/etc/kubernetes/server/cfg/kube-scheduler.kubeconfig \
  --leader-elect=true \
  --alsologtostderr=true \
  --logtostderr=false \
  --log-dir=/var/log/kubernetes \
  --v=2"
# cat /usr/lib/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/etc/kubernetes/server/cfg/kube-scheduler
ExecStart=/etc/kubernetes/server/bin/kube-scheduler $KUBE_SCHEDULER_OPTS
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
USER=root
for host in k8s-master02 k8s-master03;do
    ssh "${USER}"@$host "mkdir -p /var/log/kubernetes"
    scp /etc/kubernetes/server/ssl/kube-scheduler*.pem "${USER}"@$host:/etc/kubernetes/server/ssl/
    scp /usr/lib/systemd/system/kube-scheduler.service "${USER}"@$host:/usr/lib/systemd/system/kube-scheduler.service
    scp /etc/kubernetes/server/cfg/kube-scheduler "${USER}"@$host:/etc/kubernetes/server/cfg/kube-scheduler
    scp /etc/kubernetes/server/cfg/kube-scheduler.kubeconfig "${USER}"@$host:/etc/kubernetes/server/cfg/kube-scheduler.kubeconfig
done
systemctl daemon-reload
systemctl enable kube-scheduler
systemctl start kube-scheduler
# netstat -lnpt|grep kube-scheduler
tcp        0      0 127.0.0.1:10251         0.0.0.0:*               LISTEN      3155/kube-scheduler
# kubectl get endpoints kube-scheduler --namespace=kube-system -o yaml
apiVersion: v1
kind: Endpoints
metadata:
  annotations:
    control-plane.alpha.kubernetes.io/leader: '{"holderIdentity":"k8s-master01_eb23817d-18a9-11e9-8445-000c2927a0d0","leaseDurationSeconds":15,"acquireTime":"2019-01-15T09:43:05Z","renewTime":"2019-01-15T09:44:32Z","leaderTransitions":1}'
  creationTimestamp: 2019-01-15T09:37:03Z
  name: kube-scheduler
  namespace: kube-system
  resourceVersion: "2594"
  selfLink: /api/v1/namespaces/kube-system/endpoints/kube-scheduler
  uid: 1d51b563-18a9-11e9-bfed-000c296ab1b4
# kubectl get componentstatuses
NAME                 STATUS      MESSAGE                                                                                             ERROR
controller-manager   Unhealthy   Get http://127.0.0.1:10252/healthz: net/http: HTTP/1.x transport connection broken: malformed HTTP response "\x15\x03\x01\x00\x02\x02"
scheduler            Healthy     ok
etcd-2               Healthy     {"health":"true"}
etcd-0               Healthy     {"health":"true"}
etcd-1               Healthy     {"health":"true"}

Note: controller-manager is reported Unhealthy only because kubectl probes http://127.0.0.1:10252 while this deployment serves HTTPS on that port (--port=0 --secure-port=10252); the netstat check above shows the process is actually running.
yum install -y epel-release wget conntrack ipvsadm ipset jq iptables curl sysstat libseccomp && /usr/sbin/modprobe ip_vs
yum list docker-ce --showduplicates | sort -r
yum install -y docker-ce-18.06.1.ce-3.el7
systemctl enable docker && systemctl start docker
wget https://dl.k8s.io/v1.12.4/kubernetes-server-linux-amd64.tar.gz
tar -xzvf kubernetes-server-linux-amd64.tar.gz
cd kubernetes/server/bin/
cp kubelet kube-proxy /etc/kubernetes/server/bin/
# Create a bootstrap token
export BOOTSTRAP_TOKEN=$(kubeadm token create \
    --description kubelet-bootstrap-token \
    --groups system:bootstrappers:k8s-master01 \
    --kubeconfig ~/.kube/config)

# Set cluster parameters
kubectl config set-cluster kubernetes \
    --certificate-authority=/etc/kubernetes/server/ssl/ca.pem \
    --embed-certs=true \
    --server=https://192.168.2.100:8443 \
    --kubeconfig=kubelet-bootstrap-k8s-master01.kubeconfig

# Set client credentials
kubectl config set-credentials kubelet-bootstrap \
    --token=${BOOTSTRAP_TOKEN} \
    --kubeconfig=kubelet-bootstrap-k8s-master01.kubeconfig

# Set the context
kubectl config set-context default \
    --cluster=kubernetes \
    --user=kubelet-bootstrap \
    --kubeconfig=kubelet-bootstrap-k8s-master01.kubeconfig

# Set the default context
kubectl config use-context default --kubeconfig=kubelet-bootstrap-k8s-master01.kubeconfig

# Repeat the steps above for k8s-master02 and k8s-master03, changing the
# --groups suffix and the kubeconfig file name accordingly.
# kubeadm token list --kubeconfig ~/.kube/config
TOKEN                     TTL   EXPIRES                     USAGES                   DESCRIPTION               EXTRA GROUPS
cpwqfo.x1vxl10wzq1e3eid   23h   2019-01-17T10:00:48+08:00   authentication,signing   kubelet-bootstrap-token   system:bootstrappers:k8s-master02
hfn1ki.7550z7bywogn1hjm   23h   2019-01-17T10:00:32+08:00   authentication,signing   kubelet-bootstrap-token   system:bootstrappers:k8s-master03
sexqfs.8vb2su8o8iinp1jh   23h   2019-01-17T09:57:36+08:00   authentication,signing   kubelet-bootstrap-token   system:bootstrappers:k8s-master01
When kube-apiserver accepts a kubelet's bootstrap token, it sets the request's user to system:bootstrap:<token-id> and its group to system:bootstrappers.
View the Secret associated with each token:
# kubectl get secrets -n kube-system
NAME                                             TYPE                                  DATA   AGE
attachdetach-controller-token-tprrl              kubernetes.io/service-account-token   3      16h
bootstrap-signer-token-k9xbg                     kubernetes.io/service-account-token   3      16h
bootstrap-token-cpwqfo                           bootstrap.kubernetes.io/token         7      4m4s
bootstrap-token-hfn1ki                           bootstrap.kubernetes.io/token         7      4m20s
bootstrap-token-sexqfs                           bootstrap.kubernetes.io/token         7      7m16s
certificate-controller-token-8pm9l               kubernetes.io/service-account-token   3      16h
clusterrole-aggregation-controller-token-l6z4j   kubernetes.io/service-account-token   3      16h
cronjob-controller-token-ntrcn                   kubernetes.io/service-account-token   3      16h
daemon-set-controller-token-hpsgr                kubernetes.io/service-account-token   3      16h
default-token-jh6zz                              kubernetes.io/service-account-token   3      16h
deployment-controller-token-l6s7n                kubernetes.io/service-account-token   3      16h
disruption-controller-token-zdb4r                kubernetes.io/service-account-token   3      16h
endpoint-controller-token-8k7lw                  kubernetes.io/service-account-token   3      16h
expand-controller-token-fwrbt                    kubernetes.io/service-account-token   3      16h
generic-garbage-collector-token-v6ll5            kubernetes.io/service-account-token   3      16h
horizontal-pod-autoscaler-token-9f5t5            kubernetes.io/service-account-token   3      16h
job-controller-token-vcjvp                       kubernetes.io/service-account-token   3      16h
namespace-controller-token-zx28b                 kubernetes.io/service-account-token   3      16h
node-controller-token-d9nl5                      kubernetes.io/service-account-token   3      16h
persistent-volume-binder-token-7lcfq             kubernetes.io/service-account-token   3      16h
pod-garbage-collector-token-gx445                kubernetes.io/service-account-token   3      16h
pv-protection-controller-token-lv2n4             kubernetes.io/service-account-token   3      16h
pvc-protection-controller-token-cpvk7            kubernetes.io/service-account-token   3      16h
replicaset-controller-token-52xhf                kubernetes.io/service-account-token   3      16h
replication-controller-token-qbs4f               kubernetes.io/service-account-token   3      16h
resourcequota-controller-token-gphkl             kubernetes.io/service-account-token   3      16h
service-account-controller-token-vk9mn           kubernetes.io/service-account-token   3      16h
service-controller-token-mntf7                   kubernetes.io/service-account-token   3      16h
statefulset-controller-token-ljnbs               kubernetes.io/service-account-token   3      16h
token-cleaner-token-v65g8                        kubernetes.io/service-account-token   3      16h
ttl-controller-token-w5cpc                       kubernetes.io/service-account-token   3      16h
Most kubelet command-line flags now warn "DEPRECATED: This parameter should be set via the config file specified by the Kubelet's --config flag", so the settings below go into a dedicated config file:
cat > kubelet.config.json <<EOF
{
  "kind": "KubeletConfiguration",
  "apiVersion": "kubelet.config.k8s.io/v1beta1",
  "authentication": {
    "x509": {
      "clientCAFile": "/etc/kubernetes/server/ssl/ca.pem"
    },
    "webhook": {
      "enabled": true,
      "cacheTTL": "2m0s"
    },
    "anonymous": {
      "enabled": false
    }
  },
  "authorization": {
    "mode": "Webhook",
    "webhook": {
      "cacheAuthorizedTTL": "5m0s",
      "cacheUnauthorizedTTL": "30s"
    }
  },
  "address": "NodeIP",
  "port": 10250,
  "readOnlyPort": 0,
  "cgroupDriver": "cgroupfs",
  "hairpinMode": "promiscuous-bridge",
  "serializeImagePulls": false,
  "featureGates": {
    "RotateKubeletClientCertificate": true,
    "RotateKubeletServerCertificate": true
  },
  "clusterDomain": "cluster.local.",
  "clusterDNS": ["10.254.0.2"]
}
EOF

Replace "NodeIP" with each node's own IP address (a per-node substitution is sketched after the distribution step below).
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service
Requires=docker.service

[Service]
WorkingDirectory=/var/lib/kubelet
ExecStart=/etc/kubernetes/server/bin/kubelet \
  --bootstrap-kubeconfig=/etc/kubernetes/server/cfg/kubelet-bootstrap.kubeconfig \
  --cert-dir=/etc/kubernetes/server/ssl \
  --kubeconfig=/etc/kubernetes/server/cfg/kubelet.kubeconfig \
  --config=/etc/kubernetes/server/cfg/kubelet.config.json \
  --network-plugin=cni \
  --hostname-override=NodeIP \
  --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:3.1 \
  --allow-privileged=true \
  --alsologtostderr=true \
  --logtostderr=false \
  --log-dir=/var/log/kubernetes \
  --v=2
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
USER=root
for host in k8s-node01 k8s-node02 k8s-node03;do
    ssh "${USER}"@$host "mkdir -p /etc/kubernetes/server/{bin,cfg,ssl} /var/lib/kubelet /var/log/kubernetes"
    scp /etc/kubernetes/server/bin/kubelet "${USER}"@$host:/etc/kubernetes/server/bin/kubelet
    scp /etc/kubernetes/server/bin/kube-proxy "${USER}"@$host:/etc/kubernetes/server/bin/kube-proxy
    scp /usr/lib/systemd/system/kubelet.service "${USER}"@$host:/usr/lib/systemd/system/kubelet.service
    scp /etc/kubernetes/server/cfg/kubelet.config.json "${USER}"@$host:/etc/kubernetes/server/cfg/kubelet.config.json
    scp /etc/kubernetes/server/ssl/ca*.pem "${USER}"@$host:/etc/kubernetes/server/ssl/
done

scp /etc/kubernetes/server/cfg/kubelet-bootstrap-k8s-master01.kubeconfig k8s-node01:/etc/kubernetes/server/cfg/kubelet-bootstrap.kubeconfig
scp /etc/kubernetes/server/cfg/kubelet-bootstrap-k8s-master02.kubeconfig k8s-node02:/etc/kubernetes/server/cfg/kubelet-bootstrap.kubeconfig
scp /etc/kubernetes/server/cfg/kubelet-bootstrap-k8s-master03.kubeconfig k8s-node03:/etc/kubernetes/server/cfg/kubelet-bootstrap.kubeconfig
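kubelet.config.json and kubelet.service were copied with the NodeIP placeholder intact; it must become each node's own address before kubelet starts. A minimal sketch using the IPs from the planning table:

declare -A NODE_IP=( [k8s-node01]=192.168.2.111 [k8s-node02]=192.168.2.112 [k8s-node03]=192.168.2.113 )
for host in "${!NODE_IP[@]}"; do
    # Substitute the placeholder in both the config file and the unit file
    ssh root@$host "sed -i 's/NodeIP/${NODE_IP[$host]}/g' \
        /etc/kubernetes/server/cfg/kubelet.config.json /usr/lib/systemd/system/kubelet.service"
done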
If kubelet is started at this point, it fails: the bootstrap user is not yet allowed to create CSRs.

# sudo journalctl -u kubelet -a | grep -A 2 'certificatesigningrequests'
Jan 16 10:57:58 k8s-node01 kubelet[13154]: F0116 10:57:58.720659   13154 server.go:262] failed to run Kubelet: cannot create certificate signing request: certificatesigningrequests.certificates.k8s.io is forbidden: User "system:bootstrap:sexqfs" cannot create resource "certificatesigningrequests" in API group "certificates.k8s.io" at the cluster scope
Jan 16 10:57:58 k8s-node01 kubelet[13154]: goroutine 1 [running]:
Jan 16 10:57:58 k8s-node01 kubelet[13154]: k8s.io/kubernetes/vendor/github.com/golang/glog.stacks(0xc420b42500, 0xc4208c6000, 0x137, 0x36f)
Grant the system:bootstrappers group permission to create CSRs:

# kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --group=system:bootstrappers
clusterrolebinding.rbac.authorization.k8s.io/kubelet-bootstrap created
systemctl daemon-reload
systemctl enable kubelet
systemctl restart kubelet
After startup, kubelet uses --bootstrap-kubeconfig to send a CSR to kube-apiserver; once the CSR is approved, kube-controller-manager issues a TLS client certificate and private key for the kubelet, and the kubeconfig referenced by --kubeconfig is written out.
Note: kube-controller-manager must be started with --cluster-signing-cert-file and --cluster-signing-key-file, otherwise it will not issue certificates and private keys for TLS bootstrapping.
At this point the kubelet process is running, but its listening ports are not yet up; the steps below are still required.
# kubectl get csr
NAME                                                   AGE     REQUESTOR                 CONDITION
node-csr-_66QdtyS-i4S8DVmFcT8O3TMqvj6I5tKbXIuzEIjHbY   3m46s   system:bootstrap:sexqfs   Pending
node-csr-c9EwBERPn8pjoCkYvX7jV-GansnNO4V2kPT3msYFVu4   3m46s   system:bootstrap:cpwqfo   Pending
node-csr-tPZAgKp8z-3nZMe4rPR2WEscJB-ox61VMQtijy6BO_M   3m46s   system:bootstrap:hfn1ki   Pending
# kubectl certificate approve node-csr-_66QdtyS-i4S8DVmFcT8O3TMqvj6I5tKbXIuzEIjHbY
certificatesigningrequest.certificates.k8s.io/node-csr-_66QdtyS-i4S8DVmFcT8O3TMqvj6I5tKbXIuzEIjHbY approved
# kubectl get csr
NAME                                                   AGE     REQUESTOR                 CONDITION
node-csr-_66QdtyS-i4S8DVmFcT8O3TMqvj6I5tKbXIuzEIjHbY   4m34s   system:bootstrap:sexqfs   Approved,Issued
node-csr-c9EwBERPn8pjoCkYvX7jV-GansnNO4V2kPT3msYFVu4   4m34s   system:bootstrap:cpwqfo   Pending
node-csr-tPZAgKp8z-3nZMe4rPR2WEscJB-ox61VMQtijy6BO_M   4m34s   system:bootstrap:hfn1ki   Pending
# kubectl describe csr node-csr-_66QdtyS-i4S8DVmFcT8O3TMqvj6I5tKbXIuzEIjHbY
Name:               node-csr-_66QdtyS-i4S8DVmFcT8O3TMqvj6I5tKbXIuzEIjHbY
Labels:             <none>
Annotations:        <none>
CreationTimestamp:  Wed, 16 Jan 2019 10:59:33 +0800
Requesting User:    system:bootstrap:sexqfs
Status:             Approved,Issued
Subject:
  Common Name:    system:node:192.168.2.111
  Serial Number:
  Organization:   system:nodes
Events:  <none>
# cat > csr-crb.yaml <<EOF
# Approve all CSRs for the group "system:bootstrappers"
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: auto-approve-csrs-for-group
subjects:
- kind: Group
  name: system:bootstrappers
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:nodeclient
  apiGroup: rbac.authorization.k8s.io
---
# To let a node of the group "system:nodes" renew its own credentials
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: node-client-cert-renewal
subjects:
- kind: Group
  name: system:nodes
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
  apiGroup: rbac.authorization.k8s.io
---
# A ClusterRole which instructs the CSR approver to approve a node requesting a
# serving cert matching its client cert.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: approve-node-server-renewal-csr
rules:
- apiGroups: ["certificates.k8s.io"]
  resources: ["certificatesigningrequests/selfnodeserver"]
  verbs: ["create"]
---
# To let a node of the group "system:nodes" renew its own server credentials
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: node-server-cert-renewal
subjects:
- kind: Group
  name: system:nodes
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: approve-node-server-renewal-csr
  apiGroup: rbac.authorization.k8s.io
EOF
node-server-cert-renewal: automatically approves renewal CSRs for a node's expiring server certificate; the automatically generated certificates carry the group system:nodes.
Apply the configuration:
# kubectl apply -f csr-crb.yaml
# kubectl get csr
NAME                                                   AGE   REQUESTOR                 CONDITION
node-csr-_66QdtyS-i4S8DVmFcT8O3TMqvj6I5tKbXIuzEIjHbY   21m   system:bootstrap:sexqfs   Approved,Issued
node-csr-c9EwBERPn8pjoCkYvX7jV-GansnNO4V2kPT3msYFVu4   21m   system:bootstrap:cpwqfo   Approved,Issued
node-csr-tPZAgKp8z-3nZMe4rPR2WEscJB-ox61VMQtijy6BO_M   21m   system:bootstrap:hfn1ki   Approved,Issued
# kubectl get node
NAME            STATUS   ROLES    AGE     VERSION
192.168.2.111   Ready    <none>   17m     v1.12.4
192.168.2.112   Ready    <none>   7m45s   v1.12.4
192.168.2.113   Ready    <none>   7m44s   v1.12.4
# tree /etc/kubernetes/server/
/etc/kubernetes/server/
├── bin
│   ├── kubectl
│   ├── kubelet
│   └── kube-proxy
├── cfg
│   ├── kubelet-bootstrap.kubeconfig
│   ├── kubelet.config.json
│   └── kubelet.kubeconfig
└── ssl
    ├── ca-key.pem
    ├── ca.pem
    ├── kubelet-client-2019-01-16-11-03-54.pem
    ├── kubelet-client-current.pem -> /etc/kubernetes/server/ssl/kubelet-client-2019-01-16-11-03-54.pem
    ├── kubelet.crt
    └── kubelet.key
# netstat -lnpt|grep kubelet
tcp        0      0 127.0.0.1:10248         0.0.0.0:*               LISTEN      13537/kubelet
tcp        0      0 192.168.2.111:10250     0.0.0.0:*               LISTEN      13537/kubelet
tcp        0      0 127.0.0.1:39767         0.0.0.0:*               LISTEN      13537/kubelet
Port 10250 serves the kubelet's HTTPS API; note that the read-only port 10255 is not enabled.

Because anonymous authentication is disabled and webhook authorization is enabled, every request to the HTTPS API on port 10250 must be authenticated and authorized.

For example, when running kubectl exec -it nginx-ds-5rmws -- sh, kube-apiserver sends the kubelet a request like:
POST /exec/default/nginx-ds-5rmws/my-nginx?command=sh&input=1&output=1&tty=1
# kubectl describe clusterrole system:kubelet-api-admin
Name:         system:kubelet-api-admin
Labels:       kubernetes.io/bootstrapping=rbac-defaults
Annotations:  rbac.authorization.kubernetes.io/autoupdate: true
PolicyRule:
  Resources      Non-Resource URLs  Resource Names  Verbs
  ---------      -----------------  --------------  -----
  nodes/log      []                 []              [*]
  nodes/metrics  []                 []              [*]
  nodes/proxy    []                 []              [*]
  nodes/spec     []                 []              [*]
  nodes/stats    []                 []              [*]
  nodes          []                 []              [get list watch proxy]
Requests without credentials, or with an invalid bearer token, are rejected:

# curl -s --cacert /etc/kubernetes/server/ssl/ca.pem https://192.168.2.111:10250/metrics
# curl -s --cacert /etc/kubernetes/server/ssl/ca.pem -H "Authorization: Bearer 123456" https://192.168.2.111:10250/metrics
Requests authenticated with a client certificate signed by the cluster CA succeed:

# curl -s --cacert /etc/kubernetes/server/ssl/ca.pem --cert /etc/kubernetes/server/ssl/kube-controller-manager.pem --key /etc/kubernetes/server/ssl/kube-controller-manager-key.pem https://192.168.2.111:10250/metrics
# curl -s --cacert /etc/kubernetes/server/ssl/ca.pem --cert /etc/kubernetes/server/ssl/admin.pem --key /etc/kubernetes/server/ssl/admin-key.pem https://192.168.2.111:10250/metrics|head
kubectl create sa kubelet-api-test
kubectl create clusterrolebinding kubelet-api-test --clusterrole=system:kubelet-api-admin --serviceaccount=default:kubelet-api-test

SECRET=$(kubectl get secrets | grep kubelet-api-test | awk '{print $1}')
TOKEN=$(kubectl describe secret ${SECRET} | grep -E '^token' | awk '{print $2}')
echo ${TOKEN}

curl -s --cacert /etc/kubernetes/server/ssl/ca.pem -H "Authorization: Bearer ${TOKEN}" https://192.168.2.111:10250/metrics|head
cat << EOF | tee kube-proxy-csr.json
{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "Beijing",
      "ST": "Beijing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
This certificate is used by kube-proxy only as a client certificate, so the hosts field is empty.
Generate the certificate:
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
mkdir /etc/kubernetes/server/{bin,cfg,ssl} -p
cp kube-proxy-key.pem kube-proxy.pem /etc/kubernetes/server/ssl/
kubectl config set-cluster kubernetes \
    --certificate-authority=/etc/kubernetes/server/ssl/ca.pem \
    --embed-certs=true \
    --server=https://192.168.2.100:8443 \
    --kubeconfig=kube-proxy.kubeconfig

kubectl config set-credentials kube-proxy \
    --client-certificate=/etc/kubernetes/server/ssl/kube-proxy.pem \
    --client-key=/etc/kubernetes/server/ssl/kube-proxy-key.pem \
    --embed-certs=true \
    --kubeconfig=kube-proxy.kubeconfig

kubectl config set-context default \
    --cluster=kubernetes \
    --user=kube-proxy \
    --kubeconfig=kube-proxy.kubeconfig

kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
# cat /usr/lib/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
WorkingDirectory=/var/lib/kube-proxy
ExecStart=/etc/kubernetes/server/bin/kube-proxy \
  --bind-address=192.168.2.111 \
  --hostname-override=k8s-node01 \
  --cluster-cidr=172.16.0.0/16 \
  --kubeconfig=/etc/kubernetes/server/cfg/kube-proxy.kubeconfig \
  --feature-gates=SupportIPVSProxyMode=true \
  --masquerade-all \
  --proxy-mode=ipvs \
  --ipvs-min-sync-period=5s \
  --ipvs-sync-period=5s \
  --ipvs-scheduler=rr \
  --alsologtostderr=true \
  --logtostderr=false \
  --log-dir=/var/lib/kube-proxy/log \
  --v=2
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
USER=root
for host in k8s-node01 k8s-node02 k8s-node03;do
    ssh "${USER}"@$host "mkdir -p /var/lib/kube-proxy/log"
    scp /usr/lib/systemd/system/kube-proxy.service "${USER}"@$host:/usr/lib/systemd/system/kube-proxy.service
    scp /etc/kubernetes/server/cfg/kube-proxy.kubeconfig "${USER}"@$host:/etc/kubernetes/server/cfg/kube-proxy.kubeconfig
    scp /etc/kubernetes/server/ssl/kube-proxy*.pem "${USER}"@$host:/etc/kubernetes/server/ssl/
done
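As with the kubelet files, the unit just copied still carries k8s-node01's --bind-address and --hostname-override; adjust them on the other two nodes (a sketch, same IP table as before):

declare -A NODE_IP=( [k8s-node02]=192.168.2.112 [k8s-node03]=192.168.2.113 )
for host in "${!NODE_IP[@]}"; do
    # Rewrite the node-specific address and host name in the unit file
    ssh root@$host "sed -i -e 's/192.168.2.111/${NODE_IP[$host]}/' -e 's/k8s-node01/$host/' \
        /usr/lib/systemd/system/kube-proxy.service"
done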
systemctl daemon-reload
systemctl enable kube-proxy
systemctl restart kube-proxy
systemctl status kube-proxy|grep Active
journalctl -u kube-proxy
# netstat -lnpt|grep kube-proxy
tcp        0      0 127.0.0.1:10249         0.0.0.0:*               LISTEN      21237/kube-proxy
tcp6       0      0 :::10256                :::*                    LISTEN      21237/kube-proxy
# ipvsadm -L -n
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  10.254.0.1:443 rr
  -> 192.168.2.101:6443           Masq    1      0          0
  -> 192.168.2.102:6443           Masq    1      0          0
  -> 192.168.2.103:6443           Masq    1      0          0
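kube-proxy also reports its mode on the metrics port seen in the netstat output above, if your build exposes the /proxyMode endpoint:

# curl -s localhost:10249/proxyMode
ipvs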
curl https://docs.projectcalico.org/v3.1/getting-started/kubernetes/installation/rbac.yaml -O
curl https://docs.projectcalico.org/v3.1/getting-started/kubernetes/installation/hosted/calico.yaml -O
ETCD_ENDPOINTS="https://192.168.2.101:2379,https://192.168.2.102:2379,https://192.168.2.103:2379"
sed -i "s#.*etcd_endpoints:.*#  etcd_endpoints: \"${ETCD_ENDPOINTS}\"#g" calico.yaml
sed -i "s#__ETCD_ENDPOINTS__#${ETCD_ENDPOINTS}#g" calico.yaml
ETCD_CERT=`cat /etc/kubernetes/etcd/ssl/etcd.pem | base64 | tr -d '\n'`
ETCD_KEY=`cat /etc/kubernetes/etcd/ssl/etcd-key.pem | base64 | tr -d '\n'`
ETCD_CA=`cat /etc/kubernetes/etcd/ssl/ca.pem | base64 | tr -d '\n'`

sed -i "s#.*etcd-cert:.*#  etcd-cert: ${ETCD_CERT}#g" calico.yaml
sed -i "s#.*etcd-key:.*#  etcd-key: ${ETCD_KEY}#g" calico.yaml
sed -i "s#.*etcd-ca:.*#  etcd-ca: ${ETCD_CA}#g" calico.yaml

sed -i 's#.*etcd_ca:.*#  etcd_ca: "/calico-secrets/etcd-ca"#g' calico.yaml
sed -i 's#.*etcd_cert:.*#  etcd_cert: "/calico-secrets/etcd-cert"#g' calico.yaml
sed -i 's#.*etcd_key:.*#  etcd_key: "/calico-secrets/etcd-key"#g' calico.yaml

sed -i "s#__ETCD_KEY_FILE__#/etc/kubernetes/etcd/ssl/etcd-key.pem#g" calico.yaml
sed -i "s#__ETCD_CERT_FILE__#/etc/kubernetes/etcd/ssl/etcd.pem#g" calico.yaml
sed -i "s#__ETCD_CA_CERT_FILE__#/etc/kubernetes/etcd/ssl/ca.pem#g" calico.yaml
sed -i "s#__KUBECONFIG_FILEPATH__#/etc/cni/net.d/calico-kubeconfig#g" calico.yaml
sed -i '/CALICO_IPV4POOL_IPIP/{n;s/Always/off/g}' calico.yaml
sed -i '/CALICO_IPV4POOL_CIDR/{n;s/192.168.0.0/172.16.0.0/g}' calico.yaml
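Before applying the manifest, a quick grep confirms the substitutions landed where expected:

grep -E -A1 'etcd_endpoints|CALICO_IPV4POOL_(IPIP|CIDR)' calico.yaml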
kubectl apply -f calico.yaml
Calico requires the following flags, all of which are already present in the configurations above:

- kube-apiserver and kubelet: --allow-privileged=true
- kubelet: --network-plugin=cni
- kube-proxy: --cluster-cidr=172.16.0.0/16
# kubectl get pod -n kube-system -o wide
NAME                                       READY   STATUS    RESTARTS   AGE   IP              NODE            NOMINATED NODE
calico-kube-controllers-7875f976cd-gxfdj   1/1     Running   1          20m   192.168.2.113   192.168.2.113   <none>
calico-node-78gtd                          2/2     Running   2          20m   192.168.2.111   192.168.2.111   <none>
calico-node-dxw6z                          2/2     Running   2          20m   192.168.2.113   192.168.2.113   <none>
calico-node-wvrxd                          2/2     Running   2          20m   192.168.2.112   192.168.2.112   <none>
wget https://github.com/kubernetes/kubernetes/releases/download/v1.12.4/kubernetes.tar.gz
tar -zxvf kubernetes.tar.gz
mkdir -p /etc/kubernetes/coredns
mv kubernetes/cluster/addons/dns/coredns/coredns.yaml.base /etc/kubernetes/coredns/coredns.yaml
sed -i 's#kubernetes __PILLAR__DNS__DOMAIN__#kubernetes cluster.local.#g' coredns.yaml
sed -i 's#clusterIP: __PILLAR__DNS__SERVER__#clusterIP: 10.254.0.2#g' coredns.yaml
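Again, verify the substitutions before applying:

grep -E 'cluster\.local|clusterIP' coredns.yaml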
kubectl apply -f coredns.yaml
kubectl get pod -n kube-system -o wide
NAME                                       READY   STATUS    RESTARTS   AGE     IP              NODE            NOMINATED NODE
calico-kube-controllers-7875f976cd-gxfdj   1/1     Running   1          20m     192.168.2.113   192.168.2.113   <none>
calico-node-78gtd                          2/2     Running   2          20m     192.168.2.111   192.168.2.111   <none>
calico-node-dxw6z                          2/2     Running   2          20m     192.168.2.113   192.168.2.113   <none>
calico-node-wvrxd                          2/2     Running   2          20m     192.168.2.112   192.168.2.112   <none>
coredns-74c656b9f-9f8l8                    1/1     Running   0          3m56s   172.16.70.131   192.168.2.113   <none>
kubectl get svc --all-namespaces
NAMESPACE     NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)         AGE
default       kubernetes   ClusterIP   10.254.0.1   <none>        443/TCP         24h
kube-system   kube-dns     ClusterIP   10.254.0.2   <none>        53/UDP,53/TCP   27s
# kubectl get nodes
NAME            STATUS   ROLES    AGE     VERSION
192.168.2.111   Ready    <none>   3h39m   v1.12.4
192.168.2.112   Ready    <none>   3h30m   v1.12.4
192.168.2.113   Ready    <none>   3h30m   v1.12.4
# cat > nginx-web.yml << EOF
apiVersion: v1
kind: Service
metadata:
  name: nginx-web
  labels:
    tier: frontend
spec:
  type: NodePort
  selector:
    tier: frontend
  ports:
  - name: http
    port: 80
    targetPort: 80
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: nginx-con
  labels:
    tier: frontend
spec:
  replicas: 3
  template:
    metadata:
      labels:
        tier: frontend
    spec:
      containers:
      - name: nginx-pod
        image: nginx
        ports:
        - containerPort: 80
EOF
kubectl create -f nginx-web.yml
# kubectl get pod -o wide
NAME                         READY   STATUS    RESTARTS   AGE   IP               NODE            NOMINATED NODE
nginx-con-594b8d6b48-47b5l   1/1     Running   0          12s   172.16.70.135    192.168.2.113   <none>
nginx-con-594b8d6b48-f2pzv   1/1     Running   0          12s   172.16.200.9     192.168.2.111   <none>
nginx-con-594b8d6b48-g99mm   1/1     Running   0          12s   172.16.141.196   192.168.2.112   <none>
# ping -c 3 172.16.70.135
PING 172.16.70.135 (172.16.70.135) 56(84) bytes of data.
64 bytes from 172.16.70.135: icmp_seq=1 ttl=63 time=0.346 ms
64 bytes from 172.16.70.135: icmp_seq=2 ttl=63 time=0.145 ms
64 bytes from 172.16.70.135: icmp_seq=3 ttl=63 time=0.161 ms

--- 172.16.70.135 ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 1999ms
rtt min/avg/max/mdev = 0.145/0.217/0.346/0.092 ms
# ping -c 3 172.16.200.9
PING 172.16.200.9 (172.16.200.9) 56(84) bytes of data.
64 bytes from 172.16.200.9: icmp_seq=1 ttl=63 time=0.261 ms
64 bytes from 172.16.200.9: icmp_seq=2 ttl=63 time=0.187 ms
64 bytes from 172.16.200.9: icmp_seq=3 ttl=63 time=0.221 ms

--- 172.16.200.9 ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 1999ms
rtt min/avg/max/mdev = 0.187/0.223/0.261/0.030 ms
# ping -c 3 172.16.141.196
PING 172.16.141.196 (172.16.141.196) 56(84) bytes of data.
64 bytes from 172.16.141.196: icmp_seq=1 ttl=63 time=0.379 ms
64 bytes from 172.16.141.196: icmp_seq=2 ttl=63 time=0.221 ms
64 bytes from 172.16.141.196: icmp_seq=3 ttl=63 time=0.233 ms

--- 172.16.141.196 ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 2000ms
rtt min/avg/max/mdev = 0.221/0.277/0.379/0.074 ms
# kubectl get svc
NAME         TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
kubernetes   ClusterIP   10.254.0.1      <none>        443/TCP        43h
nginx-web    NodePort    10.254.29.144   <none>        80:30945/TCP   11m
# curl -I 192.168.2.111:30945
HTTP/1.1 200 OK
Server: nginx/1.15.8
Date: Thu, 17 Jan 2019 03:43:21 GMT
Content-Type: text/html
Content-Length: 612
Last-Modified: Tue, 25 Dec 2018 09:56:47 GMT
Connection: keep-alive
ETag: "5c21fedf-264"
Accept-Ranges: bytes
# curl -I 10.254.29.144
HTTP/1.1 200 OK
Server: nginx/1.15.8
Date: Thu, 17 Jan 2019 03:44:06 GMT
Content-Type: text/html
Content-Length: 612
Last-Modified: Tue, 25 Dec 2018 09:56:47 GMT
Connection: keep-alive
ETag: "5c21fedf-264"
Accept-Ranges: bytes
# cat centos.yaml
apiVersion: v1
kind: Pod
metadata:
  name: centos-test
  namespace: default
spec:
  containers:
  - image: centos
    command:
    - sleep
    - "3600"
    imagePullPolicy: IfNotPresent
    name: centos-test
  restartPolicy: Always
kubectl create -f centos.yaml
# kubectl exec -it centos-test -- yum install bind-utils curl -y
# kubectl exec -it centos-test -- curl -I 192.168.2.100:30945
HTTP/1.1 200 OK
Server: nginx/1.15.8
Date: Thu, 17 Jan 2019 04:57:53 GMT
Content-Type: text/html
Content-Length: 612
Last-Modified: Tue, 25 Dec 2018 09:56:47 GMT
Connection: keep-alive
ETag: "5c21fedf-264"
Accept-Ranges: bytes
# kubectl exec -it centos-test -- curl -I nginx-web.default.svc.cluster.local
HTTP/1.1 200 OK
Server: nginx/1.15.8
Date: Thu, 17 Jan 2019 04:58:56 GMT
Content-Type: text/html
Content-Length: 612
Last-Modified: Tue, 25 Dec 2018 09:56:47 GMT
Connection: keep-alive
ETag: "5c21fedf-264"
Accept-Ranges: bytes
# kubectl exec -it centos-test -- nslookup nginx-web.default.svc.cluster.local
Server:    10.254.0.2
Address:   10.254.0.2#53

Name:   nginx-web.default.svc.cluster.local
Address: 10.254.29.144
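The cluster DNS can also be checked against the kubernetes service itself; it should resolve to the cluster IP 10.254.0.1 (expected output along these lines):

# kubectl exec -it centos-test -- nslookup kubernetes.default.svc.cluster.local
Server:    10.254.0.2
Address:   10.254.0.2#53

Name:   kubernetes.default.svc.cluster.local
Address: 10.254.0.1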
# cat > front-proxy-csr.json << EOF
{
  "CN": "system:front-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "Beijing",
      "ST": "Beijing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes front-proxy-csr.json | cfssljson -bare front-proxy
USER=root
CONTROL_PLANE_IPS="k8s-master01 k8s-master02 k8s-master03 k8s-node01 k8s-node02 k8s-node03"
for host in $CONTROL_PLANE_IPS; do
    scp front-proxy-key.pem front-proxy.pem "${USER}"@$host:/etc/kubernetes/server/ssl/
done
kube-controller-manager must run with --horizontal-pod-autoscaler-use-rest-clients=true (already present in the configuration above).
Append the following aggregation-layer flags to KUBE_APISERVER_OPTS in /etc/kubernetes/server/cfg/kube-apiserver (note that --requestheader-allowed-names must match the CN of the front-proxy certificate generated above):

--requestheader-client-ca-file=/etc/kubernetes/server/ssl/ca.pem \
--requestheader-allowed-names=system:front-proxy \
--requestheader-extra-headers-prefix=X-Remote-Extra- \
--requestheader-group-headers=X-Remote-Group \
--requestheader-username-headers=X-Remote-User \
--proxy-client-cert-file=/etc/kubernetes/server/ssl/front-proxy.pem \
--proxy-client-key-file=/etc/kubernetes/server/ssl/front-proxy-key.pem \
--enable-aggregator-routing=true
systemctl daemon-reload
systemctl restart kube-apiserver
systemctl restart kube-controller-manager
wget https://github.com/kubernetes/kubernetes/releases/download/v1.12.4/kubernetes.tar.gz
tar zxvf kubernetes.tar.gz
cp -a kubernetes/cluster/addons/metrics-server/ /etc/kubernetes/
# cat > metrics-server-deployment.yaml << EOF
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: metrics-server
  namespace: kube-system
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: metrics-server
  namespace: kube-system
  labels:
    k8s-app: metrics-server
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
  template:
    metadata:
      name: metrics-server
      labels:
        k8s-app: metrics-server
    spec:
      serviceAccountName: metrics-server
      volumes:
      # mount in tmp so we can safely use from-scratch images and/or read-only containers
      - name: tmp-dir
        emptyDir: {}
      containers:
      - name: metrics-server
        image: xiaoqshuo/metrics-server-amd64:v0.3.1
        imagePullPolicy: Always
        command:
        - /metrics-server
        - --kubelet-insecure-tls
        - --kubelet-preferred-address-types=InternalIP
        volumeMounts:
        - name: tmp-dir
          mountPath: /tmp
EOF
# kubectl apply -f metrics-server
# kubectl get -n kube-system all -o wide | grep metrics
pod/metrics-server-56f4b88678-x9djk   1/1   Running   0   26m   172.16.200.12   192.168.2.111   <none>
service/metrics-server   ClusterIP   10.254.130.198   <none>   443/TCP   65m   k8s-app=metrics-server
deployment.apps/metrics-server   1   1   1   1   34m   metrics-server   xiaoqshuo/metrics-server-amd64:v0.3.1   k8s-app=metrics-server
replicaset.apps/metrics-server-56f4b88678   1   1   1   26m   metrics-server   xiaoqshuo/metrics-server-amd64:v0.3.1   k8s-app=metrics-server,pod-template-hash=56f4b88678
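Once the metrics API has collected a scrape or two, node and pod usage become queryable through kubectl (the numbers below are placeholders):

# kubectl top node
NAME            CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%
192.168.2.111   ...          ...    ...             ...
192.168.2.112   ...          ...    ...             ...
192.168.2.113   ...          ...    ...             ...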
wget https://raw.githubusercontent.com/kubernetes/dashboard/v1.10.1/src/deploy/recommended/kubernetes-dashboard.yaml
Edit the Service section of kubernetes-dashboard.yaml so the dashboard is exposed through a NodePort:

# ------------------- Dashboard Service ------------------- #
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  type: NodePort
  ports:
  - port: 443
    targetPort: 8443
    nodePort: 30000
  selector:
    k8s-app: kubernetes-dashboard
# cat > user-admin.yaml << EOF
# ------------------- ServiceAccount ------------------- #
apiVersion: v1
kind: ServiceAccount
metadata:
  name: user-admin
  namespace: kube-system

---
# ------------------- ClusterRoleBinding ------------------- #
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: user-admin
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: user-admin
  namespace: kube-system
EOF
kubectl apply -f kubernetes-dashboard.yaml
kubectl apply -f user-admin.yaml
# kubectl get -n kube-system all -o wide | grep dashboard
pod/kubernetes-dashboard-66468c4f76-nfdwv   1/1   Running   0   20m   172.16.195.1   192.168.2.103   <none>
service/kubernetes-dashboard   NodePort   10.254.58.73   <none>   443:30000/TCP   21m   k8s-app=kubernetes-dashboard
deployment.apps/kubernetes-dashboard   1   1   1   1   21m   kubernetes-dashboard   xiaoqshuo/kubernetes-dashboard-amd64:v1.10.1   k8s-app=kubernetes-dashboard
replicaset.apps/kubernetes-dashboard-66468c4f76   1   1   1   20m   kubernetes-dashboard   xiaoqshuo/kubernetes-dashboard-amd64:v1.10.1   k8s-app=kubernetes-dashboard,pod-template-hash=66468c4f76
kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep user-admin | awk '{print $1}')
If Chrome refuses the dashboard's self-signed certificate, it can be started with:

--test-type --ignore-certificate-errors
# Generate a certificate (replace 172.23.0.217 with the address you will use to reach the dashboard)
openssl genrsa -out dashboard.key 2048
openssl req -days 3650 -new -out dashboard.csr -key dashboard.key -subj '/CN=172.23.0.217'
openssl x509 -req -in dashboard.csr -signkey dashboard.key -out dashboard.crt

# Delete the old certificate secret
kubectl delete secret kubernetes-dashboard-certs -n kube-system

# Create a new certificate secret
kubectl create secret generic kubernetes-dashboard-certs --from-file=dashboard.key --from-file=dashboard.crt -n kube-system
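You can sanity-check the new certificate and then restart the dashboard pod so it picks up the recreated secret (the label selector matches the upstream manifest):

openssl x509 -in dashboard.crt -noout -subject -dates
kubectl delete pod -n kube-system -l k8s-app=kubernetes-dashboard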