Cluster topology:
linux-node1: 192.168.56.11 --- master; services deployed: etcd kube-apiserver kube-controller-manager kube-scheduler docker
linux-node2: 192.168.56.12 --- node; services deployed: etcd kubelet kube-proxy docker
linux-node3: 192.168.56.13 --- node; services deployed: etcd kubelet kube-proxy docker
1. Set the hostnames
hostnamectl set-hostname linux-node1
hostnamectl set-hostname linux-node2
hostnamectl set-hostname linux-node3
----------
2. Set up passwordless SSH login from the deployment node to all other nodes (including itself)
[root@linux-node1 ~]# ssh-keygen -t rsa
[root@linux-node1 ~]# ssh-copy-id linux-node1
[root@linux-node1 ~]# ssh-copy-id linux-node2
[root@linux-node1 ~]# ssh-copy-id linux-node3
----------
3. Bind the hostnames in /etc/hosts
cat > /etc/hosts <<EOF
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.56.11 linux-node1
192.168.56.12 linux-node2
192.168.56.13 linux-node3
EOF
----------
4. Disable the firewall and SELinux
systemctl disable firewalld
systemctl stop firewalld
# Disable SELinux (only SELINUX needs to change; SELINUXTYPE is the policy type and should stay "targeted")
sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/sysconfig/selinux
----------
5. Miscellaneous configuration
yum install -y ntpdate wget lrzsz vim net-tools
# Add time sync to crontab
1 * * * * /usr/sbin/ntpdate ntp1.aliyun.com >/dev/null 2>&1
# vim /etc/profile -- highlighted shell prompt
export PS1="\[\e]0;\a\]\n\[\e[1;32m\]\[\e[1;33m\]\H\[\e[1;35m\]<\$(date +\"%Y-%m-%d %T\")> \[\e[32m\]\w\[\e[0m\]\n\u>\\$ "
# Set the timezone
cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
# Fix slow SSH logins
sed -i "s/#UseDNS yes/UseDNS no/" /etc/ssh/sshd_config
sed -i "s/GSSAPIAuthentication yes/GSSAPIAuthentication no/" /etc/ssh/sshd_config
systemctl restart sshd.service
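With passwordless SSH in place, the prep work on every node can be spot-checked from linux-node1 in one pass. A minimal sketch, assuming the three-node inventory above (note that getenforce only reports Disabled after a reboot):

for node in linux-node1 linux-node2 linux-node3; do
    ssh $node "hostname; getenforce; systemctl is-active firewalld"
done
# Expected per node: its hostname, Disabled (after reboot), and "inactive" for firewalld.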
Step 1: Use a China-local Docker repository
cd /etc/yum.repos.d/
wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
Step 2: Install Docker
yum install -y docker-ce
Step 3: Start the daemon
[root@linux-node2 ~]# systemctl start docker
[root@linux-node2 ~]# systemctl enable docker
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.
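A quick sanity check confirms the daemon answers on each node before moving on (both the Client and Server sections of docker version should print):

[root@linux-node2 ~]# docker version
[root@linux-node2 ~]# docker info | grep -i 'storage driver'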
1. Prepare the deployment directories
[root@linux-node1 ~]# mkdir -p /opt/kubernetes/{cfg,bin,ssl,log}
# Add to PATH (single quotes so $PATH expands at login, not at echo time)
[root@linux-node1 ~]# echo 'PATH=$PATH:/opt/kubernetes/bin' >> /etc/profile
[root@linux-node1 ~]# source /etc/profile
or:
[root@linux-node1 ~]# vim .bash_profile
PATH=$PATH:$HOME/bin:/opt/kubernetes/bin
[root@linux-node1 ~]# source .bash_profile
2. Download the packages
# GitHub download links:
https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.10.md#server-binaries
wget https://storage.googleapis.com/kubernetes-release/release/v1.10.8/kubernetes-server-linux-amd64.tar.gz
wget https://storage.googleapis.com/kubernetes-release/release/v1.10.8/kubernetes-client-linux-amd64.tar.gz
wget https://storage.googleapis.com/kubernetes-release/release/v1.10.8/kubernetes-node-linux-amd64.tar.gz
wget https://storage.googleapis.com/kubernetes-release/release/v1.10.8/kubernetes.tar.gz
3. Unpack the packages
tar -zxvf kubernetes.tar.gz -C /usr/local/src/
tar -zxvf kubernetes-server-linux-amd64.tar.gz -C /usr/local/src/
tar -zxvf kubernetes-client-linux-amd64.tar.gz -C /usr/local/src/
tar -zxvf kubernetes-node-linux-amd64.tar.gz -C /usr/local/src/
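Before copying binaries around in the later sections, it can help to confirm the unpacked tree has the layout those steps expect (the paths below are referenced throughout this guide):

[root@linux-node1 ~]# ls /usr/local/src/kubernetes/server/bin/ | grep kube
[root@linux-node1 ~]# ls /usr/local/src/kubernetes/client/bin/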
1. Install CFSSL
[root@linux-node1 ~]# cd /usr/local/src
[root@linux-node1 src]# wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
[root@linux-node1 src]# wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
[root@linux-node1 src]# wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
[root@linux-node1 src]# chmod +x cfssl*
[root@linux-node1 src]# mv cfssl-certinfo_linux-amd64 /opt/kubernetes/bin/cfssl-certinfo
[root@linux-node1 src]# mv cfssljson_linux-amd64 /opt/kubernetes/bin/cfssljson
[root@linux-node1 src]# mv cfssl_linux-amd64 /opt/kubernetes/bin/cfssl
# Copy the cfssl binaries to linux-node2 and linux-node3. In a real deployment with more nodes, copy them to every node.
[root@linux-node1 ~]# scp /opt/kubernetes/bin/cfssl* 192.168.56.12:/opt/kubernetes/bin
[root@linux-node1 ~]# scp /opt/kubernetes/bin/cfssl* 192.168.56.13:/opt/kubernetes/bin
2. Initialize cfssl
[root@linux-node1 src]# mkdir ssl && cd ssl
[root@linux-node1 ssl]# cfssl print-defaults config > config.json    # sample for ca-config.json (optional)
[root@linux-node1 ssl]# cfssl print-defaults csr > csr.json          # sample for ca-csr.json (optional)
3. Create the JSON config used to generate the CA file
[root@linux-node1 ssl]# cat > ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
        "expiry": "87600h",
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ]
      }
    }
  }
}
EOF
4. Create the JSON config for the CA certificate signing request (CSR)
[root@linux-node1 ssl]# cat > ca-csr.json <<EOF
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "Beijing",
      "ST": "Beijing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
5. Generate the CA certificate (ca.pem) and key (ca-key.pem)
[root@linux-node1 ssl]# cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
# The command above produces the following three files: ca.csr ca-key.pem ca.pem
[root@linux-node1 ssl]# ls -l ca*
-rw-r--r-- 1 root root  290 Mar  4 13:45 ca-config.json
-rw-r--r-- 1 root root 1001 Mar  4 14:09 ca.csr
-rw-r--r-- 1 root root  208 Mar  4 13:51 ca-csr.json
-rw------- 1 root root 1679 Mar  4 14:09 ca-key.pem
-rw-r--r-- 1 root root 1359 Mar  4 14:09 ca.pem
6. Distribute the certificates
[root@linux-node1 ssl]# cp ca.csr ca.pem ca-key.pem ca-config.json /opt/kubernetes/ssl
# scp the certificates to linux-node2 and linux-node3
[root@linux-node1 ssl]# scp ca.csr ca.pem ca-key.pem ca-config.json 192.168.56.12:/opt/kubernetes/ssl
[root@linux-node1 ssl]# scp ca.csr ca.pem ca-key.pem ca-config.json 192.168.56.13:/opt/kubernetes/ssl
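With the CA generated, cfssl-certinfo (installed above) can decode it to verify the CN and the 87600h expiry landed as configured:

[root@linux-node1 ssl]# cfssl-certinfo -cert /opt/kubernetes/ssl/ca.pem | grep -E '"common_name"|"not_after"'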
0. Prepare the etcd package
wget https://github.com/coreos/etcd/releases/download/v3.2.18/etcd-v3.2.18-linux-amd64.tar.gz
[root@linux-node1 src]# tar zxf etcd-v3.2.18-linux-amd64.tar.gz
[root@linux-node1 src]# cd etcd-v3.2.18-linux-amd64
[root@linux-node1 etcd-v3.2.18-linux-amd64]# cp etcd etcdctl /opt/kubernetes/bin/
[root@linux-node1 etcd-v3.2.18-linux-amd64]# scp etcd etcdctl 192.168.56.12:/opt/kubernetes/bin/
[root@linux-node1 etcd-v3.2.18-linux-amd64]# scp etcd etcdctl 192.168.56.13:/opt/kubernetes/bin/
1. Create the etcd certificate signing request:
[root@linux-node1 ~]# cd /usr/local/src/ssl
cat > etcd-csr.json <<EOF
{
  "CN": "etcd",
  "hosts": [
    "127.0.0.1",
    "192.168.56.11",
    "192.168.56.12",
    "192.168.56.13"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
2. Generate the etcd certificate and private key:
[root@linux-node1 ssl]# cfssl gencert -ca=/opt/kubernetes/ssl/ca.pem \
   -ca-key=/opt/kubernetes/ssl/ca-key.pem \
   -config=/opt/kubernetes/ssl/ca-config.json \
   -profile=kubernetes etcd-csr.json | cfssljson -bare etcd
# The following certificate files are produced:
[root@linux-node1 ssl]# ls -l etcd*
-rw-r--r-- 1 root root 1045 Mar  5 11:27 etcd.csr
-rw-r--r-- 1 root root  257 Mar  5 11:25 etcd-csr.json
-rw------- 1 root root 1679 Mar  5 11:27 etcd-key.pem
-rw-r--r-- 1 root root 1419 Mar  5 11:27 etcd.pem
3. Move the certificates to /opt/kubernetes/ssl
[root@linux-node1 ssl]# cp etcd*.pem /opt/kubernetes/ssl
[root@linux-node1 ssl]# scp etcd*.pem 192.168.56.12:/opt/kubernetes/ssl
[root@linux-node1 ssl]# scp etcd*.pem 192.168.56.13:/opt/kubernetes/ssl
[root@linux-node1 ssl]# rm -f etcd.csr etcd-csr.json
4. Create the etcd configuration file
[root@linux-node1 ~]# vim /opt/kubernetes/cfg/etcd.conf
#[member]
ETCD_NAME="etcd-node1"
ETCD_DATA_DIR="/var/lib/etcd"
#ETCD_SNAPSHOT_COUNTER="10000"
#ETCD_HEARTBEAT_INTERVAL="100"
#ETCD_ELECTION_TIMEOUT="1000"
ETCD_LISTEN_PEER_URLS="https://192.168.56.11:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.56.11:2379,https://127.0.0.1:2379"
#ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
#ETCD_CORS=""
#[cluster]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.56.11:2380"
# if you use different ETCD_NAME (e.g. test),
# set ETCD_INITIAL_CLUSTER value for this name, i.e. "test=http://..."
ETCD_INITIAL_CLUSTER="etcd-node1=https://192.168.56.11:2380,etcd-node2=https://192.168.56.12:2380,etcd-node3=https://192.168.56.13:2380" ETCD_INITIAL_CLUSTER_STATE="new" ETCD_INITIAL_CLUSTER_TOKEN="k8s-etcd-cluster" ETCD_ADVERTISE_CLIENT_URLS="https://192.168.56.11:2379" #[security] CLIENT_CERT_AUTH="true" ETCD_CA_FILE="/opt/kubernetes/ssl/ca.pem" ETCD_CERT_FILE="/opt/kubernetes/ssl/etcd.pem" ETCD_KEY_FILE="/opt/kubernetes/ssl/etcd-key.pem" PEER_CLIENT_CERT_AUTH="true" ETCD_PEER_CA_FILE="/opt/kubernetes/ssl/ca.pem" ETCD_PEER_CERT_FILE="/opt/kubernetes/ssl/etcd.pem" ETCD_PEER_KEY_FILE="/opt/kubernetes/ssl/etcd-key.pem" 5.建立ETCD系統服務 [root@linux-node1 ~]# vim /etc/systemd/system/etcd.service [Unit] Description=Etcd Server After=network.target [Service] Type=simple WorkingDirectory=/var/lib/etcd EnvironmentFile=-/opt/kubernetes/cfg/etcd.conf # set GOMAXPROCS to number of processors ExecStart=/bin/bash -c "GOMAXPROCS=$(nproc) /opt/kubernetes/bin/etcd" Type=notify [Install] WantedBy=multi-user.target 6.將文件同步到其餘節點(並修改差別的地方) [root@linux-node1 ~]# scp /opt/kubernetes/cfg/etcd.conf 192.168.56.12:/opt/kubernetes/cfg/ [root@linux-node1 ~]# scp /etc/systemd/system/etcd.service 192.168.56.12:/etc/systemd/system/ [root@linux-node1 ~]# scp /opt/kubernetes/cfg/etcd.conf 192.168.56.13:/opt/kubernetes/cfg/ [root@linux-node1 ~]# scp /etc/systemd/system/etcd.service 192.168.56.13:/etc/systemd/system/ #node2的etcd.conf配置 [root@linux-node2 ~]# vim /opt/kubernetes/cfg/etcd.conf #[member] ETCD_NAME="etcd-node2" ETCD_DATA_DIR="/var/lib/etcd" #ETCD_SNAPSHOT_COUNTER="10000" #ETCD_HEARTBEAT_INTERVAL="100" #ETCD_ELECTION_TIMEOUT="1000" ETCD_LISTEN_PEER_URLS="https://192.168.56.12:2380" ETCD_LISTEN_CLIENT_URLS="https://192.168.56.12:2379,https://127.0.0.1:2379" #ETCD_MAX_SNAPSHOTS="5" #ETCD_MAX_WALS="5" #ETCD_CORS="" #[cluster] ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.56.12:2380" # if you use different ETCD_NAME (e.g. test), # set ETCD_INITIAL_CLUSTER value for this name, i.e. "test=http://..." ETCD_INITIAL_CLUSTER="etcd-node1=https://192.168.56.11:2380,etcd-node2=https://192.168.56.12:2380,etcd-node3=https://192.168.56.13:2380" ETCD_INITIAL_CLUSTER_STATE="new" ETCD_INITIAL_CLUSTER_TOKEN="k8s-etcd-cluster" ETCD_ADVERTISE_CLIENT_URLS="https://192.168.56.12:2379" #[security] CLIENT_CERT_AUTH="true" ETCD_CA_FILE="/opt/kubernetes/ssl/ca.pem" ETCD_CERT_FILE="/opt/kubernetes/ssl/etcd.pem" ETCD_KEY_FILE="/opt/kubernetes/ssl/etcd-key.pem" PEER_CLIENT_CERT_AUTH="true" ETCD_PEER_CA_FILE="/opt/kubernetes/ssl/ca.pem" ETCD_PEER_CERT_FILE="/opt/kubernetes/ssl/etcd.pem" ETCD_PEER_KEY_FILE="/opt/kubernetes/ssl/etcd-key.pem" #node3的etcd.conf配置 [root@linux-node3 ~]# vim /opt/kubernetes/cfg/etcd.conf #[member] ETCD_NAME="etcd-node3" ETCD_DATA_DIR="/var/lib/etcd" #ETCD_SNAPSHOT_COUNTER="10000" #ETCD_HEARTBEAT_INTERVAL="100" #ETCD_ELECTION_TIMEOUT="1000" ETCD_LISTEN_PEER_URLS="https://192.168.56.13:2380" ETCD_LISTEN_CLIENT_URLS="https://192.168.56.13:2379,https://127.0.0.1:2379" #ETCD_MAX_SNAPSHOTS="5" #ETCD_MAX_WALS="5" #ETCD_CORS="" #[cluster] ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.56.13:2380" # if you use different ETCD_NAME (e.g. test), # set ETCD_INITIAL_CLUSTER value for this name, i.e. "test=http://..." 
ETCD_INITIAL_CLUSTER="etcd-node1=https://192.168.56.11:2380,etcd-node2=https://192.168.56.12:2380,etcd-node3=https://192.168.56.13:2380" ETCD_INITIAL_CLUSTER_STATE="new" ETCD_INITIAL_CLUSTER_TOKEN="k8s-etcd-cluster" ETCD_ADVERTISE_CLIENT_URLS="https://192.168.56.13:2379" #[security] CLIENT_CERT_AUTH="true" ETCD_CA_FILE="/opt/kubernetes/ssl/ca.pem" ETCD_CERT_FILE="/opt/kubernetes/ssl/etcd.pem" ETCD_KEY_FILE="/opt/kubernetes/ssl/etcd-key.pem" PEER_CLIENT_CERT_AUTH="true" ETCD_PEER_CA_FILE="/opt/kubernetes/ssl/ca.pem" ETCD_PEER_CERT_FILE="/opt/kubernetes/ssl/etcd.pem" ETCD_PEER_KEY_FILE="/opt/kubernetes/ssl/etcd-key.pem" 在全部節點上建立etcd存儲目錄並啓動etcd [root@linux-node1 ~]# mkdir /var/lib/etcd [root@linux-node1 ~]# systemctl daemon-reload [root@linux-node1 ~]# systemctl enable etcd [root@linux-node1 ~]# systemctl restart etcd [root@linux-node1 ~]# systemctl status etcd 以上須要你們在全部的 etcd 節點重複上面的步驟,直到全部機器的 etcd 服務都已啓動。 7.驗證集羣 [root@linux-node1 ~]# etcdctl --endpoints=https://192.168.56.11:2379 \ --ca-file=/opt/kubernetes/ssl/ca.pem \ --cert-file=/opt/kubernetes/ssl/etcd.pem \ --key-file=/opt/kubernetes/ssl/etcd-key.pem cluster-health member 435fb0a8da627a4c is healthy: got healthy result from https://192.168.56.12:2379 member 6566e06d7343e1bb is healthy: got healthy result from https://192.168.56.11:2379 member ce7b884e428b6c8c is healthy: got healthy result from https://192.168.56.13:2379 cluster is healthy
Deploying the Kubernetes API Server
0. Prepare the binaries
[root@linux-node1 ~]# cd /usr/local/src/kubernetes
[root@linux-node1 kubernetes]# cp server/bin/kube-apiserver /opt/kubernetes/bin/
[root@linux-node1 kubernetes]# cp server/bin/kube-controller-manager /opt/kubernetes/bin/
[root@linux-node1 kubernetes]# cp server/bin/kube-scheduler /opt/kubernetes/bin/
1. Create the JSON config used to generate the CSR
[root@linux-node1 ~]# cd /usr/local/src/ssl
[root@linux-node1 ssl]# cat > kubernetes-csr.json <<EOF
{
  "CN": "kubernetes",
  "hosts": [
    "127.0.0.1",
    "192.168.56.11",
    "10.1.0.1",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
2. Generate the kubernetes certificate and private key
[root@linux-node1 ssl]# cfssl gencert -ca=/opt/kubernetes/ssl/ca.pem \
   -ca-key=/opt/kubernetes/ssl/ca-key.pem \
   -config=/opt/kubernetes/ssl/ca-config.json \
   -profile=kubernetes kubernetes-csr.json | cfssljson -bare kubernetes
# The command above produces the following two files: kubernetes-key.pem kubernetes.pem
[root@linux-node1 ssl]# cp kubernetes*.pem /opt/kubernetes/ssl/
[root@linux-node1 ssl]# scp kubernetes*.pem 192.168.56.12:/opt/kubernetes/ssl/
[root@linux-node1 ssl]# scp kubernetes*.pem 192.168.56.13:/opt/kubernetes/ssl/
3. Create the client token file used by kube-apiserver
[root@linux-node1 ~]# head -c 16 /dev/urandom | od -An -t x | tr -d ' '
ad6d5bb607a186796d8861557df0d17f
[root@linux-node1 ~]# vim /opt/kubernetes/ssl/bootstrap-token.csv
ad6d5bb607a186796d8861557df0d17f,kubelet-bootstrap,10001,"system:kubelet-bootstrap"
4. Create the basic username/password auth config
[root@linux-node1 ~]# vim /opt/kubernetes/ssl/basic-auth.csv
admin,admin,1
readonly,readonly,2
5. Deploy the Kubernetes API Server
[root@linux-node1 ~]# vim /usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
ExecStart=/opt/kubernetes/bin/kube-apiserver \
  --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota,NodeRestriction \
  --bind-address=192.168.56.11 \
  --insecure-bind-address=127.0.0.1 \
  --authorization-mode=Node,RBAC \
  --runtime-config=rbac.authorization.k8s.io/v1 \
  --kubelet-https=true \
  --anonymous-auth=false \
  --basic-auth-file=/opt/kubernetes/ssl/basic-auth.csv \
  --enable-bootstrap-token-auth \
  --token-auth-file=/opt/kubernetes/ssl/bootstrap-token.csv \
  --service-cluster-ip-range=10.1.0.0/16 \
  --service-node-port-range=20000-40000 \
  --tls-cert-file=/opt/kubernetes/ssl/kubernetes.pem \
  --tls-private-key-file=/opt/kubernetes/ssl/kubernetes-key.pem \
  --client-ca-file=/opt/kubernetes/ssl/ca.pem \
  --service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \
  --etcd-cafile=/opt/kubernetes/ssl/ca.pem \
  --etcd-certfile=/opt/kubernetes/ssl/kubernetes.pem \
  --etcd-keyfile=/opt/kubernetes/ssl/kubernetes-key.pem \
  --etcd-servers=https://192.168.56.11:2379,https://192.168.56.12:2379,https://192.168.56.13:2379 \
  --enable-swagger-ui=true \
  --allow-privileged=true \
  --audit-log-maxage=30 \
  --audit-log-maxbackup=3 \
  --audit-log-maxsize=100 \
  --audit-log-path=/opt/kubernetes/log/api-audit.log \
  --event-ttl=1h \
  --v=2 \
  --logtostderr=false \
  --log-dir=/opt/kubernetes/log
Restart=on-failure
RestartSec=5
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
6. Start the API Server service
[root@linux-node1 ~]# systemctl daemon-reload
[root@linux-node1 ~]# systemctl enable kube-apiserver
[root@linux-node1 ~]# systemctl start kube-apiserver
Check the API Server status
[root@linux-node1 ~]# systemctl status kube-apiserver
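Because the unit file binds the insecure port to loopback (--insecure-bind-address=127.0.0.1, default port 8080), a local health probe needs no credentials and should answer "ok":

[root@linux-node1 ~]# curl http://127.0.0.1:8080/healthz
ok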
Deploying the Controller Manager service
1. Create the kube-controller-manager systemd service
[root@linux-node1 ~]# vim /usr/lib/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
ExecStart=/opt/kubernetes/bin/kube-controller-manager \
  --address=127.0.0.1 \
  --master=http://127.0.0.1:8080 \
  --allocate-node-cidrs=true \
  --service-cluster-ip-range=10.1.0.0/16 \
  --cluster-cidr=10.2.0.0/16 \
  --cluster-name=kubernetes \
  --cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \
  --cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem \
  --service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem \
  --root-ca-file=/opt/kubernetes/ssl/ca.pem \
  --leader-elect=true \
  --v=2 \
  --logtostderr=false \
  --log-dir=/opt/kubernetes/log
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
2. Start the Controller Manager
[root@linux-node1 ~]# systemctl daemon-reload
[root@linux-node1 ~]# systemctl enable kube-controller-manager
[root@linux-node1 ~]# systemctl start kube-controller-manager
3. Check the service status
[root@linux-node1 ~]# systemctl status kube-controller-manager
Deploying the Kubernetes Scheduler
1. Create the kube-scheduler systemd service
[root@linux-node1 ~]# vim /usr/lib/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
ExecStart=/opt/kubernetes/bin/kube-scheduler \
  --address=127.0.0.1 \
  --master=http://127.0.0.1:8080 \
  --leader-elect=true \
  --v=2 \
  --logtostderr=false \
  --log-dir=/opt/kubernetes/log
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
2. Deploy the service
[root@linux-node1 ~]# systemctl daemon-reload
[root@linux-node1 ~]# systemctl enable kube-scheduler
[root@linux-node1 ~]# systemctl start kube-scheduler
[root@linux-node1 ~]# systemctl status kube-scheduler
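Both control-plane components expose unauthenticated /healthz endpoints on their default insecure ports (10252 for the controller manager, 10251 for the scheduler in v1.10), which gives a quick local check before kubectl is configured:

[root@linux-node1 ~]# curl http://127.0.0.1:10252/healthz   # kube-controller-manager
[root@linux-node1 ~]# curl http://127.0.0.1:10251/healthz   # kube-scheduler
# each should return: ok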
Deploying the kubectl command-line tool
1. Prepare the binary
[root@linux-node1 ~]# cd /usr/local/src/kubernetes/client/bin
[root@linux-node1 bin]# cp kubectl /opt/kubernetes/bin/
# kubectl also needs to be copied to the node machines
[root@linux-node1 bin]# scp /opt/kubernetes/bin/kubectl linux-node2:/opt/kubernetes/bin/
[root@linux-node1 bin]# scp /opt/kubernetes/bin/kubectl linux-node3:/opt/kubernetes/bin/
2. Create the admin certificate signing request
[root@linux-node1 ~]# cd /usr/local/src/ssl/
[root@linux-node1 ssl]# cat > admin-csr.json <<EOF
{
  "CN": "admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}
EOF
3. Generate the admin certificate and private key:
[root@linux-node1 ssl]# cfssl gencert -ca=/opt/kubernetes/ssl/ca.pem \
   -ca-key=/opt/kubernetes/ssl/ca-key.pem \
   -config=/opt/kubernetes/ssl/ca-config.json \
   -profile=kubernetes admin-csr.json | cfssljson -bare admin
[root@linux-node1 ssl]# ls -l admin*
-rw-r--r-- 1 root root 1009 Mar  5 12:29 admin.csr
-rw-r--r-- 1 root root  229 Mar  5 12:28 admin-csr.json
-rw------- 1 root root 1675 Mar  5 12:29 admin-key.pem
-rw-r--r-- 1 root root 1399 Mar  5 12:29 admin.pem
[root@linux-node1 ssl]# mv admin*.pem /opt/kubernetes/ssl/
4. Set the cluster parameters
[root@linux-node1 ssl]# kubectl config set-cluster kubernetes \
   --certificate-authority=/opt/kubernetes/ssl/ca.pem \
   --embed-certs=true \
   --server=https://192.168.56.11:6443
Cluster "kubernetes" set.
5. Set the client credentials
[root@linux-node1 ssl]# kubectl config set-credentials admin \
   --client-certificate=/opt/kubernetes/ssl/admin.pem \
   --embed-certs=true \
   --client-key=/opt/kubernetes/ssl/admin-key.pem
User "admin" set.
6. Set the context parameters
[root@linux-node1 ssl]# kubectl config set-context kubernetes \
   --cluster=kubernetes \
   --user=admin
Context "kubernetes" created.
7. Set the default context
[root@linux-node1 ssl]# kubectl config use-context kubernetes
Switched to context "kubernetes".
8. Use the kubectl tool
[root@linux-node1 ~]# kubectl get cs
NAME                 STATUS    MESSAGE              ERROR
controller-manager   Healthy   ok
scheduler            Healthy   ok
etcd-1               Healthy   {"health":"true"}
etcd-2               Healthy   {"health":"true"}
etcd-0               Healthy   {"health":"true"}
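Steps 4-7 write their result into ~/.kube/config; the merged file can be reviewed at any time (the embedded certificate blobs are redacted in the output):

[root@linux-node1 ~]# kubectl config view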
Deploying kubelet
1. Prepare the binaries
Copy the binaries from linux-node1 to the node machines.
[root@linux-node1 ~]# cd /usr/local/src/kubernetes/server/bin/
[root@linux-node1 bin]# cp kubelet kube-proxy /opt/kubernetes/bin/
[root@linux-node1 bin]# scp kubelet kube-proxy 192.168.56.12:/opt/kubernetes/bin/
[root@linux-node1 bin]# scp kubelet kube-proxy 192.168.56.13:/opt/kubernetes/bin/
2. Create the role binding
[root@linux-node1 ~]# kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap
clusterrolebinding "kubelet-bootstrap" created
3. Create the kubelet bootstrapping kubeconfig file
Set the cluster parameters:
[root@linux-node1 ~]# kubectl config set-cluster kubernetes \
   --certificate-authority=/opt/kubernetes/ssl/ca.pem \
   --embed-certs=true \
   --server=https://192.168.56.11:6443 \
   --kubeconfig=bootstrap.kubeconfig
Cluster "kubernetes" set.
Set the client credentials:
[root@linux-node1 ~]# kubectl config set-credentials kubelet-bootstrap \
   --token=ad6d5bb607a186796d8861557df0d17f \
   --kubeconfig=bootstrap.kubeconfig
User "kubelet-bootstrap" set.
Set the context parameters:
[root@linux-node1 ~]# kubectl config set-context default \
   --cluster=kubernetes \
   --user=kubelet-bootstrap \
   --kubeconfig=bootstrap.kubeconfig
Context "default" created.
Select the default context:
[root@linux-node1 ~]# kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
Switched to context "default".
[root@linux-node1 ~]# cp bootstrap.kubeconfig /opt/kubernetes/cfg
[root@linux-node1 ~]# scp bootstrap.kubeconfig 192.168.56.12:/opt/kubernetes/cfg
[root@linux-node1 ~]# scp bootstrap.kubeconfig 192.168.56.13:/opt/kubernetes/cfg
On the node machines, deploy kubelet:
1. Set up CNI support
[root@linux-node2 ~]# mkdir -p /etc/cni/net.d
[root@linux-node2 ~]# cat > /etc/cni/net.d/10-default.conf <<EOF
{
  "name": "flannel",
  "type": "flannel",
  "delegate": {
    "bridge": "docker0",
    "isDefaultGateway": true,
    "mtu": 1400
  }
}
EOF
2. Create the kubelet directory
[root@linux-node2 ~]# mkdir /var/lib/kubelet
3. Create the kubelet service configuration
[root@linux-node2 ~]# vim /usr/lib/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service
Requires=docker.service

[Service]
WorkingDirectory=/var/lib/kubelet
ExecStart=/opt/kubernetes/bin/kubelet \
  --address=192.168.56.12 \
  --hostname-override=192.168.56.12 \
  --pod-infra-container-image=mirrorgooglecontainers/pause-amd64:3.0 \
  --experimental-bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \
  --kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \
  --cert-dir=/opt/kubernetes/ssl \
  --network-plugin=cni \
  --cni-conf-dir=/etc/cni/net.d \
  --cni-bin-dir=/opt/kubernetes/bin/cni \
  --cluster-dns=10.1.0.2 \
  --cluster-domain=cluster.local. \
  --hairpin-mode hairpin-veth \
  --allow-privileged=true \
  --fail-swap-on=false \
  --v=2 \
  --logtostderr=false \
  --log-dir=/opt/kubernetes/log
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
4. Start the kubelet
[root@linux-node2 ~]# systemctl daemon-reload
[root@linux-node2 ~]# systemctl enable kubelet
[root@linux-node2 ~]# systemctl start kubelet
5. Check the service status
[root@linux-node2 ~]# systemctl status kubelet
6. Check the CSR requests
Note: run this on linux-node1.
[root@linux-node1 ~]# kubectl get csr
NAME                                                   AGE       REQUESTOR           CONDITION
node-csr-0_w5F1FM_la_SeGiu3Y5xELRpYUjjT2icIFk9gO9KOU   1m        kubelet-bootstrap   Pending
7. Approve the kubelet TLS certificate requests
[root@linux-node1 ~]# kubectl get csr|grep 'Pending' | awk 'NR>0{print $1}'| xargs kubectl certificate approve
On success the state is Approved:
[root@linux-node1 ~]# kubectl get csr
NAME                                                   AGE       REQUESTOR           CONDITION
node-csr-IPMSFbKvwgq2icOeIo2v_WA-qb8QCyA7MT5h4eDmjxg   2m        kubelet-bootstrap   Approved,Issued
Once approved, the node shows up as Ready:
[root@linux-node1 ~]# kubectl get node
NAME            STATUS    ROLES     AGE       VERSION
192.168.56.12   Ready     <none>    2m        v1.10.8
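After approval the node object is registered by the kubelet itself, so a quick describe confirms the version, OS, and capacity wiring (the node name comes from --hostname-override):

[root@linux-node1 ~]# kubectl describe node 192.168.56.12 | head -n 20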
Deploying the Kubernetes Proxy
1. Configure kube-proxy to use LVS
[root@linux-node2 ~]# yum install -y ipvsadm ipset conntrack
2. Create the kube-proxy certificate request
[root@linux-node1 ~]# cd /usr/local/src/ssl/
[root@linux-node1 ssl]# cat > kube-proxy-csr.json <<EOF
{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
3. Generate the certificate
[root@linux-node1 ssl]# cfssl gencert -ca=/opt/kubernetes/ssl/ca.pem \
   -ca-key=/opt/kubernetes/ssl/ca-key.pem \
   -config=/opt/kubernetes/ssl/ca-config.json \
   -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
4. Distribute the certificates to all node machines
[root@linux-node1 ssl]# cp kube-proxy*.pem /opt/kubernetes/ssl/
[root@linux-node1 ssl]# scp kube-proxy*.pem linux-node2:/opt/kubernetes/ssl/
[root@linux-node1 ssl]# scp kube-proxy*.pem linux-node3:/opt/kubernetes/ssl/
5. Create the kube-proxy configuration file
[root@linux-node1 ~]# kubectl config set-cluster kubernetes \
   --certificate-authority=/opt/kubernetes/ssl/ca.pem \
   --embed-certs=true \
   --server=https://192.168.56.11:6443 \
   --kubeconfig=kube-proxy.kubeconfig
Cluster "kubernetes" set.
[root@linux-node1 ~]# kubectl config set-credentials kube-proxy \
   --client-certificate=/opt/kubernetes/ssl/kube-proxy.pem \
   --client-key=/opt/kubernetes/ssl/kube-proxy-key.pem \
   --embed-certs=true \
   --kubeconfig=kube-proxy.kubeconfig
User "kube-proxy" set.
[root@linux-node1 ~]# kubectl config set-context default \
   --cluster=kubernetes \
   --user=kube-proxy \
   --kubeconfig=kube-proxy.kubeconfig
Context "default" created.
[root@linux-node1 ~]# kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
Switched to context "default".
6. Distribute the kubeconfig file
[root@linux-node1 ssl]# cp kube-proxy.kubeconfig /opt/kubernetes/cfg/
[root@linux-node1 ~]# scp kube-proxy.kubeconfig 192.168.56.12:/opt/kubernetes/cfg/
[root@linux-node1 ~]# scp kube-proxy.kubeconfig 192.168.56.13:/opt/kubernetes/cfg/
7. Create the kube-proxy service configuration
[root@linux-node2 ~]# mkdir /var/lib/kube-proxy
[root@linux-node2 ~]# vim /usr/lib/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
WorkingDirectory=/var/lib/kube-proxy
ExecStart=/opt/kubernetes/bin/kube-proxy \
  --bind-address=192.168.56.12 \
  --hostname-override=192.168.56.12 \
  --kubeconfig=/opt/kubernetes/cfg/kube-proxy.kubeconfig \
  --masquerade-all \
  --feature-gates=SupportIPVSProxyMode=true \
  --proxy-mode=ipvs \
  --ipvs-min-sync-period=5s \
  --ipvs-sync-period=5s \
  --ipvs-scheduler=rr \
  --v=2 \
  --logtostderr=false \
  --log-dir=/opt/kubernetes/log
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
8. Start Kubernetes Proxy
[root@linux-node2 ~]# systemctl daemon-reload
[root@linux-node2 ~]# systemctl enable kube-proxy
[root@linux-node2 ~]# systemctl start kube-proxy
9. Check the service status
Check the kube-proxy service:
[root@linux-node2 ~]# systemctl status kube-proxy
Check the LVS state:
[root@linux-node2 ~]# ipvsadm -L -n
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  10.1.0.1:443 rr persistent 10800
  -> 192.168.56.11:6443           Masq    1      0          0
If both lab machines have the kubelet and kube-proxy services installed, node status can be checked with:
[root@linux-node1 ssl]# kubectl get node
NAME            STATUS    ROLES     AGE       VERSION
192.168.56.12   Ready     <none>    22m       v1.10.8
192.168.56.13   Ready     <none>    3m        v1.10.8
Deploy linux-node3 the same way on your own.
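kube-proxy silently falls back to iptables mode when the IPVS kernel modules are unavailable, so it is worth confirming they loaded after start-up (module names assumed for a stock CentOS 7 kernel):

[root@linux-node2 ~]# lsmod | grep -e ip_vs -e nf_conntrack_ipv4
# Expect entries such as ip_vs_rr, ip_vs, and nf_conntrack_ipv4.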
1. Generate a certificate for Flannel
[root@linux-node1 ~]# cat > flanneld-csr.json <<EOF
{
  "CN": "flanneld",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
2. Generate the certificate
[root@linux-node1 ~]# cfssl gencert -ca=/opt/kubernetes/ssl/ca.pem \
   -ca-key=/opt/kubernetes/ssl/ca-key.pem \
   -config=/opt/kubernetes/ssl/ca-config.json \
   -profile=kubernetes flanneld-csr.json | cfssljson -bare flanneld
3. Distribute the certificates
[root@linux-node1 ~]# cp flanneld*.pem /opt/kubernetes/ssl/
[root@linux-node1 ~]# scp flanneld*.pem 192.168.56.12:/opt/kubernetes/ssl/
[root@linux-node1 ~]# scp flanneld*.pem 192.168.56.13:/opt/kubernetes/ssl/
4. Download the Flannel package
[root@linux-node1 ~]# cd /usr/local/src
wget https://github.com/coreos/flannel/releases/download/v0.10.0/flannel-v0.10.0-linux-amd64.tar.gz
[root@linux-node1 src]# tar zxf flannel-v0.10.0-linux-amd64.tar.gz
[root@linux-node1 src]# cp flanneld mk-docker-opts.sh /opt/kubernetes/bin/
Copy to the other nodes:
[root@linux-node1 src]# scp flanneld mk-docker-opts.sh 192.168.56.12:/opt/kubernetes/bin/
[root@linux-node1 src]# scp flanneld mk-docker-opts.sh 192.168.56.13:/opt/kubernetes/bin/
Copy the helper script to /opt/kubernetes/bin:
[root@linux-node1 ~]# cd /usr/local/src/kubernetes/cluster/centos/node/bin/
[root@linux-node1 bin]# cp remove-docker0.sh /opt/kubernetes/bin/
[root@linux-node1 bin]# scp remove-docker0.sh 192.168.56.12:/opt/kubernetes/bin/
[root@linux-node1 bin]# scp remove-docker0.sh 192.168.56.13:/opt/kubernetes/bin/
5. Configure Flannel
[root@linux-node1 ~]# vim /opt/kubernetes/cfg/flannel
FLANNEL_ETCD="-etcd-endpoints=https://192.168.56.11:2379,https://192.168.56.12:2379,https://192.168.56.13:2379"
FLANNEL_ETCD_KEY="-etcd-prefix=/kubernetes/network"
FLANNEL_ETCD_CAFILE="--etcd-cafile=/opt/kubernetes/ssl/ca.pem"
FLANNEL_ETCD_CERTFILE="--etcd-certfile=/opt/kubernetes/ssl/flanneld.pem"
FLANNEL_ETCD_KEYFILE="--etcd-keyfile=/opt/kubernetes/ssl/flanneld-key.pem"
Copy the configuration to the other nodes:
[root@linux-node1 ~]# scp /opt/kubernetes/cfg/flannel 192.168.56.12:/opt/kubernetes/cfg/
[root@linux-node1 ~]# scp /opt/kubernetes/cfg/flannel 192.168.56.13:/opt/kubernetes/cfg/
6. Set up the Flannel systemd service
[root@linux-node1 ~]# vim /usr/lib/systemd/system/flannel.service
[Unit]
Description=Flanneld overlay address etcd agent
After=network.target
Before=docker.service

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/flannel
ExecStartPre=/opt/kubernetes/bin/remove-docker0.sh
ExecStart=/opt/kubernetes/bin/flanneld ${FLANNEL_ETCD} ${FLANNEL_ETCD_KEY} ${FLANNEL_ETCD_CAFILE} ${FLANNEL_ETCD_CERTFILE} ${FLANNEL_ETCD_KEYFILE}
ExecStartPost=/opt/kubernetes/bin/mk-docker-opts.sh -d /run/flannel/docker
Type=notify

[Install]
WantedBy=multi-user.target
RequiredBy=docker.service
Copy the service unit to the other nodes:
scp /usr/lib/systemd/system/flannel.service 192.168.56.12:/usr/lib/systemd/system/
scp /usr/lib/systemd/system/flannel.service 192.168.56.13:/usr/lib/systemd/system/
Flannel CNI integration
Download the CNI plugins:
[root@linux-node1 ~]# cd /usr/local/src
# https://github.com/containernetworking/plugins/releases
wget https://github.com/containernetworking/plugins/releases/download/v0.7.1/cni-plugins-amd64-v0.7.1.tgz
[root@linux-node1 ~]# mkdir /opt/kubernetes/bin/cni -p
[root@linux-node1 src]# tar zxf cni-plugins-amd64-v0.7.1.tgz -C /opt/kubernetes/bin/cni
scp -r /opt/kubernetes/bin/cni 192.168.56.12:/opt/kubernetes/bin/cni/
scp -r /opt/kubernetes/bin/cni 192.168.56.13:/opt/kubernetes/bin/cni/
Create the etcd key:
/opt/kubernetes/bin/etcdctl --ca-file /opt/kubernetes/ssl/ca.pem \
      --cert-file /opt/kubernetes/ssl/flanneld.pem \
      --key-file /opt/kubernetes/ssl/flanneld-key.pem \
      --no-sync -C https://192.168.56.11:2379,https://192.168.56.12:2379,https://192.168.56.13:2379 \
      mk /kubernetes/network/config '{ "Network": "10.2.0.0/16", "Backend": { "Type": "vxlan", "VNI": 1 }}'
Start flannel:
[root@linux-node1 ~]# systemctl daemon-reload
[root@linux-node1 ~]# systemctl enable flannel
[root@linux-node1 ~]# chmod +x /opt/kubernetes/bin/*
[root@linux-node1 ~]# systemctl start flannel
Check the service status:
[root@linux-node1 ~]# systemctl status flannel
Configure Docker to use Flannel:
[root@linux-node1 ~]# vim /usr/lib/systemd/system/docker.service
[Unit]
# In the Unit section, modify After and add Requires
After=network-online.target flannel.service
Wants=network-online.target
Requires=flannel.service

[Service]
# Add EnvironmentFile=-/run/flannel/docker
Type=notify
EnvironmentFile=-/run/flannel/docker
ExecStart=/usr/bin/dockerd $DOCKER_OPTS
Final configuration:
cat /usr/lib/systemd/system/docker.service
[Unit]
Description=Docker Application Container Engine
Documentation=http://docs.docker.com
After=network.target flannel.service
Requires=flannel.service

[Service]
Type=notify
EnvironmentFile=-/run/flannel/docker
EnvironmentFile=-/opt/kubernetes/cfg/docker
ExecStart=/usr/bin/dockerd $DOCKER_OPT_BIP $DOCKER_OPT_MTU $DOCKER_OPTS
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
#TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
# restart the docker process if it exits prematurely
Restart=on-failure
StartLimitBurst=3
StartLimitInterval=60s

[Install]
WantedBy=multi-user.target
Copy the configuration to the other two nodes:
scp /usr/lib/systemd/system/docker.service 192.168.56.12:/usr/lib/systemd/system/
scp /usr/lib/systemd/system/docker.service 192.168.56.13:/usr/lib/systemd/system/
Restart Docker:
[root@linux-node1 ~]# systemctl daemon-reload
[root@linux-node1 ~]# systemctl restart docker
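Once flannel and Docker are restarted on a node, the docker0 bridge should sit inside the per-node subnet that flannel leased from the 10.2.0.0/16 range, and the options file written by mk-docker-opts.sh should exist:

[root@linux-node1 ~]# cat /run/flannel/docker     # options generated by mk-docker-opts.sh
[root@linux-node1 ~]# ip addr show flannel.1      # the vxlan interface created by flanneld
[root@linux-node1 ~]# ip addr show docker0        # should fall inside the flannel-assigned subnet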
1. Create a test deployment
[root@linux-node1 ~]# kubectl run net-test --image=alpine --replicas=2 sleep 360000
2. Check the assigned IPs
[root@linux-node1 ~]# kubectl get pod -o wide
NAME                        READY     STATUS    RESTARTS   AGE       IP          NODE
net-test-74f45db489-gmgv8   1/1       Running   0          1m        10.2.83.2   192.168.56.13
net-test-74f45db489-pr5jc   1/1       Running   0          1m        10.2.59.2   192.168.56.12
3. Test connectivity (from the corresponding node)
ping 10.2.83.2
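Pinging from inside one pod to the other exercises the full cross-node vxlan path rather than just host-to-pod routing. The pod name below is the sample one from the output above; substitute your own:

[root@linux-node1 ~]# kubectl exec net-test-74f45db489-pr5jc -- ping -c 3 10.2.83.2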