Role | Hostname | Host IP | Components
---|---|---|---
etcd | etcd | 192.168.0.106 | etcd |
master | kube-master | 192.168.0.107 | kube-apiserver,kube-controller-manager,kube-scheduler |
node1 | kube-node1 | 192.168.0.108 | kubelet,kube-proxy,docker |
node2 | kube-node2 | 192.168.0.109 | kubelet,kube-proxy,docker |
node3 | kube-node3 | 192.168.0.110 | kubelet,kube-proxy,docker |
```
systemctl stop firewalld.service
systemctl disable firewalld.service
setenforce 0
grep -E '^SELINUX=' /etc/sysconfig/selinux    # if it reports enforcing, disable it
[root@node ~]# sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/sysconfig/selinux
[root@node ~]# grep -E '^SELINUX=' /etc/sysconfig/selinux
SELINUX=disabled
```
```
mkdir -pv /opt/kubernetes/{bin,cfg}
# single quotes so $PATH is expanded at login time, not baked in when written
echo 'export PATH=$PATH:/opt/kubernetes/bin' >> /etc/profile
source /etc/profile
```
In the kubernetes repository on GitHub, locate CHANGELOG-*.md; it links to the binary release tarballs for each version, as well as the shell scripts that install the K8s platform components. ==Note that when installing, these scripts must be adjusted to match where you actually keep the binaries.==
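For example, the v1.9.1 tarballs can be fetched roughly as below. The exact URLs are listed in the CHANGELOG; the ones here assume the standard release layout, and the listing that follows shows the files after renaming them locally to embed the version:

```
wget https://dl.k8s.io/v1.9.1/kubernetes-server-linux-amd64.tar.gz
wget https://dl.k8s.io/v1.9.1/kubernetes-client-linux-amd64.tar.gz
wget https://dl.k8s.io/v1.9.1/kubernetes.tar.gz
```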
```
-rw-r--r--. 1 root root  15628779 Aug  4 16:41 kubernetes-client-linux-1.9.1-amd64.tar.gz
-rw-r--r--. 1 root root 418747484 Aug  4 16:41 kubernetes-server-linux-1.9.1-amd64.tar.gz
-rw-r--r--. 1 root root   2877413 Aug  4 18:28 kubernetes.tar.gz
```

```
tar xf kubernetes-client-linux-1.9.1-amd64.tar.gz
tar xf kubernetes-server-linux-1.9.1-amd64.tar.gz
tar xf kubernetes.tar.gz
```
After extraction, the contents of all three archives land in the same directory, named kubernetes.
==Prepare the files that run on the kube-master node==

```
cp /root/kubernetes/cluster/centos/master/scripts/{apiserver.sh,controller-manager.sh,scheduler.sh} /opt/kubernetes/
cp /root/kubernetes/server/bin/{kube-controller-manager,kube-scheduler,kube-apiserver,kubectl} /opt/kubernetes/bin/
```
==Prepare the files that run on the kube-node nodes==

```
cp /root/kubernetes/cluster/centos/node/scripts/{proxy.sh,kubelet.sh} /opt/kubernetes/
cp /root/kubernetes/server/bin/{kubelet,kube-proxy} /opt/kubernetes/bin
```
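Per the component table above, each node also runs docker, and kubelet.sh assumes a running docker daemon. If it is not installed yet, a minimal setup on CentOS 7 looks like this:

```
# docker ships in the CentOS 7 extras repo
yum install -y docker
systemctl enable docker
systemctl start docker
```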
Since this is a test environment, we will install a single-node etcd for now.

```
yum install epel-release -y
yum install etcd
[root@etcd ~]# cat /etc/etcd/etcd.conf | awk '{if($0 !~ /^$/ && $0 !~ /^#/) {print $0}}'
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_CLIENT_URLS="http://192.168.0.106:2379"
ETCD_NAME="k8setcd"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.0.106:2379"
systemctl start etcd
systemctl status etcd
systemctl enable etcd
journalctl -u etcd    # view the logs
```
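Before pointing the apiserver at it, a quick sanity check is worthwhile. This assumes the etcdctl shipped in the same package, speaking the v2 API:

```
etcdctl --endpoints=http://192.168.0.106:2379 cluster-health
# a healthy single-member cluster reports "cluster is healthy"
```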
==Things to watch out for in the scripts==
```
[root@master kubernetes]# cat apiserver.sh | awk '{if($0 !~ /^$/ && $0 !~ /^#/) {print $0}}'
MASTER_ADDRESS=${1:-"192.168.0.107"}
ETCD_SERVERS=${2:-"http://192.168.0.106:2379"}
SERVICE_CLUSTER_IP_RANGE=${3:-"10.10.10.0/24"}
ADMISSION_CONTROL=${4:-""}
cat <<EOF >/opt/kubernetes/cfg/kube-apiserver
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=4"
KUBE_ETCD_SERVERS="--etcd-servers=${ETCD_SERVERS}"
KUBE_API_ADDRESS="--insecure-bind-address=192.168.0.107"
KUBE_API_PORT="--insecure-port=8080"
NODE_PORT="--kubelet-port=10250"
KUBE_ADVERTISE_ADDR="--advertise-address=${MASTER_ADDRESS}"
KUBE_ALLOW_PRIV="--allow-privileged=false"
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}"
KUBE_ADMISSION_CONTROL="--admission-control=${ADMISSION_CONTROL}"
EOF
KUBE_APISERVER_OPTS="   \${KUBE_LOGTOSTDERR}         \\
                        \${KUBE_LOG_LEVEL}           \\
                        \${KUBE_ETCD_SERVERS}        \\
                        \${KUBE_ETCD_CAFILE}         \\
                        \${KUBE_ETCD_CERTFILE}       \\
                        \${KUBE_ETCD_KEYFILE}        \\
                        \${KUBE_API_ADDRESS}         \\
                        \${KUBE_API_PORT}            \\
                        \${NODE_PORT}                \\
                        \${KUBE_ADVERTISE_ADDR}      \\
                        \${KUBE_ALLOW_PRIV}          \\
                        \${KUBE_SERVICE_ADDRESSES}   \\
                        \${KUBE_ADMISSION_CONTROL}   \\
                        \${KUBE_API_CLIENT_CA_FILE}  \\
                        \${KUBE_API_TLS_CERT_FILE}   \\
                        \${KUBE_API_TLS_PRIVATE_KEY_FILE}"
cat <<EOF >/usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-apiserver
ExecStart=/opt/kubernetes/bin/kube-apiserver ${KUBE_APISERVER_OPTS}
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable kube-apiserver
systemctl restart kube-apiserver
```

Note that the etcd endpoint here is plain http, matching the etcd configuration above; our cluster has no TLS configured.
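One detail worth understanding: `\${KUBE_LOGTOSTDERR}` and friends are escaped so they are written *literally* into the unit file; systemd then resolves them at start time from the EnvironmentFile. The generated ExecStart therefore ends up looking roughly like this (illustrative excerpt, trimmed):

```
ExecStart=/opt/kubernetes/bin/kube-apiserver ${KUBE_LOGTOSTDERR} \
    ${KUBE_LOG_LEVEL} \
    ${KUBE_ETCD_SERVERS} \
    ...
```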
```
# Since we modified the scripts, the defaults already match our environment,
# so passing these arguments is optional.
/opt/kubernetes/apiserver.sh 192.168.0.107 http://192.168.0.106:2379
/opt/kubernetes/scheduler.sh 192.168.0.107
/opt/kubernetes/controller-manager.sh 192.168.0.107
```

```
[root@master kubernetes]# ps -ef | grep kube | grep -v grep
root  2280  1  2 17:20 ?  00:04:32 /opt/kubernetes/bin/kube-apiserver --logtostderr=true --v=4 --etcd-servers=http://192.168.0.106:2379 --insecure-bind-address=192.168.0.107 --insecure-port=8080 --kubelet-port=10250 --advertise-address=192.168.0.107 --allow-privileged=false --service-cluster-ip-range=10.10.10.0/24 --admission-control=
root  2404  1  1 17:23 ?  00:01:53 /opt/kubernetes/bin/kube-scheduler --logtostderr=true --v=4 --master=192.168.0.107:8080 --leader-elect
root  2554  1  1 17:25 ?  00:03:16 /opt/kubernetes/bin/kube-controller-manager --logtostderr=true --v=4 --master=192.168.0.107:8080 --leader-elect
```
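With all three components running, a quick request against the apiserver's insecure port confirms it is serving:

```
curl http://192.168.0.107:8080/healthz
# a healthy apiserver answers: ok
```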
System logs are written to /var/log/messages (on Ubuntu, to /var/log/syslog).
```
[root@master ~]# journalctl -u kube-apiserver
-- Logs begin at 六 2018-08-04 18:19:18 CST, end at 六 2018-08-04 20:06:56 CST. --
8月 04 18:19:18 master kube-apiserver[2280]: I0804 18:19:18.706331    2280 get.go:238] Starting watch for /apis/extensions/v1beta1/da
8月 04 18:19:18 master kube-apiserver[2280]: I0804 18:19:18.718459    2280 wrap.go:42] GET /api/v1/namespaces/default: (2.497133ms) 2
8月 04 18:19:18 master kube-apiserver[2280]: I0804 18:19:18.722771    2280 wrap.go:42] GET /api/v1/namespaces/default/services/kubern
8月 04 18:19:18 master kube-apiserver[2280]: I0804 18:19:18.727262    2280 wrap.go:42] GET /api/v1/namespaces/default/endpoints/kuber
8月 04 18:19:19 master kube-apiserver[2280]: I0804 18:19:19.013099    2280 wrap.go:42] GET /api/v1/namespaces/kube-system/endpoints/k
```
```
/opt/kubernetes/kubelet.sh
/opt/kubernetes/proxy.sh
[root@node kubernetes]# ps -ef | grep kube | grep -v grep
root  2821  1  2 18:41 ?  00:02:08 /opt/kubernetes/bin/kubelet --logtostderr=true --v=4 --address=192.168.0.108 --port=10250 --hostname-override=192.168.0.108 --kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig --allow-privileged=false --cluster-dns=192.168.0.1 --cluster-domain=cluster.local
root  2955  1  0 18:43 ?  00:00:13 /opt/kubernetes/bin/kube-proxy --logtostderr=true --v=4 --hostname-override=192.168.0.108 --master=http://192.168.0.107:8080
```
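Once kubelet and kube-proxy are up on each node, the master should see them register. The node names follow the --hostname-override values above (the `-s` flag points kubectl at the insecure port, since no kubeconfig is set up on the master yet):

```
kubectl -s http://192.168.0.107:8080 get nodes
```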
```
[root@master ~]# kubectl get componentstatus
NAME                 STATUS    MESSAGE              ERROR
controller-manager   Healthy   ok
scheduler            Healthy   ok
etcd-0               Healthy   {"health": "true"}
```

```
kubectl run nginx --image=nginx --replicas=3
```
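In 1.9, `kubectl run` creates a Deployment and labels its pods `run=nginx` by default, so the three replicas can be watched getting scheduled across the nodes:

```
kubectl get deployment nginx
kubectl get pods -l run=nginx -o wide    # -o wide shows which node each replica landed on
```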