Base environment
System environment:
# cat /etc/redhat-release
CentOS Linux release 7.3.1611 (Core)

Hostname assignments:
centos-master    192.168.59.135
centos-minion1   192.168.59.132
centos-minion2   192.168.59.133
Disable SELinux and firewalld, then reboot the servers
# systemctl stop firewalld
# systemctl disable firewalld
# setenforce 0
# sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
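To confirm both changes took effect, a quick check (a sketch; getenforce reports the runtime SELinux mode, firewall-cmd reports whether firewalld is running):

# getenforce               # Permissive now, Disabled after the reboot
# firewall-cmd --state     # should report "not running"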
Install and deploy the etcd cluster on all three nodes
# yum install etcd -y

Installed version:
# rpm -qa | grep etcd
etcd-3.2.7-1.el7.x86_64
Configure etcd (/etc/etcd/etcd.conf)
Master etcd configuration:
# cat /etc/etcd/etcd.conf | grep -Ev "^#|^$"
ETCD_NAME=centos-master
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="http://0.0.0.0:2380"
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.59.135:2380"
ETCD_INITIAL_CLUSTER="centos-master=http://192.168.59.135:2380,centos-minion2=http://192.168.59.133:2380,centos-minion1=http://192.168.59.132:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.59.135:2379"
Minion2 etcd configuration:
# grep -Ev "^#|^$" /etc/etcd/etcd.conf
ETCD_NAME=centos-minion2
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="http://0.0.0.0:2380"
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.59.133:2380"
ETCD_INITIAL_CLUSTER="centos-master=http://192.168.59.135:2380,centos-minion2=http://192.168.59.133:2380,centos-minion1=http://192.168.59.132:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.59.133:2379"
Minion1 etcd configuration:
# grep -Ev "^#|^$" /etc/etcd/etcd.conf
ETCD_NAME=centos-minion1
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="http://0.0.0.0:2380"
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.59.132:2380"
ETCD_INITIAL_CLUSTER="centos-master=http://192.168.59.135:2380,centos-minion2=http://192.168.59.133:2380,centos-minion1=http://192.168.59.132:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.59.132:2379"
Start etcd on all three nodes, then check the cluster status (the checks can be run from any node):

# Start etcd
# systemctl start etcd
# systemctl enable etcd

# Check cluster membership
# etcdctl member list
10a23ff41e3abcb8: name=centos-minion1 peerURLs=http://192.168.59.132:2380 clientURLs=http://192.168.59.132:2379 isLeader=false
168ea6ce7632b2e4: name=centos-minion2 peerURLs=http://192.168.59.133:2380 clientURLs=http://192.168.59.133:2379 isLeader=true
587d83f824bf96c6: name=centos-master peerURLs=http://192.168.59.135:2380 clientURLs=http://192.168.59.135:2379 isLeader=false

# etcdctl cluster-health
member 10a23ff41e3abcb8 is healthy: got healthy result from http://192.168.59.132:2379
member 168ea6ce7632b2e4 is healthy: got healthy result from http://192.168.59.133:2379
member 587d83f824bf96c6 is healthy: got healthy result from http://192.168.59.135:2379
cluster is healthy
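As an optional sanity check (a sketch; the key name /test is arbitrary), write a key through one member and read it back from another to confirm the cluster is replicating:

# etcdctl --endpoints http://192.168.59.135:2379 set /test "hello"
# etcdctl --endpoints http://192.168.59.132:2379 get /test
# etcdctl --endpoints http://192.168.59.132:2379 rm /test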
Install and deploy the Kubernetes master node
# yum install kubernetes -y

Installed version:
# rpm -qa | grep kubernetes
kubernetes-client-1.5.2-0.7.git269f928.el7.x86_64
kubernetes-1.5.2-0.7.git269f928.el7.x86_64
kubernetes-master-1.5.2-0.7.git269f928.el7.x86_64
kubernetes-node-1.5.2-0.7.git269f928.el7.x86_64
Configure the Kubernetes API server (/etc/kubernetes/apiserver):
# cat /etc/kubernetes/apiserver | grep -Ev "^#|^$"
KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0"
KUBE_API_PORT="--port=8080"
KUBELET_PORT="--kubelet-port=10250"
KUBE_ETCD_SERVERS="--etcd-servers=http://127.0.0.1:2379"
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"
KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ResourceQuota"
KUBE_API_ARGS=""
Configure the shared Kubernetes settings (/etc/kubernetes/config):
# cat /etc/kubernetes/config | grep -Ev "^#|^$"
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=0"
KUBE_ALLOW_PRIV="--allow-privileged=false"
KUBE_MASTER="--master=http://centos-master:8080"
Install the Kubernetes minion nodes (minion1 and minion2)
# yum install flannel docker kubernetes -y
Configure flannel (/etc/sysconfig/flanneld):
# grep -Ev "^#|^$" /etc/sysconfig/flanneld
FLANNEL_ETCD_ENDPOINTS="http://192.168.59.133:2379"
FLANNEL_ETCD_PREFIX="/atomic.io/network"
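FLANNEL_ETCD_ENDPOINTS accepts a comma-separated list, so pointing flanneld at a single member ties it to that node's availability. A sketch of listing all three members of this cluster instead:

FLANNEL_ETCD_ENDPOINTS="http://192.168.59.135:2379,http://192.168.59.132:2379,http://192.168.59.133:2379"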
Configure the kubelet (/etc/kubernetes/kubelet); the example below is from minion2, so on minion1 set --hostname-override=centos-minion1:
# grep -Ev "^#|^$" /etc/kubernetes/kubelet
KUBELET_ADDRESS="--address=0.0.0.0"
KUBELET_PORT="--port=10250"
KUBELET_HOSTNAME="--hostname-override=centos-minion2"
KUBELET_API_SERVER="--api-servers=http://centos-master:8080"
# Fill in your own registry address below; if the node can reach the Internet, simply keep the default image
# KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest"
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=192.168.59.133:5000/pod-infrastructure:latest"
# Fill in your own DNS and network information below
KUBELET_ARGS="--cluster-dns=192.168.51.198 --cluster-domain=atomic.io/network"
Start the services
Kubernetes master:
# for SERVICES in kube-apiserver kube-controller-manager kube-scheduler; do
    systemctl restart $SERVICES
    systemctl enable $SERVICES
    systemctl status $SERVICES -l
  done

Write the flannel network range into etcd:
# etcdctl mk /atomic.io/network/config '{"Network":"172.17.0.0/16"}'
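To confirm the master components are up and the flannel key is in place, a quick check from the master (a sketch; /healthz and /version are standard endpoints on the insecure API port, and the etcdctl get simply reads back the key written above):

# curl http://centos-master:8080/healthz
# curl http://centos-master:8080/version
# etcdctl get /atomic.io/network/config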
Kubernetes minion:
# for SERVICES in kube-proxy kubelet docker flanneld; do
    systemctl restart $SERVICES
    systemctl enable $SERVICES
    systemctl status $SERVICES
  done
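Once flanneld is running on a minion it records the subnet it leased from etcd; a quick check (a sketch, assuming the CentOS flannel package writes the lease to /run/flannel/subnet.env; docker0 should fall inside that subnet after docker restarts):

# cat /run/flannel/subnet.env
# ip addr show docker0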
Check the nodes (on the master)
# kubectl get nodes
NAME             STATUS    AGE
centos-minion1   Ready     1h
centos-minion2   Ready     1h
Check the flannel interface
[root@centos-minion1 ~]# ifconfig flannel0
flannel0: flags=4305<UP,POINTOPOINT,RUNNING,NOARP,MULTICAST>  mtu 1472
        inet 172.17.34.0  netmask 255.255.0.0  destination 172.17.34.0
        unspec 00-00-00-00-00-00-00-00-00-00-00-00-00-00-00-00  txqueuelen 500  (UNSPEC)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0

[root@centos-minion2 ~]# ifconfig flannel0
flannel0: flags=4305<UP,POINTOPOINT,RUNNING,NOARP,MULTICAST>  mtu 1472
        inet 172.17.59.0  netmask 255.255.0.0  destination 172.17.59.0
        inet6 fe80::2d54:2169:1a0:d364  prefixlen 64  scopeid 0x20<link>
        unspec 00-00-00-00-00-00-00-00-00-00-00-00-00-00-00-00  txqueuelen 500  (UNSPEC)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 3  bytes 144 (144.0 B)
        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0
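With both flannel0 interfaces up, the two minions should be able to reach each other across the overlay; a simple cross-node check (a sketch; the targets are the flannel0 addresses shown above):

[root@centos-minion1 ~]# ping -c 3 172.17.59.0
[root@centos-minion2 ~]# ping -c 3 172.17.34.0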
A simple test (create a Pod, a Service, and an RC on the master)
# ls
http-pod.yaml  http-rc.yaml  http-service.yaml

# cat http-pod.yaml
apiVersion: v1
kind: Pod
metadata:
  name: http-pod
  labels:
    name: http-pod
spec:
  containers:
  - name: http
    image: 192.168.59.133:5000/centos6-http
    ports:
    - containerPort: 80

# cat http-service.yaml
apiVersion: v1
kind: Service
metadata:
  name: http-service
spec:
  type: NodePort
  ports:
  - port: 80
    nodePort: 30001
  selector:
    name: http-pod

# cat http-rc.yaml
apiVersion: v1
kind: ReplicationController
metadata:
  name: http-rc
spec:
  replicas: 2
  selector:
    name: http-pod
  template:
    metadata:
      labels:
        name: http-pod
    spec:
      containers:
      - name: http-pod
        image: 192.168.59.133:5000/centos6-http
        ports:
        - containerPort: 80
Create the Pod:

# kubectl create -f http-pod.yaml
pod "http-pod" created

# kubectl get pods
NAME       READY     STATUS    RESTARTS   AGE
http-pod   1/1       Running   0          4s
Create the Service:

# kubectl create -f http-service.yaml
service "http-service" created

# kubectl get service
NAME           CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
http-service   10.254.235.49   <nodes>       80:30001/TCP   5s
kubernetes     10.254.0.1      <none>        443/TCP        1d
Check which node the pod landed on:

# kubectl describe service
Name:                   http-service
Namespace:              default
Labels:                 <none>
Selector:               name=http-pod
Type:                   NodePort
IP:                     10.254.235.49
Port:                   <unset> 80/TCP
NodePort:               <unset> 30001/TCP
Endpoints:              172.17.59.3:80    # this is the pod's flannel address, on minion2
Session Affinity:       None
No events.

Name:                   kubernetes
Namespace:              default
Labels:                 component=apiserver
                        provider=kubernetes
Selector:               <none>
Type:                   ClusterIP
IP:                     10.254.0.1
Port:                   https 443/TCP
Endpoints:              192.168.59.135:6443
Session Affinity:       ClientIP
No events.

Accessing the pod through the NodePort returns the default HTTP welcome page:
# curl http://192.168.59.133:30001/
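Because kube-proxy opens a NodePort on every node, the same service should also answer on minion1's address even though the pod runs on minion2; a quick check (sketch):

# curl http://192.168.59.132:30001/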
Create the RC:

# kubectl create -f http-rc.yaml
replicationcontroller "http-rc" created

# kubectl get rc
NAME      DESIRED   CURRENT   READY     AGE
http-rc   2         2         2         8s

# kubectl get pods    # there was one pod before; the RC brings the total to two
NAME            READY     STATUS    RESTARTS   AGE
http-pod        1/1       Running   0          9m
http-rc-b24kx   1/1       Running   0          13s
Now delete a pod and see whether a replacement is created:

# kubectl delete pod http-pod
pod "http-pod" deleted

# kubectl get pods
NAME            READY     STATUS    RESTARTS   AGE
http-rc-8cl5p   1/1       Running   0          2s
http-rc-b24kx   1/1       Running   0          2m

# kubectl delete pod http-rc-8cl5p http-rc-b24kx
pod "http-rc-8cl5p" deleted
pod "http-rc-b24kx" deleted

# kubectl get pods
NAME            READY     STATUS    RESTARTS   AGE
http-rc-xxtrw   1/1       Running   0          3s
http-rc-z8t9n   1/1       Running   0          3s

Pods were deleted twice, and each time the RC brought the count back to two; both replacements were tested and served requests normally.
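Besides self-healing, the replica count can also be adjusted on the fly; a minimal sketch using kubectl scale (the target count of 3 is arbitrary, and the second command scales back so the cluster matches the output below):

# kubectl scale rc http-rc --replicas=3
# kubectl get pods
# kubectl scale rc http-rc --replicas=2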
Describe the pods; note that the RC spread the two replicas across both minions:

# kubectl describe pod
Name:           http-rc-xxtrw
Namespace:      default
Node:           centos-minion2/192.168.59.133
Start Time:     Tue, 31 Oct 2017 16:05:51 +0800
Labels:         name=http-pod
Status:         Running
IP:             172.17.59.4
Controllers:    ReplicationController/http-rc
Containers:
  http-pod:
    Container ID:       docker://a3338c455a27540c8f7b7b3f01fa3862b1082f7ae47e9b3761610b4a6043245b
    Image:              192.168.59.133:5000/centos6-http
    Image ID:           docker-pullable://192.168.59.133:5000/centos6-http@sha256:545cbb5dda7db142f958ec4550a4dcb6daed47863c78dc38206c39bfa0b5e715
    Port:               80/TCP
    State:              Running
      Started:          Tue, 31 Oct 2017 16:05:53 +0800
    Ready:              True
    Restart Count:      0
    Volume Mounts:      <none>
    Environment Variables:      <none>
Conditions:
  Type          Status
  Initialized   True
  Ready         True
  PodScheduled  True
No volumes.
QoS Class:      BestEffort
Tolerations:    <none>
Events:
  FirstSeen  LastSeen  Count  From                      SubObjectPath              Type    Reason     Message
  ---------  --------  -----  ----                      -------------              ------  ------     -------
  2m         2m        1      {default-scheduler }                                 Normal  Scheduled  Successfully assigned http-rc-xxtrw to centos-minion2
  2m         2m        1      {kubelet centos-minion2}  spec.containers{http-pod}  Normal  Pulling    pulling image "192.168.59.133:5000/centos6-http"
  2m         2m        1      {kubelet centos-minion2}  spec.containers{http-pod}  Normal  Pulled     Successfully pulled image "192.168.59.133:5000/centos6-http"
  2m         2m        1      {kubelet centos-minion2}  spec.containers{http-pod}  Normal  Created    Created container with docker id a3338c455a27; Security:[seccomp=unconfined]
  2m         2m        1      {kubelet centos-minion2}  spec.containers{http-pod}  Normal  Started    Started container with docker id a3338c455a27

Name:           http-rc-z8t9n
Namespace:      default
Node:           centos-minion1/192.168.59.132
Start Time:     Tue, 31 Oct 2017 16:05:52 +0800
Labels:         name=http-pod
Status:         Running
IP:             172.17.34.3
Controllers:    ReplicationController/http-rc
Containers:
  http-pod:
    Container ID:       docker://6b4fbca3f6a8690f24fe749556323a6be85f5122f378a076a8bf9d0556a89b6e
    Image:              192.168.59.133:5000/centos6-http
    Image ID:           docker-pullable://192.168.59.133:5000/centos6-http@sha256:545cbb5dda7db142f958ec4550a4dcb6daed47863c78dc38206c39bfa0b5e715
    Port:               80/TCP
    State:              Running
      Started:          Tue, 31 Oct 2017 16:05:54 +0800
    Ready:              True
    Restart Count:      0
    Volume Mounts:      <none>
    Environment Variables:      <none>
Conditions:
  Type          Status
  Initialized   True
  Ready         True
  PodScheduled  True
No volumes.
QoS Class:      BestEffort
Tolerations:    <none>
Events:
  FirstSeen  LastSeen  Count  From                      SubObjectPath              Type    Reason     Message
  ---------  --------  -----  ----                      -------------              ------  ------     -------
  2m         2m        1      {default-scheduler }                                 Normal  Scheduled  Successfully assigned http-rc-z8t9n to centos-minion1
  2m         2m        1      {kubelet centos-minion1}  spec.containers{http-pod}  Normal  Pulling    pulling image "192.168.59.133:5000/centos6-http"
  2m         2m        1      {kubelet centos-minion1}  spec.containers{http-pod}  Normal  Pulled     Successfully pulled image "192.168.59.133:5000/centos6-http"
  2m         2m        1      {kubelet centos-minion1}  spec.containers{http-pod}  Normal  Created    Created container with docker id 6b4fbca3f6a8; Security:[seccomp=unconfined]
  2m         2m        1      {kubelet centos-minion1}  spec.containers{http-pod}  Normal  Started    Started container with docker id 6b4fbca3f6a8