Environment preparation:
Kubernetes-master: 192.168.37.134  # yum install kubernetes-master etcd flannel -y
Kubernetes-node1: 192.168.37.135   # yum install kubernetes-node etcd docker flannel *rhsm* -y
Kubernetes-node2: 192.168.37.136   # yum install kubernetes-node etcd docker flannel *rhsm* -y
OS version: CentOS 7.5
Disable the firewalld firewall and make sure NTP time synchronization is working.
【K8s-master etcd configuration】
[root@Kubernetes-master ~]# egrep -v "#|^$" /etc/etcd/etcd.conf
ETCD_DATA_DIR="/data/etcd1"
ETCD_LISTEN_PEER_URLS="http://192.168.37.134:2380"
ETCD_LISTEN_CLIENT_URLS="http://192.168.37.134:2379,http://127.0.0.1:2379"
ETCD_MAX_SNAPSHOTS="5"
ETCD_NAME="etcd1"
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.37.134:2380"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.37.134:2379"
ETCD_INITIAL_CLUSTER="etcd1=http://192.168.37.134:2380,etcd2=http://192.168.37.135:2380,etcd3=http://192.168.37.136:2380"
Configuration parameters explained:
ETCD_DATA_DIR: directory where this etcd node stores its data
ETCD_NAME: this etcd node's name within the cluster
ETCD_LISTEN_PEER_URLS: address this node listens on for communication with the other etcd members
ETCD_LISTEN_CLIENT_URLS: list of addresses this etcd node listens on for client traffic
ETCD_INITIAL_ADVERTISE_PEER_URLS: peer address and port (2380) this member advertises for cluster communication
ETCD_ADVERTISE_CLIENT_URLS: local client address (port 2379) this node advertises to the other etcd nodes and clients
ETCD_INITIAL_CLUSTER: all members of the etcd cluster with their peer addresses on port 2380, so the cluster nodes can synchronize data
[root@Kubernetes-master ~]# mkdir -p /data/etcd1/
[root@Kubernetes-master ~]# chmod 757 -R /data/etcd1/
【K8s-node1 etcd configuration】
[root@kubernetes-node1 ~]# egrep -v "#|^$" /etc/etcd/etcd.conf
ETCD_DATA_DIR="/data/etcd2"
ETCD_LISTEN_PEER_URLS="http://192.168.37.135:2380"
ETCD_LISTEN_CLIENT_URLS="http://192.168.37.135:2379,http://127.0.0.1:2379"
ETCD_MAX_SNAPSHOTS="5"
ETCD_NAME="etcd2"
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.37.135:2380"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.37.135:2379"
ETCD_INITIAL_CLUSTER="etcd1=http://192.168.37.134:2380,etcd2=http://192.168.37.135:2380,etcd3=http://192.168.37.136:2380"
[root@kubernetes-node1 ~]# mkdir -p /data/etcd2/
[root@kubernetes-node1 ~]# chmod 757 -R /data/etcd2/
【K8s-node2 etcd configuration】
[root@kubernetes-node2 ~]# egrep -v "#|^$" /etc/etcd/etcd.conf
ETCD_DATA_DIR="/data/etcd3"
ETCD_LISTEN_PEER_URLS="http://192.168.37.136:2380"
ETCD_LISTEN_CLIENT_URLS="http://192.168.37.136:2379,http://127.0.0.1:2379"
ETCD_MAX_SNAPSHOTS="5"
ETCD_NAME="etcd3"
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.37.136:2380"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.37.136:2379"
ETCD_INITIAL_CLUSTER="etcd1=http://192.168.37.134:2380,etcd2=http://192.168.37.135:2380,etcd3=http://192.168.37.136:2380"
[root@kubernetes-node2 ~]# mkdir /data/etcd3/
[root@kubernetes-node2 ~]# chmod 757 -R /data/etcd3/
At this point the etcd cluster is configured; next, start the service and verify that the cluster is healthy.
[root@Kubernetes-master ~]# systemctl start etcd.service    # note: the etcd service must be started on all of the nodes above, and enabled at boot as well
[root@Kubernetes-master ~]# systemctl enable etcd.service
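With etcd running on all three nodes, the cluster state can be checked from any member. A quick verification, assuming the etcd v2 etcdctl that ships with these packages:
[root@Kubernetes-master ~]# etcdctl cluster-health    # all three members should report "healthy"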
【K8s-master API-server/config configuration】
[root@Kubernetes-master ~]# egrep -v "#|^$" /etc/kubernetes/apiserver
KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0"
KUBE_API_PORT="--port=8080"
KUBELET_PORT="--kubelet-port=10250"
KUBE_ETCD_SERVERS="--etcd-servers=http://192.168.37.134:2379,http://192.168.37.135:2379,http://192.168.37.136:2379"
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"
KUBE_ADMISSION_CONTROL="--admission_control=NamespaceLifecycle,NamespaceExists,LimitRanger,ResourceQuota"
KUBE_API_ARGS=""
[root@Kubernetes-master ~]# systemctl start kube-apiserver
[root@Kubernetes-master ~]# systemctl enable kube-apiserver
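Before moving on, it is worth confirming the API server answers on the insecure port. A minimal check against the address and port configured above:
[root@Kubernetes-master ~]# curl http://192.168.37.134:8080/version    # should return the apiserver version as JSON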
[root@Kubernetes-master ~]# egrep -v "#|^$" /etc/kubernetes/config
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=0"
KUBE_ALLOW_PRIV="--allow-privileged=false"
KUBE_MASTER="--master=http://192.168.37.134:8080"
[root@Kubernetes-master kubernetes]# systemctl start kube-controller-manager
[root@Kubernetes-master kubernetes]# systemctl enable kube-controller-manager
[root@Kubernetes-master kubernetes]# systemctl start kube-scheduler
[root@Kubernetes-master kubernetes]# systemctl enable kube-scheduler
【k8s-node1】
kubelet configuration file
[root@kubernetes-node1 ~]# egrep -v "#|^$" /etc/kubernetes/kubelet
KUBELET_ADDRESS="--address=0.0.0.0"
KUBELET_HOSTNAME="--hostname-override=192.168.37.135"
KUBELET_API_SERVER="--api-servers=http://192.168.37.134:8080"
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest"
KUBELET_ARGS=""
config main configuration file
[root@kubernetes-node1 ~]# egrep -v "#|^$" /etc/kubernetes/config
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=0"
KUBE_ALLOW_PRIV="--allow-privileged=false"
KUBE_MASTER="--master=http://192.168.37.134:8080"
[root@kubernetes-node1 ~]# systemctl start kubelet
[root@kubernetes-node1 ~]# systemctl enable kubelet
[root@kubernetes-node1 ~]# systemctl start kube-proxy
[root@kubernetes-node1 ~]# systemctl enable kube-proxy
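A quick local sanity check that both node services came up (a sketch; systemctl prints "active" for each running unit):
[root@kubernetes-node1 ~]# systemctl is-active kubelet kube-proxy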
【k8s-node2】
kubelet configuration file
[root@kubernetes-node2 ~]# egrep -v "#|^$" /etc/kubernetes/kubelet
KUBELET_ADDRESS="--address=0.0.0.0"
KUBELET_HOSTNAME="--hostname-override=192.168.37.136"
KUBELET_API_SERVER="--api-servers=http://192.168.37.134:8080"
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest"
KUBELET_ARGS=""
config main configuration file
[root@kubernetes-node2 ~]# egrep -v "^$|#" /etc/kubernetes/config
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=0"
KUBE_ALLOW_PRIV="--allow-privileged=false"
KUBE_MASTER="--master=http://192.168.37.134:8080"
[root@kubernetes-node2 ~]# systemctl start kubelet
[root@kubernetes-node2 ~]# systemctl enable kubelet
[root@kubernetes-node2 ~]# systemctl start kube-proxy
[root@kubernetes-node2 ~]# systemctl enable kube-proxy
【Kubernetes flanneld network configuration】
[root@Kubernetes-master kubernetes]# egrep -v "#|^$" /etc/sysconfig/flanneld
FLANNEL_ETCD_ENDPOINTS="http://192.168.37.134:2379"
FLANNEL_ETCD_PREFIX="/atomic.io/network"
[root@kubernetes-node1 ~]# egrep -v "#|^$" /etc/sysconfig/flanneld
FLANNEL_ETCD_ENDPOINTS="http://192.168.37.134:2379"
FLANNEL_ETCD_PREFIX="/atomic.io/network"
[root@kubernetes-node2 ~]# egrep -v "#|^$" /etc/sysconfig/flanneld
FLANNEL_ETCD_ENDPOINTS="http://192.168.37.134:2379"
FLANNEL_ETCD_PREFIX="/atomic.io/network"
[root@Kubernetes-master kubernetes]# etcdctl mk /atomic.io/network/config '{"Network":"172.17.0.0/16"}'
{"Network":"172.17.0.0/16"}
[root@Kubernetes-master kubernetes]# etcdctl get /atomic.io/network/config
{"Network":"172.17.0.0/16"}
[root@Kubernetes-master kubernetes]# systemctl restart flanneld
[root@Kubernetes-master kubernetes]# systemctl enable flanneld
[root@kubernetes-node1 ~]# systemctl start flanneld
[root@kubernetes-node1 ~]# systemctl enable flanneld
[root@kubernetes-node2 ~]# systemctl start flanneld
[root@kubernetes-node2 ~]# systemctl enable flanneld
Ps: after restarting the flanneld network, subnets for all three nodes appear. On each node, make sure docker and its own flanneld interface are on the same subnet; if they differ, restarting the docker service restores consistency, otherwise the three subnets cannot ping each other.
[root@Kubernetes-master ~]# etcdctl ls /atomic.io/network/subnets
/atomic.io/network/subnets/172.17.2.0-24
/atomic.io/network/subnets/172.17.23.0-24
/atomic.io/network/subnets/172.17.58.0-24
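One way to check that consistency on a node (a sketch; /run/flannel/subnet.env is where this flanneld package writes the assigned subnet):
[root@kubernetes-node1 ~]# cat /run/flannel/subnet.env       # FLANNEL_SUBNET should match the docker0 address
[root@kubernetes-node1 ~]# ip addr show docker0 | grep inet
[root@kubernetes-node1 ~]# systemctl restart docker          # re-aligns docker0 with flannel if they differ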
Check the firewall settings on the Kubernetes nodes: if the forwarding policy is DROP, it must be opened with the iptables -P FORWARD ACCEPT rule.
[root@kubernetes-node1 ~]# iptables -L -n    # check the firewall rules
Chain INPUT (policy ACCEPT)
target     prot opt source               destination
KUBE-FIREWALL  all  --  0.0.0.0/0            0.0.0.0/0

Chain FORWARD (policy ACCEPT)
target     prot opt source               destination
DOCKER-ISOLATION  all  --  0.0.0.0/0            0.0.0.0/0
DOCKER     all  --  0.0.0.0/0            0.0.0.0/0
ACCEPT     all  --  0.0.0.0/0            0.0.0.0/0            ctstate RELATED,ESTABLISHED
ACCEPT     all  --  0.0.0.0/0            0.0.0.0/0
ACCEPT     all  --  0.0.0.0/0            0.0.0.0/0

Chain OUTPUT (policy ACCEPT)
target     prot opt source               destination
KUBE-SERVICES  all  --  0.0.0.0/0            0.0.0.0/0            /* kubernetes service portals */
KUBE-FIREWALL  all  --  0.0.0.0/0            0.0.0.0/0

Chain DOCKER (1 references)
target     prot opt source               destination

Chain DOCKER-ISOLATION (1 references)
target     prot opt source               destination
RETURN     all  --  0.0.0.0/0            0.0.0.0/0

Chain KUBE-FIREWALL (2 references)
target     prot opt source               destination
DROP       all  --  0.0.0.0/0            0.0.0.0/0            /* kubernetes firewall for dropping marked packets */ mark match 0x8000/0x8000

Chain KUBE-SERVICES (1 references)
target     prot opt source               destination
Alternatively, enable IP forwarding persistently:
echo "net.ipv4.ip_forward = 1" >> /usr/lib/sysctl.d/50-default.conf
[root@Kubernetes-master ~]# etcdctl ls /atomic.io/network/subnets    # check the network information and make sure connectivity is normal
/atomic.io/network/subnets/172.17.38.0-24
/atomic.io/network/subnets/172.17.89.0-24
/atomic.io/network/subnets/172.17.52.0-24
[root@Kubernetes-master ~]# kubectl get nodes    # check the Kubernetes node status on the master
NAME STATUS AGE
192.168.37.135 Ready 5m
192.168.37.136 Ready 5m
[root@Kubernetes-master ~]# etcdctl member list    # check the etcd cluster member status
328468069ff33f93: name=etcd1 peerURLs=http://192.168.37.134:2380 clientURLs=http://192.168.37.134:2379 isLeader=true
c2f8384c4776d3e7: name=etcd3 peerURLs=http://192.168.37.136:2380 clientURLs=http://192.168.37.136:2379 isLeader=false
d6ef60212aca5419: name=etcd2 peerURLs=http://192.168.37.135:2380 clientURLs=http://192.168.37.135:2379 isLeader=false
[root@Kubernetes-master ~]# kubectl get nodes    # check the k8s cluster node status
NAME STATUS AGE
192.168.37.135 Ready 4h
192.168.37.136 Ready 1h
【K8s Dashboard UI deployment】
Kubernetes provides unified management and scheduling of the Docker container cluster; the web UI makes it easier to manage and control.
Ps: here the images only need to be imported on node1.
[root@kubernetes-node1 ~]# docker load < pod-infrastructure.tgz
[root@kubernetes-node1 ~]# docker tag $(docker images | grep none | awk '{print $3}') registry.access.redhat.com/rhel7/pod-infrastructure
[root@kubernetes-node1 ~]# docker images
REPOSITORY                                            TAG      IMAGE ID       CREATED         SIZE
registry.access.redhat.com/rhel7/pod-infrastructure   latest   99965fb98423   18 months ago   209 MB
[root@kubernetes-node1 ~]# docker load < kubernetes-dashboard-amd64.tgz
[root@kubernetes-node1 ~]# docker tag $(docker images | grep none | awk '{print $3}') bestwu/kubernetes-dashboard-amd64:v1.6.3
[root@kubernetes-node1 ~]# docker images
REPOSITORY                                            TAG      IMAGE ID       CREATED         SIZE
registry.access.redhat.com/rhel7/pod-infrastructure   latest   99965fb98423   18 months ago   209 MB
bestwu/kubernetes-dashboard-amd64                     v1.6.3   9595afede088   21 months ago   139 MB
【Kubernetes-master】
Edit the YAML files and create the Dashboard pod resources.
[root@Kubernetes-master ~]# vim dashboard-controller.yaml
[root@Kubernetes-master ~]# cat dashboard-controller.yaml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    kubernetes.io/cluster-service: "true"
spec:
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
        scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
    spec:
      containers:
      - name: kubernetes-dashboard
        image: bestwu/kubernetes-dashboard-amd64:v1.6.3
        resources:
          # keep request = limit to keep this container in guaranteed class
          limits:
            cpu: 100m
            memory: 50Mi
          requests:
            cpu: 100m
            memory: 50Mi
        ports:
        - containerPort: 9090
        args:
        - --apiserver-host=http://192.168.37.134:8080
        livenessProbe:
          httpGet:
            path: /
            port: 9090
          initialDelaySeconds: 30
          timeoutSeconds: 30
[root@Kubernetes-master ~]# vim dashboard-service.yaml
apiVersion: v1
kind: Service
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    kubernetes.io/cluster-service: "true"
spec:
  selector:
    k8s-app: kubernetes-dashboard
  ports:
  - port: 80
    targetPort: 9090
[root@Kubernetes-master ~]# kubectl apply -f dashboard-controller.yaml
[root@Kubernetes-master ~]# kubectl apply -f dashboard-service.yaml
Ps: while creating these resources, check the logs for any abnormal messages.
[root@Kubernetes-master ~]# tail -f /var/log/messages
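The Dashboard pod itself can also be watched from the master while the logs scroll (the kube-system namespace comes from the YAML above):
[root@Kubernetes-master ~]# kubectl get pods --namespace=kube-system -o wide    # wait for STATUS to reach Running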
You can see on node1 that the containers have started successfully:
[root@kubernetes-node1 ~]# docker ps
CONTAINER ID   IMAGE                                                         COMMAND                  CREATED         STATUS         PORTS   NAMES
f118f845f19f   bestwu/kubernetes-dashboard-amd64:v1.6.3                      "/dashboard --inse..."   8 minutes ago   Up 8 minutes           30dc9e7f_kubernetes-dashboard-1315149111-pfb60_kube-system_19dcb04b-6d6e-11e9-9599-000c291881f6_02fd5b8e
67b7746a6d23   registry.access.redhat.com/rhel7/pod-infrastructure:latest    "/usr/bin/pod"           8 minutes ago   Up 8 minutes           es-dashboard-1315149111-pfb60_kube-system_19dcb04b-6d6e-11e9-9599-000c291881f6_4e2cb565
The result can be verified in a browser by accessing the Dashboard through the k8s-master side.
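With --apiserver-host pointing at the insecure port, the UI should be reachable through the apiserver proxy, presumably at a URL such as:
http://192.168.37.134:8080/ui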
As a simple test, deploy an nginx container and expose it for external access.
Creating a Service exposes the application: by default a random cluster IP is assigned, and port 80 is mapped to port 80 of the backend pod containers. Within the LAN, accessing the cluster IP on port 80 reaches the backend pod application; external clients instead use a node IP plus the randomly generated NodePort to reach the backend pods. A minimal sketch follows.
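One way to do this with kubectl (the image name and replica count are illustrative, and the nodes must be able to pull the nginx image):
[root@Kubernetes-master ~]# kubectl run nginx --image=nginx --replicas=2 --port=80
[root@Kubernetes-master ~]# kubectl expose deployment nginx --port=80 --type=NodePort
[root@Kubernetes-master ~]# kubectl get svc nginx    # note the randomly assigned NodePort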
In a browser, the nginx container created by k8s is then reachable at a node IP plus the randomly mapped port.
【Extension: deploying a local private registry】
# docker run -itd -p 5000:5000 -v /data/registry:/var/lib/registry docker.io/registry
# docker tag docker.io/tomcat 192.168.37.135:5000/tomcat
# vim /etc/sysconfig/docker
OPTIONS='--selinux-enabled --log-driver=journald --signature-verification=false --insecure-registry 192.168.37.135:5000'
ADD_REGISTRY='--add-registry 192.168.37.135:5000'
# systemctl restart docker.service
# docker push 192.168.37.135:5000/tomcat
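To confirm the push landed, the registry's catalog API can be queried (assuming the docker.io/registry image is a v2 registry):
# curl http://192.168.37.135:5000/v2/_catalog    # should list "tomcat" in the repositories array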