1. Pre-installation Preparation
1.1 Operating system details
Three hosts are required, each with a minimal installation of CentOS 7.3, updated to the latest packages.
[root@master ~]# cat /etc/redhat-release
CentOS Linux release 7.3.1611 (Core)
Role     Hostname   IP
Master   master     192.168.1.14
node1    slave-1    192.168.1.15
node2    slave-2    192.168.1.16
1.2 Disable firewalld on every host and use iptables instead
Run the following commands to stop firewalld:
[root@master ~]# systemctl stop firewalld.service      # stop firewalld
[root@master ~]# systemctl disable firewalld.service   # disable firewalld at boot
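As an optional sanity check, you can confirm on each host that firewalld is really stopped and will not come back after a reboot:
[root@master ~]# systemctl status firewalld.service       # should report inactive (dead)
[root@master ~]# systemctl is-enabled firewalld.service   # should print disabled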
1.3 Install the NTP service
[root@master ~]# yum install -y ntp wget net-tools
[root@master ~]# systemctl start ntpd
[root@master ~]# systemctl enable ntpd
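If you want to verify that ntpd is actually synchronising, ntpq lists the configured peers (the peer names depend on what is in /etc/ntp.conf):
[root@master ~]# ntpq -p    # a leading asterisk (*) marks the peer the clock is currently synced to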
2. Installation and Configuration
Note: kubernetes, etcd and related packages are already in the CentOS EPEL repository and can be installed directly with yum (epel-release must be installed first).
2.1 Install the Kubernetes Master
Install kubernetes and etcd with the following command:
# yum install -y kubernetes etcd
Edit /etc/etcd/etcd.conf so that etcd listens on all IP addresses. Make sure the following lines are uncommented and set to the values shown below:
[root@master ~]# cat /etc/etcd/etcd.conf
# [member]
ETCD_NAME=default
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"
ETCD_INITIAL_CLUSTER="default=http://192.168.1.14:2380"
#[cluster]
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.1.14:2379"
Edit the Kubernetes API server configuration file /etc/kubernetes/apiserver. Make sure the following lines are uncommented and set to the values shown below:
[root@master ~]# cat /etc/kubernetes/apiserver
###
# kubernetes system config
#
# The following values are used to configure the kube-apiserver
#
# The address on the local server to listen to.
KUBE_API_ADDRESS="--address=0.0.0.0"
# The port on the local server to listen on.
KUBE_API_PORT="--port=8080"
# Port minions listen on
KUBELET_PORT="--kubelet_port=10250"
# Comma separated list of nodes in the etcd cluster
KUBE_ETCD_SERVERS="--etcd_servers=http://192.168.1.14:2379"
# Address range to use for services
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"
# default admission control policies
KUBE_ADMISSION_CONTROL="--admission_control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota"
# Add your own!
KUBE_API_ARGS=""
Start the etcd, kube-apiserver, kube-controller-manager and kube-scheduler services and enable them at boot:
[root@master ~]# cat /script/kubenetes_service.sh
for SERVICES in etcd kube-apiserver kube-controller-manager kube-scheduler; do
    systemctl restart $SERVICES
    systemctl enable $SERVICES
    systemctl status $SERVICES
done
[root@master ~]# sh /script/kubenetes_service.sh
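As a rough health check of the master (output wording differs slightly between package versions), you can verify that etcd and the control-plane components respond:
[root@master ~]# etcdctl cluster-health            # should report that the cluster is healthy
[root@master ~]# kubectl get componentstatuses     # scheduler, controller-manager and etcd should show Healthy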
Define the flannel network configuration in etcd; this configuration will be picked up by the flannel service on the nodes:
[root@master ~]# etcdctl mk /centos.com/network/config '{"Network":"172.17.0.0/16"}'
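To double-check that the key was written where the nodes' flanneld will look for it (the prefix must match the FLANNEL_ETCD_PREFIX configured on the nodes further below), read it back:
[root@master ~]# etcdctl get /centos.com/network/config
{"Network":"172.17.0.0/16"}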
Add iptables rules to allow the required ports:
[root@master ~]# iptables -I INPUT -p tcp --dport 2379 -j ACCEPT
[root@master ~]# iptables -I INPUT -p tcp --dport 10250 -j ACCEPT
[root@master ~]# iptables -I INPUT -p tcp --dport 8080 -j ACCEPT
[root@master ~]# iptables-save
Alternatively, write the rules into the iptables configuration file /etc/sysconfig/iptables.
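One way to make these rules survive a reboot is the iptables-services package, which manages /etc/sysconfig/iptables; this is only a sketch, assuming the package is available in your repositories:
[root@master ~]# yum install -y iptables-services
[root@master ~]# systemctl enable iptables
[root@master ~]# service iptables save    # writes the currently loaded rules to /etc/sysconfig/iptables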
Check the node list (no nodes have been configured yet, so the list should be empty):
[root@master ~]# kubectl get nodes
NAME LABELS STATUS
2.2 Install the Kubernetes Nodes
Note: the following steps should be performed on node1 and node2 (more nodes can be added in the same way).
Install kubernetes and flannel with yum:
[root@slave1 ~]# yum install -y flannel kubernetes
Point the flannel service at the etcd server: edit the following lines in /etc/sysconfig/flanneld so that flannel connects to the master:
[root@slave1 ~]# cat /etc/sysconfig/flanneld
FLANNEL_ETCD="http://192.168.1.14:2379"        # set to the etcd server's IP
FLANNEL_ETCD_PREFIX="/centos.com/network"
Edit the default Kubernetes settings in /etc/kubernetes/config and make sure KUBE_MASTER points at the Kubernetes master API server:
[root@slave1 ~]# cat /etc/kubernetes/config
KUBE_MASTER="--master=http://192.168.1.14:8080"
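Before going further, it is worth checking from each node that the master's API server is reachable at that address; a simple unauthenticated call against the insecure port should return a small version JSON (assuming port 8080 was opened on the master as above):
[root@slave1 ~]# curl http://192.168.1.14:8080/version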
Edit the following lines in /etc/kubernetes/kubelet:
node1:
[root@slave1 ~]# cat /etc/kubernetes/kubelet
KUBELET_ADDRESS="--address=0.0.0.0"
KUBELET_PORT="--port=10250"
KUBELET_HOSTNAME="--hostname_override=192.168.1.15"
KUBELET_API_SERVER="--api_servers=http://192.168.1.14:8080"
KUBELET_ARGS=""
node2:
[root@slave2 ~]# cat /etc/kubernetes/kubelet
KUBELET_ADDRESS="--address=0.0.0.0"
KUBELET_PORT="--port=10250"
KUBELET_HOSTNAME="--hostname_override=192.168.1.16"
KUBELET_API_SERVER="--api_servers=http://192.168.1.14:8080"
KUBELET_ARGS=""
Start the kube-proxy, kubelet, docker and flanneld services and enable them at boot:
[root@slave1 ~]# cat /script/kubernetes_node_service.sh
for SERVICES in kube-proxy kubelet docker flanneld; do
    systemctl restart $SERVICES
    systemctl enable $SERVICES
    systemctl status $SERVICES
done
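As on the master, the script then has to be executed on each node (assuming it was saved under the path shown above):
[root@slave1 ~]# sh /script/kubernetes_node_service.sh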
On each node you should now see two new network interfaces, docker0 and flannel0, and each node's flannel0 interface should get a different IP range, like this:
node1:
[root@slave1 ~]# ip a | grep flannel | grep inet
inet 172.17.11.0/16 scope global flannel0
node2:
[root@slave2 ~]# ip a | grep flannel | grep inet
inet 172.17.60.0/16 scope global flannel0
Add iptables rules:
[root@slave1 ~]# iptables -I INPUT -p tcp --dport 2379 -j ACCEPT
[root@slave1 ~]# iptables -I INPUT -p tcp --dport 10250 -j ACCEPT
[root@slave1 ~]# iptables -I INPUT -p tcp --dport 8080 -j ACCEPT
Now log in to the Kubernetes master and verify the status of the minion nodes:
[root@master ~]# kubectl get nodes
NAME           STATUS    AGE
192.168.1.15   Ready     2h
192.168.1.16   Ready     2h
At this point the Kubernetes cluster is configured and running, and we can move on to the next steps.
3. Creating Pods (Containers)
To create a pod, we need to define a YAML or JSON configuration file on the Kubernetes master and then create the pod with the kubectl command.
[root@slave1 ~]# mkdir -p /k8s/pods
[root@slave1 ~]# cd /k8s/pods/
[root@slave1 ~]# cat nginx.yaml
The content of nginx.yaml is as follows:
apiVersion: v1
kind: Pod
metadata:
  name: nginx
  labels:
    app: nginx
spec:
  containers:
  - name: nginx
    image: nginx
    ports:
    - containerPort: 80
Create the pod:
[root@slave1 ~]# kubectl create -f nginx.yaml
This produces the following error:
Error from server: error when creating "nginx.yaml": Pod "nginx" is forbidden: no API token found for service account default/default, retry after the token is automatically created and added to the service account
The workaround is to edit /etc/kubernetes/apiserver, remove SecurityContextDeny and ServiceAccount from KUBE_ADMISSION_CONTROL, and restart the kube-apiserver.service service:
# cat /etc/kubernetes/apiserver
KUBE_ADMISSION_CONTROL="--admission_control=NamespaceLifecycle,NamespaceExists,LimitRanger,ResourceQuota"
# systemctl restart kube-apiserver.service
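To confirm the new admission-control setting actually took effect, you can check that the restarted kube-apiserver process is running with the trimmed flag value (a simple process grep; the flag name is the one set in /etc/kubernetes/apiserver):
# systemctl status kube-apiserver.service    # should be active (running)
# ps aux | grep '[k]ube-apiserver' | grep -o 'admission_control=[^ ]*'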
Then create the pod again:
# kubectl create -f nginx.yaml
pods/nginx
Check the pod:
# kubectl get pod nginx
NAME READY STATUS RESTARTS AGE
nginx 0/1 Image: nginx is not ready on the node 0 34s
The STATUS stays like this and the pod is never created successfully, so we troubleshoot. Describing the pod reveals the following errors:
# kubectl describe pod nginx
Wed, 28 Oct 2015 10:25:30 +0800 Wed, 28 Oct 2015 10:25:30 +0800 1 {kubelet 192.168.1.16} implicitly required container POD pulled Successfully pulled Pod container image "gcr.io/google_containers/pause:0.8.0"
Wed, 28 Oct 2015 10:25:30 +0800 Wed, 28 Oct 2015 10:25:30 +0800 1 {kubelet 192.168.1.16} implicitly required container POD failed Failed to create docker container with error: no such image
Wed, 28 Oct 2015 10:25:30 +0800 Wed, 28 Oct 2015 10:25:30 +0800 1 {kubelet 192.168.1.16} failedSync Error syncing pod, skipping: no such image
Wed, 28 Oct 2015 10:27:30 +0800 Wed, 28 Oct 2015 10:29:30 +0800 2 {kubelet 192.168.1.16} implicitly required container POD failed Failed to pull image "gcr.io/google_containers/pause:0.8.0": image pull failed for gcr.io/google_containers/pause:0.8.0, this may be because there are no credentials on this request. details: (API error (500): invalid registry endpoint "http://gcr.io/v0/". HTTPS attempt: unable to ping registry endpoint https://gcr.io/v0/
v2 ping attempt failed with error: Get https://gcr.io/v2/: dial tcp 173.194.72.82:443: i/o timeout
You may also run into the problem of a pod staying in the Pending state. In that case, inspect the pod with kubectl describe pods/pod-name; if there is no error message, the minion is simply still pulling the image, and the pod will start successfully once the download finishes.
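Besides describing a single pod, the cluster-wide event list is often the fastest way to see why a pod is stuck; image-pull failures like the one above show up there as well:
[root@master ~]# kubectl get events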
Find the pause:0.8.0 image online and import it on each node:
On a Docker host located outside mainland China (one that can reach gcr.io), download the images with docker pull:
docker pull gcr.io/google_containers/pause:latest
docker pull gcr.io/google_containers/pause:1.0
docker pull gcr.io/google_containers/pause:0.8.0
Then export the images:
docker save -o pause.tar gcr.io/google_containers/pause
gzip pause.tar
Finally, copy the archive to every Docker host in the Kubernetes environment and load it:
docker load -i pause.tar.gz
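A quick check on each node that the image really landed in the local Docker image store (the tag must match the one the kubelet is asking for, 0.8.0 here):
[root@slave1 ~]# docker images | grep pause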
Then the pod can be created successfully with the following command:
[root@master ~]# kubectl create -f nginx.yaml
pods/nginx
Check the pod:
[root@master ~]# kubectl get pod nginx
NAME      READY     STATUS    RESTARTS   AGE
nginx     1/1       Running   0          2min
Go to the nodes and check the Docker images:
[root@slave1 ~]# docker images
REPOSITORY                                             TAG       IMAGE ID       CREATED        SIZE
registry.access.redhat.com/rhel7/pod-infrastructure    latest    34d3450d733b   10 weeks ago   205 MB
gcr.io/google_containers/pause                         0.8.0     bf595365a558   2 years ago    241.7 kB
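As a final smoke test, you can look up the nginx pod's IP on the master and fetch the default page from one of the nodes; the address below is only an example of what the flannel network might have assigned:
[root@master ~]# kubectl describe pod nginx | grep ^IP             # e.g. IP: 172.17.60.2 (example value)
[root@slave2 ~]# curl -s http://172.17.60.2 | grep -i '<title>'    # should print "Welcome to nginx!"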