GitHub repository: https://github.com/kubernetes/kubernetes/
Official site: https://kubernetes.io
Core objects and roles:

- Pod: the smallest unit in which k8s runs containers; pods are consumed through interface abstractions such as Services.
- Service: provides a stable way to reach the otherwise-changing containers inside k8s from outside the hosts, and performs dynamic pod discovery; a Service can therefore be thought of as k8s's internal load balancer.
- master: control-plane node.
- Node: worker node.
- kube-proxy (https://k8smeetup.github.io/docs/admin/kube-proxy/): maintains the network rules on each node and forwards user requests, effectively handing them to a Service; the administrator specifies the mapping between a Service and its NodePort, as sketched below.
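As an illustration of that Service/NodePort relationship, a minimal NodePort Service sketch — the names and ports here are hypothetical and not part of the cluster built below:

```yaml
# Hypothetical example: expose pods labeled app=demo on every node's port 30080
kind: Service
apiVersion: v1
metadata:
  name: demo-service        # hypothetical name
spec:
  type: NodePort
  selector:
    app: demo               # matches pods carrying this label
  ports:
    - port: 80              # Service (cluster-internal) port
      targetPort: 8080      # container port inside the pod
      nodePort: 30080       # port kube-proxy opens on every node
```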
Role | Hostname | IP address | Software
---|---|---|---
master-1 | centos7-node1 | 192.168.56.11 | docker,kube-controller-manager,kube-apiserver,kube-scheduler
master-2 | centos7-node2 | 192.168.56.12 | docker,kube-controller-manager,kube-apiserver,kube-scheduler
master-3 | centos7-node3 | 192.168.56.13 | docker,kube-controller-manager,kube-apiserver,kube-scheduler
ha-1 | centos7-node4 | 192.168.56.14 | haproxy,keepalived |
ha-2 | centos7-node5 | 192.168.56.15 | haproxy,keepalived |
harbor-1 | centos7-node6 | 192.168.56.16 | docker,docker-compose,harbor |
node-1 | centos7-node7 | 192.168.56.17 | kubelet,kube-proxy |
node-2 | centos7-node8 | 192.168.56.18 | kubelet,kube-proxy |
Configure yum repositories (all nodes):

```bash
$ wget -O /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo && yum -y install epel-release
$ wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker.repo
```
Disable SELinux, the firewall, and swap (all nodes):

```bash
$ sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
$ systemctl stop firewalld && systemctl disable firewalld
$ sed -i '/swap/s/^\(.*\)$/#\1/g' /etc/fstab
$ swapoff -a
```
Enable time synchronization (all nodes):

```bash
$ yum install chrony -y && systemctl enable chronyd && systemctl start chronyd
$ timedatectl set-timezone Asia/Shanghai && timedatectl set-ntp yes
```
Host name resolution (all nodes):

```bash
$ cat /etc/hosts
192.168.56.11 centos7-node1
192.168.56.12 centos7-node2
192.168.56.13 centos7-node3
192.168.56.14 centos7-node4
192.168.56.15 centos7-node5
192.168.56.16 centos7-node6
192.168.56.17 centos7-node7
192.168.56.18 centos7-node8
192.168.56.16 harbor.magedu.com
```
Kernel parameters for bridged traffic (all nodes):

```bash
$ modprobe br_netfilter
$ cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
$ sysctl -p /etc/sysctl.d/k8s.conf
```
IPVS modules for kube-proxy (all nodes):

```bash
$ yum -y install ipvsadm ipset
$ cat > /etc/sysconfig/modules/ipvs.modules <<EOF
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
$ chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules   # load the modules now
$ lsmod | grep -e ip_vs -e nf_conntrack                                                       # verify they loaded
```
Install Docker and configure the daemon (all docker hosts); note the key is `insecure-registries`:

```bash
$ mkdir -p /etc/docker && yum -y install docker-ce
$ tee /etc/docker/daemon.json <<-'EOF'
{
  "registry-mirrors": ["https://0b8hhs68.mirror.aliyuncs.com"],
  "insecure-registries": ["harbor.magedu.com"],
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
$ systemctl enable docker && systemctl start docker
```
Install Harbor on harbor-1:

```bash
$ yum -y install epel-release docker-compose
$ wget https://github.com/goharbor/harbor/releases/download/v2.0.0/harbor-offline-installer-v2.0.0.tgz
$ tar xf harbor-offline-installer-v2.0.0.tgz -C /usr/local/src && cd /usr/local/src/harbor
$ cp harbor.yml.tmpl harbor.yml
$ vim harbor.yml
hostname: harbor.magedu.com
http:
  # port for http, default is 80. If https enabled, this port will redirect to https port
  port: 80
# https related config
#https:
  # https port for harbor, default is 443
#  port: 443
  # The path of cert and key files for nginx
#  certificate: /your/certificate/path
#  private_key: /your/private/key/path
$ ./install.sh
```
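A quick way to confirm the installation before pushing images — this assumes the `/etc/hosts` entry above and the admin password chosen in `harbor.yml`; the health endpoint is part of the Harbor v2 API:

```bash
# log in with the admin account configured in harbor.yml
$ docker login harbor.magedu.com
# all components reported "healthy" means the install is good
$ curl -s http://harbor.magedu.com/api/v2.0/health
```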
Create a `baseimages` project (image repository) in Harbor.
Configure keepalived and haproxy on ha-1 / ha-2:

```bash
$ yum -y install keepalived haproxy
$ vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived

global_defs {
   notification_email {
     acassen@firewall.loc
     failover@firewall.loc
     sysadmin@firewall.loc
   }
   notification_email_from Alexandre.Cassen@firewall.loc
   smtp_server 192.168.200.1
   smtp_connect_timeout 30
   router_id LVS_DEVEL
   vrrp_skip_check_adv_addr
   vrrp_strict
   vrrp_garp_interval 0
   vrrp_gna_interval 0
}

vrrp_instance VI_1 {
    state MASTER
    interface ens33
    virtual_router_id 56
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.56.110 dev ens33 label ens33:1
    }
}
$ systemctl enable keepalived && systemctl start keepalived
$ vim /etc/haproxy/haproxy.cfg
listen k8s-api-6443
    bind 192.168.56.110:6443
    mode tcp
    server centos7-node1 192.168.56.11:6443 check inter 3s fall 3 rise 5
    server centos7-node2 192.168.56.12:6443 check inter 3s fall 3 rise 5
    server centos7-node3 192.168.56.13:6443 check inter 3s fall 3 rise 5
$ systemctl restart haproxy
$ ss -tnl | grep 6443   # verify the listener is up
```
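The listing above is the MASTER side for ha-1; on ha-2 the same file is used with BACKUP state and a lower priority — a sketch, assuming the same interface name (`ens33`):

```
vrrp_instance VI_1 {
    state BACKUP              # ha-1 is MASTER
    interface ens33
    virtual_router_id 56      # must match ha-1
    priority 80               # lower than the MASTER's 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.56.110 dev ens33 label ens33:1
    }
}
```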
Add the kubernetes repo and install the tools on the masters (`\$basearch` is escaped so yum, not the shell, expands it):

```bash
$ cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=kubernetes
baseurl=https://mirrors.tuna.tsinghua.edu.cn/kubernetes/yum/repos/kubernetes-el7-\$basearch
enabled=1
gpgcheck=0
EOF
$ yum install -y kubelet-1.17.2 kubeadm-1.17.2 kubectl-1.17.2
$ systemctl enable kubelet && systemctl start kubelet
```
Enable kubeadm shell completion, review the defaults, then initialize the first master:

```bash
$ mkdir -p /data/scripts
$ kubeadm completion bash > /data/scripts/kubeadm_completion.sh && chmod +x /data/scripts/kubeadm_completion.sh
$ vim /etc/profile   # append the next line
source /data/scripts/kubeadm_completion.sh
$ source /etc/profile
$ kubeadm config print init-defaults                        # print the default init configuration
$ kubeadm config images list --kubernetes-version v1.17.2   # list the images that must be pulled
$ kubeadm init --apiserver-advertise-address=192.168.56.11 --apiserver-bind-port=6443 \
    --control-plane-endpoint=192.168.56.110 --kubernetes-version=v1.17.2 \
    --ignore-preflight-errors=swap \
    --image-repository=registry.cn-hangzhou.aliyuncs.com/google_containers \
    --pod-network-cidr=10.10.0.0/16 --service-cidr=172.26.0.0/16 --service-dns-domain=linux.local
# keep the join commands printed at the end of the output:
You can now join any number of control-plane nodes by copying certificate authorities
and service account keys on each node and then running the following as root:

  kubeadm join 192.168.56.14:6443 --token iou3pg.8q2f13dbw8z2l4lm \
    --discovery-token-ca-cert-hash sha256:e13d02eea0bd631ba8cae228a31b3cc783686544761de1b3c4d514f313f501c3 \
    --control-plane

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.56.14:6443 --token iou3pg.8q2f13dbw8z2l4lm \
    --discovery-token-ca-cert-hash sha256:e13d02eea0bd631ba8cae228a31b3cc783686544761de1b3c4d514f313f501c3

$ kubeadm init phase upload-certs --upload-certs   # generate the control-plane certificate key
6c3a44aee4b3fabb5beb44ab696fee6043c77d3461cee1f2c9e80058aa42d493
```

Run the control-plane join on the other two master nodes:

```bash
$ kubeadm join 192.168.56.14:6443 --token iou3pg.8q2f13dbw8z2l4lm \
    --discovery-token-ca-cert-hash sha256:e13d02eea0bd631ba8cae228a31b3cc783686544761de1b3c4d514f313f501c3 \
    --control-plane --certificate-key 6c3a44aee4b3fabb5beb44ab696fee6043c77d3461cee1f2c9e80058aa42d493
$ kubectl get nodes   # NotReady until a network plugin is installed
NAME            STATUS     ROLES    AGE   VERSION
centos7-node1   NotReady   master   37m   v1.17.2
centos7-node2   NotReady   master   25m   v1.17.2
centos7-node3   NotReady   master   18m   v1.17.2
```
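Before the `kubectl` commands above will work on a master, the admin kubeconfig has to be copied into place — the standard steps that kubeadm init prints:

```bash
$ mkdir -p $HOME/.kube
$ sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
$ sudo chown $(id -u):$(id -g) $HOME/.kube/config
```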
Alternatively, initialize from a config file:

```bash
$ cat kubeadm-1.17.2.yml
```
```yaml
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 48h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.56.11
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: centos7-node1
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: 192.168.56.110
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: v1.17.2
networking:
  dnsDomain: linux.local
  podSubnet: 10.10.0.0/16
  serviceSubnet: 172.26.0.0/16
scheduler: {}
```
```bash
$ kubeadm init --config kubeadm-1.17.2.yml   # run once on one master, then re-join the other masters
```
Pin GitHub name resolution (optional, speeds up raw.githubusercontent.com), then install flannel:

```bash
$ vim /etc/hosts
# GitHub Start
52.74.223.119 github.com
192.30.253.119 gist.github.com
54.169.195.247 api.github.com
185.199.111.153 assets-cdn.github.com
151.101.76.133 raw.githubusercontent.com
151.101.76.133 gist.githubusercontent.com
151.101.76.133 cloud.githubusercontent.com
151.101.76.133 camo.githubusercontent.com
151.101.76.133 avatars0.githubusercontent.com
151.101.76.133 avatars1.githubusercontent.com
151.101.76.133 avatars2.githubusercontent.com
151.101.76.133 avatars3.githubusercontent.com
151.101.76.133 avatars4.githubusercontent.com
151.101.76.133 avatars5.githubusercontent.com
151.101.76.133 avatars6.githubusercontent.com
151.101.76.133 avatars7.githubusercontent.com
151.101.76.133 avatars8.githubusercontent.com
# GitHub End
$ wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
$ vim kube-flannel.yml   # the Network value must match --pod-network-cidr (10.10.0.0/16)
  net-conf.json: |
    {
      "Network": "10.10.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
$ kubectl apply -f kube-flannel.yml
$ kubectl get nodes   # all masters now report Ready
NAME            STATUS   ROLES    AGE   VERSION
centos7-node1   Ready    master   38m   v1.17.2
centos7-node2   Ready    master   26m   v1.17.2
centos7-node3   Ready    master   19m   v1.17.2
```
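Optionally verify the flannel pods themselves; depending on the manifest revision they land in the `kube-system` or `kube-flannel` namespace:

```bash
$ kubectl get pods -n kube-system -o wide | grep flannel
```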
On the worker nodes, add the same repo and install kubelet and kubeadm:

```bash
$ cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=kubernetes
baseurl=https://mirrors.tuna.tsinghua.edu.cn/kubernetes/yum/repos/kubernetes-el7-\$basearch
enabled=1
gpgcheck=0
EOF
$ yum install -y kubelet-1.17.2 kubeadm-1.17.2
$ systemctl enable kubelet && systemctl start kubelet
```
```bash
$ kubeadm join 192.168.56.14:6443 --token iou3pg.8q2f13dbw8z2l4lm \
    --discovery-token-ca-cert-hash sha256:e13d02eea0bd631ba8cae228a31b3cc783686544761de1b3c4d514f313f501c3
```
```bash
$ kubectl run net-test1 --image=alpine --replicas=3 sleep 360000   # create test pods
$ kubectl run net-test2 --image=alpine --replicas=3 sleep 360000
$ kubectl get pod -o wide
NAME        READY   STATUS    RESTARTS   AGE   IP          NODE            NOMINATED NODE   READINESS GATES
net-test1   1/1     Running   1          17h   10.10.5.3   centos7-node8   <none>           <none>
net-test2   1/1     Running   1          17h   10.10.3.3   centos7-node7   <none>           <none>
# the ping tests below run from a shell inside net-test1 (e.g. kubectl exec -it net-test1 sh)
/ # ping -c 3 qq.com   # external network test
PING qq.com (58.250.137.36): 56 data bytes
64 bytes from 58.250.137.36: seq=0 ttl=127 time=42.395 ms
64 bytes from 58.250.137.36: seq=1 ttl=127 time=42.930 ms
64 bytes from 58.250.137.36: seq=2 ttl=127 time=42.146 ms
/ # ping -c 3 192.168.56.13   # host network test
PING 192.168.56.13 (192.168.56.13): 56 data bytes
64 bytes from 192.168.56.13: seq=0 ttl=63 time=0.438 ms
64 bytes from 192.168.56.13: seq=1 ttl=63 time=0.468 ms
/ # ping -c 3 10.10.3.3   # pod-to-pod test
PING 10.10.3.3 (10.10.3.3): 56 data bytes
64 bytes from 10.10.3.3: seq=0 ttl=62 time=1.153 ms
64 bytes from 10.10.3.3: seq=1 ttl=62 time=1.069 ms
64 bytes from 10.10.3.3: seq=2 ttl=62 time=1.296 ms
```
Mirror the dashboard images into Harbor, then adapt the manifest:

```bash
$ docker pull kubernetesui/dashboard:v2.0.0-rc6
$ docker pull kubernetesui/metrics-scraper:v1.0.3
$ docker tag kubernetesui/dashboard:v2.0.0-rc6 harbor.magedu.com/baseimages/dashboard:v2.0.0-rc6
$ docker tag 3327f0dbcb4a harbor.magedu.com/baseimages/metrics-scraper:v1.0.3   # tag by image ID
$ docker login harbor.magedu.com
$ docker push harbor.magedu.com/baseimages/dashboard:v2.0.0-rc6
$ docker push harbor.magedu.com/baseimages/metrics-scraper:v1.0.3
# fetch the manifest and adjust it
$ wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-rc6/aio/deploy/recommended.yaml -O dashboard-2.0.0-rc6.yml
$ vim dashboard-2.0.0-rc6.yml   # point the images at the internal harbor and expose the Service as a NodePort
image: harbor.magedu.com/baseimages/dashboard:v2.0.0-rc6        # first of the two image changes
image: harbor.magedu.com/baseimages/metrics-scraper:v1.0.3      # second of the two image changes
```
```yaml
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  type: NodePort
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30002
  selector:
    k8s-app: kubernetes-dashboard
```
```bash
$ vim admin-user.yml
```
```yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard
```
```bash
$ kubectl apply -f dashboard-2.0.0-rc6.yml
$ kubectl apply -f admin-user.yml
$ kubectl get pod -A
$ kubectl get svc -A
```
The dashboard is now reachable via any node, e.g. https://192.168.56.12:30002.
Fetch the login token:

```bash
$ kubectl get secret -A | grep admin-user
kubernetes-dashboard   admin-user-token-5vvwn   kubernetes.io/service-account-token   3   6m17s
$ kubectl describe secret admin-user-token-5vvwn -n kubernetes-dashboard   # copy the token from the output
```
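The same token can be extracted non-interactively with jsonpath; the secret name is looked up from the ServiceAccount because it varies per cluster:

```bash
$ kubectl -n kubernetes-dashboard get secret \
    $(kubectl -n kubernetes-dashboard get sa admin-user -o jsonpath='{.secrets[0].name}') \
    -o jsonpath='{.data.token}' | base64 -d
```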
Check the available versions and review the upgrade plan (on the first master):

```bash
$ yum list --showduplicates | grep kubeadm
$ yum -y install kubeadm-1.17.4
$ kubeadm upgrade plan   # review the upgrade plan
```
Upgrade the control plane (each master):

```bash
$ yum -y install kubeadm-1.17.4
$ kubeadm upgrade apply v1.17.4
```
```bash
$ yum -y install kubelet-1.17.4 kubectl-1.17.4
$ systemctl daemon-reload && systemctl restart kubelet
```
On the worker nodes:

```bash
$ yum -y install kubeadm-1.17.4
$ kubeadm upgrade node --kubelet-version 1.17.4
$ yum -y install kubelet-1.17.4
$ systemctl daemon-reload && systemctl restart kubelet
```
```bash
$ kubectl get nodes
NAME            STATUS     ROLES    AGE   VERSION
centos7-node1   Ready      master   31h   v1.17.4
centos7-node2   Ready      master   30h   v1.17.4
centos7-node3   NotReady   master   30h   v1.17.4
centos7-node7   Ready      <none>   30h   v1.17.4
centos7-node8   Ready      <none>   29h   v1.17.4
```
Reference: https://kubernetes.io/zh/docs/concepts/workloads/controllers/deployment/
```bash
$ docker pull nginx:1.14.2
$ docker tag nginx:1.14.2 harbor.magedu.com/baseimages/nginx:1.14.2
$ docker login harbor.magedu.com
$ docker push harbor.magedu.com/baseimages/nginx:1.14.2   # push the base image to the local registry
$ mkdir -p ~/kubeadm_demo/nginx && vim ~/kubeadm_demo/nginx/nginx.yml
```
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  labels:
    app: nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: harbor.magedu.com/baseimages/nginx:1.14.2
        ports:
        - containerPort: 80
---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: magedu-nginx-service-label
  name: magedu-nginx-service
  namespace: default
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 80
    nodePort: 30004
  selector:
    app: nginx
```
```bash
$ kubectl apply -f ~/kubeadm_demo/nginx/nginx.yml
$ kubectl get pod -A
$ kubectl get svc -A
$ kubectl logs nginx-deployment-79dbb87ff9-w5f87 -f   # follow the nginx-deployment access log
```
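A quick smoke test of the NodePort from any host that can reach the nodes; the node IP is taken from the inventory table, and any node works since kube-proxy opens 30004 everywhere:

```bash
$ curl -I http://192.168.56.17:30004/
```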
haproxy and keepalived configuration (to publish the nginx NodePort behind the VIP; see the sketch below).
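A plausible haproxy stanza for ha-1/ha-2, sketched under the assumption that it mirrors the API listener's health-check parameters; reload haproxy after adding it:

```
listen k8s-nginx-80
    bind 192.168.56.110:80
    mode tcp
    server centos7-node7 192.168.56.17:30004 check inter 3s fall 3 rise 5
    server centos7-node8 192.168.56.18:30004 check inter 3s fall 3 rise 5
```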
```bash
$ docker pull tomcat
$ docker run --name tomcat -d -p 8080:8080 tomcat
$ docker exec -it tomcat bash
root@7785ba4b14d2:/usr/local/tomcat# cd webapps
root@7785ba4b14d2:/usr/local/tomcat/webapps# mkdir -p app && echo "tomcat.app" > app/index.html
```
Test access: http://192.168.56.11:8080/app/
Create a public project named `linux` in Harbor.
```bash
$ mkdir -p ~/kubeadm_demo/tomcat && vim ~/kubeadm_demo/tomcat/Dockerfile
FROM tomcat
ADD ./app /usr/local/tomcat/webapps/app/
$ cd ~/kubeadm_demo/tomcat && mkdir app && echo "tomcat APP" > app/index.html
$ docker build -t harbor.magedu.com/linux/tomcat:app .                                # build the image
$ docker run --name t1 -it --rm -p 8080:8080 harbor.magedu.com/linux/tomcat:app      # smoke-test the image
$ docker push harbor.magedu.com/linux/tomcat:app
```
```bash
[root@centos7-node1 tomcat]# vim tomcat.yml
```
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: tomcat-deployment
  labels:
    app: tomcat
spec:
  replicas: 1
  selector:
    matchLabels:
      app: tomcat
  template:
    metadata:
      labels:
        app: tomcat
    spec:
      containers:
      - name: tomcat
        image: harbor.magedu.com/linux/tomcat:app
        ports:
        - containerPort: 8080   # tomcat listens on 8080
---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: magedu-tomcat-service-label
  name: magedu-tomcat-service
  namespace: default
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 8080
    #nodePort: 30005
  selector:
    app: tomcat
```
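The tomcat manifest still has to be applied before the nginx proxy in the next step can resolve `magedu-tomcat-service`:

```bash
$ kubectl apply -f tomcat.yml
$ kubectl get svc magedu-tomcat-service   # note the auto-assigned NodePort
```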
```bash
$ cd ~/kubeadm_demo/nginx && vim Dockerfile
FROM nginx:1.14.2
ADD default.conf /etc/nginx/conf.d/
$ vim default.conf
server {
    listen       80;
    server_name  localhost;

    location /app {
        proxy_pass http://magedu-tomcat-service;   # the tomcat Service name
    }
}
$ docker build -t harbor.magedu.com/baseimages/nginx:v0.1 .
$ docker push harbor.magedu.com/baseimages/nginx:v0.1
$ vim nginx.yml
```
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  labels:
    app: nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: harbor.magedu.com/baseimages/nginx:v0.1   # updated image
        ports:
        - containerPort: 80
---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: magedu-nginx-service-label
  name: magedu-nginx-service
  namespace: default
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 80
    nodePort: 30004
  selector:
    app: nginx
```
```bash
$ kubectl apply -f nginx.yml
# now http://192.168.56.11:30004/app/index.html serves the tomcat app through nginx
```
```bash
$ kubeadm token generate   # generate a random token value locally (does not touch the cluster)
$ kubeadm token create     # create a new bootstrap token on the cluster
$ kubeadm token delete     # revoke a token (pass the token value as an argument)
```
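In day-to-day use the handiest variant prints a complete, ready-to-paste worker join command:

```bash
$ kubeadm token create --print-join-command
```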