Environment:
CentOS Linux release 7.2.1511 (Core)

192.168.50.218  master
192.168.50.219  node1
192.168.50.220  node2

Each machine is provisioned with 8 CPU cores and 16 GB of RAM.
# Firewall: make sure iptables/firewalld are stopped and disabled
systemctl status iptables
systemctl status firewalld
systemctl stop firewalld
systemctl disable firewalld
systemctl status firewalld

# Disable SELinux
sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
setenforce 0

# Raise the open-file limit
sed -i '/# End of file/i\*\t\t-\tnofile\t\t65535' /etc/security/limits.conf
cat /etc/security/limits.conf
ulimit -HSn 65535
ulimit -n

# Kernel parameter tuning
echo 'net.ipv4.ip_forward = 1' > /etc/sysctl.conf
echo 'net.ipv4.tcp_timestamps = 1' >> /etc/sysctl.conf
echo 'net.ipv4.tcp_tw_recycle = 1' >> /etc/sysctl.conf
echo 'net.ipv4.tcp_fin_timeout = 30' >> /etc/sysctl.conf
sysctl -p

# Switch the yum repos to the Aliyun mirrors
cd /etc/yum.repos.d
mv CentOS-Base.repo CentOS-Base.repo.bak
rpm -ivh http://source.zhaolibin.com/scripts/wget-1.14-10.el7_0.1.x86_64.rpm
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
yum clean all
yum makecache

# Set the timezone to Asia/Shanghai and sync time via cron
cat /usr/share/zoneinfo/Asia/Shanghai > /etc/localtime
yum install ntpdate -y
echo "*/5 * * * * /usr/sbin/ntpdate 0.asia.pool.ntp.org" >> /var/spool/cron/root
yum -y install conntrack net-tools
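An optional sanity check after the prep. These are standard CentOS 7 commands; the expected results assume the settings above were applied (firewalld inactive, SELinux permissive until reboot, nofile 65535, IP forwarding enabled):

systemctl is-active firewalld   # expect: inactive
getenforce                      # expect: Permissive now, Disabled after reboot
ulimit -n                       # expect: 65535
sysctl net.ipv4.ip_forward      # expect: net.ipv4.ip_forward = 1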
Download the k8s_v1.10.0_install.tar.gz package into /opt. Link: https://pan.baidu.com/s/1pJrhBfMIrYjoqzUHen_ZqQ  Password: y0nc
Add the hosts entries on all three machines (configure every machine):
192.168.50.218 master
192.168.50.219 node1
192.168.50.220 node2
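One way to append these entries on each machine is a heredoc; this is just a sketch and assumes /etc/hosts does not already contain these lines:

cat >> /etc/hosts <<'EOF'
192.168.50.218 master
192.168.50.219 node1
192.168.50.220 node2
EOF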
Set the hostname on each machine to match its hosts entry, for example as shown below.
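A minimal sketch using hostnamectl (the standard CentOS 7 tool); the names follow the hosts entries above:

# on 192.168.50.218
hostnamectl set-hostname master
# on 192.168.50.219
hostnamectl set-hostname node1
# on 192.168.50.220
hostnamectl set-hostname node2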
Configure passwordless SSH login.
# Run on master only
ssh-keygen                      # press Enter at every prompt
cat /root/.ssh/id_rsa.pub > /root/.ssh/authorized_keys
scp -r /root/.ssh node1:/root/
scp -r /root/.ssh node2:/root/
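To confirm key-based login works, a quick check from master (the first connection to each node will prompt to accept its host key):

ssh node1 hostname
ssh node2 hostname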
Master IP: 192.168.50.218

Component    Version      Deployment   Install dir / entry point
etcd         3.3.2        binary       /usr/local/kubernetes/bin/etcd
docker       18.03.0-ce   binary       /usr/bin/docker
flannel      0.10.0       binary       /usr/local/kubernetes/bin/flanneld
kubernetes   1.10.0       binary       /usr/local/kubernetes/bin (kube-apiserver, kube-controller-manager, kubectl, kube-proxy, kube-scheduler)
Node IP: 192.168.50.219/220

Component    Version      Deployment   Install dir / entry point
etcd         3.3.2        binary       /usr/local/kubernetes/bin/etcd
docker       18.03.0-ce   binary       /usr/bin/docker
flannel      0.10.0       binary       /usr/local/kubernetes/bin/flanneld
kubernetes   1.10.0       binary       /usr/local/kubernetes/bin (kubectl, kubelet, kube-proxy)
cd /opt/
tar -zxvf k8s_v1.10.0_install.tar.gz
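The following steps use the etcd, flannel, docker, and kubernetes subdirectories, so a quick listing confirms the archive extracted where expected (directory names taken from the commands later in this guide):

ls /opt/k8s_v1.10.0_install/
# expected per the later steps: docker  etcd  flannel  kubernetes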
In /opt/k8s_v1.10.0_install/etcd/install_etcd.sh, adjust the ifconfig command that extracts the local IP address so it matches your environment and filters out the correct host IP (here the interface is eth0):
ETCD_LISTEN_IP=`ifconfig eth0 | grep 'inet ' | awk '{ print $2}'`
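If a node's interface is not named eth0 (for example ens192 on newer templates; that name is only an illustration), the same value can be extracted with the ip command instead:

# hypothetical interface name; replace ens192 with the actual NIC
ETCD_LISTEN_IP=`ip -4 addr show ens192 | grep 'inet ' | awk '{print $2}' | cut -d/ -f1`
echo $ETCD_LISTEN_IP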
Run the install script on each of the three machines:
# master
cd /opt/k8s_v1.10.0_install/etcd
./install_etcd.sh etcd01 etcd01=http://192.168.50.218:2380,etcd02=http://192.168.50.219:2380,etcd03=http://192.168.50.220:2380

# node1
cd /opt/k8s_v1.10.0_install/etcd
./install_etcd.sh etcd02 etcd01=http://192.168.50.218:2380,etcd02=http://192.168.50.219:2380,etcd03=http://192.168.50.220:2380

# node2
cd /opt/k8s_v1.10.0_install/etcd
./install_etcd.sh etcd03 etcd01=http://192.168.50.218:2380,etcd02=http://192.168.50.219:2380,etcd03=http://192.168.50.220:2380
Verify that the etcd cluster is up:
vim /etc/profile
export K8S_HOME=/usr/local/kubernetes
export PATH=$PATH:$K8S_HOME/bin

source /etc/profile
etcdctl member list

[root@localhost etcd]# etcdctl member list
3391fd63f6e21a41: name=etcd02 peerURLs=http://node1:2380 clientURLs=http://192.168.50.219:2379 isLeader=false
41122860660738be: name=etcd01 peerURLs=http://master:2380 clientURLs=http://192.168.50.218:2379 isLeader=true
fcb34b889f767170: name=etcd03 peerURLs=http://node2:2380 clientURLs=http://192.168.50.220:2379 isLeader=false
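Beyond member list, the v2-style etcdctl used above also offers an overall health check; a minimal sketch (assuming the default local client endpoint works, as it did for member list):

etcdctl cluster-health
# expect: "member ... is healthy" for all three members, then "cluster is healthy"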
Flannel must be installed on every host.
Edit the install_flannel.sh script to set the correct network interface and the etcd network key/value:
FLANNEL_NET='{"Network":"172.18.0.0/16", "SubnetMin": "172.18.1.0", "SubnetMax": "172.18.254.0", "Backend": {"Type": "host-gw"}}'
IFACE="eth0"
# Run on all three machines
cd /opt/k8s_v1.10.0_install/flannel/
./install_flannel.sh http://master:2379,http://node1:2379,http://node2:2379
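After flanneld starts, each host should hold a /24 out of 172.18.0.0/16 and, with the host-gw backend, routes to the other hosts' subnets. A rough check; the service and file names depend on what install_flannel.sh sets up, so treat them as assumptions:

systemctl status flanneld        # assumes the script installs a flanneld unit
cat /run/flannel/subnet.env      # assumes the default subnet file location
ip route | grep 172.18           # host-gw: routes to the other nodes' subnets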
Install Docker on every machine:

cd /opt/k8s_v1.10.0_install/docker
./install-docker.sh docker-18.03.0-ce.tgz
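A quick check that Docker came up; the docker0 expectation is an assumption about install-docker.sh wiring the bridge to the flannel subnet:

systemctl status docker
docker version
ip addr show docker0   # assumption: the script sets the bridge IP from the flannel subnet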
# master
cd /opt/k8s_v1.10.0_install/kubernetes/master
./install_k8s_master.sh 192.168.50.218 http://master:2379,http://node1:2379,http://node2:2379

# node1 and node2
cd /opt/k8s_v1.10.0_install/kubernetes/node/
./install_k8s_node.sh master
Check the cluster status:
[root@master master]# kubectl get nodes
NAME      STATUS    ROLES     AGE       VERSION
node1     Ready     <none>    5m        v1.10.0
node2     Ready     <none>    5m        v1.10.0

[root@master master]# kubectl get cs
NAME                 STATUS    MESSAGE              ERROR
scheduler            Healthy   ok
controller-manager   Healthy   ok
etcd-0               Healthy   {"health":"true"}
etcd-1               Healthy   {"health":"true"}
etcd-2               Healthy   {"health":"true"}
cd /opt/k8s_v1.10.0_install/kubernetes/images
docker load -i gcr.io~google_containers~pause-amd64~3.0.tar.gz
docker load -i busybox.tar
docker load -i k8s-dns-dnsmasq-nanny-amd64_v1.14.7.tar
docker load -i k8s-dns-kube-dns-amd64_1.14.7.tar
docker load -i k8s-dns-sidecar-amd64_1.14.7.tar
docker load -i kubernetes-dashboard-amd64_v1.8.3.tar
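To confirm the images were imported, a simple filter over docker images (the patterns come from the tarball names above):

docker images | grep -E 'pause|busybox|k8s-dns|dashboard'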