# Permanently disable SELinux (effective after reboot) and verify the edit took.
sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
grep SELINUX=disabled /etc/selinux/config
# Also disable SELinux for the running session and confirm.
setenforce 0
getenforce
# Kubernetes/docker manage their own iptables rules; stop and disable firewalld.
systemctl stop firewalld
systemctl disable firewalld
# Disable swap (the kubelet refuses to start with swap enabled); also remove
# the swap entry from /etc/fstab to make this permanent across reboots.
#swapoff -a
# Optional: time synchronisation. NOTE: the yum package is named "chrony"
# (the systemd service it ships is "chronyd").
#yum install chrony -y
#systemctl start chronyd
#systemctl enable chronyd
# Generate a boot-time helper that loads every IPVS kernel module available
# for the running kernel (needed for kube-proxy's IPVS mode).
# The heredoc delimiter is quoted ('EOF') so that $ipvs_mods_dir, $(ls ...),
# $mod and $? are written into the file literally instead of being expanded
# while the file is created — the original unquoted EOF expanded them at
# write time and produced a broken script.
cat >>/etc/sysconfig/modules/ipvs.modules<<'EOF'
ipvs_mods_dir="/usr/lib/modules/$(uname -r)/kernel/net/netfilter/ipvs"
for mod in $(ls $ipvs_mods_dir |grep -o "^[^.]*");do
    /sbin/modinfo -F filename $mod &>/dev/null
    if [ $? -eq 0 ];then
        /sbin/modprobe $mod
    fi
done
EOF
chmod +x /etc/sysconfig/modules/ipvs.modules
# Load the IPVS modules right now (the file is also executed automatically on boot).
bash /etc/sysconfig/modules/ipvs.modules
1.2.1 選擇3臺機器
Master: 11.0.0.11
Node01: 11.0.0.21
Node02: 11.0.0.22
# Optional: if another docker repo is already configured, skip this — the
# official docker repo is very slow to download from.
# NOTE: the yum repo directory is /etc/yum.repos.d (the original said
# /etc/yum.repo.d, which does not exist).
#cd /etc/yum.repos.d
#wget http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum install docker-ce -y
docker 1.13後自動設置iptables的FORWARD默認策略爲DROP,這可能會影響Kubernetes集羣依賴的報文轉發功能,所以在docker服務啓動後,從新將FORWARD鏈設置爲ACCEPT,方式是修改/usr/lib/systemd/system/docker.service文件。
# Re-set the FORWARD chain to ACCEPT after docker starts. Anchor the insertion
# on the ExecStart= line instead of the hard-coded line number 14, which
# silently breaks when the unit-file layout changes between docker releases.
sed -i '/^ExecStart=/aExecStartPost=/usr/sbin/iptables -P FORWARD ACCEPT' /usr/lib/systemd/system/docker.service
# Inspect current iptables rules and the bridge-related sysctls.
iptables -vnL
sysctl -a | grep bridge
# Make bridged traffic visible to iptables (required by kube-proxy / flannel).
cat >>/etc/sysctl.d/k8s.conf<<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl -p /etc/sysctl.d/k8s.conf
# Copy the sysctl settings and the patched docker unit file to both worker nodes.
scp /etc/sysctl.d/k8s.conf node01:/etc/sysctl.d/
scp /etc/sysctl.d/k8s.conf node02:/etc/sysctl.d/
scp /usr/lib/systemd/system/docker.service node01:/usr/lib/systemd/system/
scp /usr/lib/systemd/system/docker.service node02:/usr/lib/systemd/system/
# Reload systemd after editing the unit file, then start docker and enable it on boot.
#systemctl daemon-reload
#systemctl start docker
#systemctl enable docker
# CentOS 7 ships its own repos; add the Aliyun kubernetes mirror if needed.
cat >>/etc/yum.repos.d/kubernetes.repo<<EOF
[kubernetes]
name=Kubernetes Repository
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
gpgcheck=1
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
EOF
# sed append-at-end-of-line example: let the kubelet tolerate enabled swap.
# Not needed in production — swap should simply be disabled there.
#sed -i 's/$/&"--fail-swap-on=false"/g' /etc/sysconfig/kubelet
# Install the EPEL repo, then bring the system up to date.
sudo yum -y install epel-release
sudo yum update
# Configure the virt7-docker-common-release repo. Written with a heredoc so the
# step is reproducible (the original instructed editing the file with vim).
sudo tee /etc/yum.repos.d/virt7-docker-common-release.repo >/dev/null <<'EOF'
[virt7-docker-common-release]
name=virt7-docker-common-release
baseurl=http://cbs.centos.org/repos/virt7-docker-common-release/x86_64/os/
gpgcheck=0
EOF
# Packages for the master node.
sudo yum install -y --enablerepo=virt7-docker-common-release kubernetes etcd kubernetes-master ntp flannel
# Packages for the slave (worker) nodes.
sudo yum install -y --enablerepo=virt7-docker-common-release kubernetes kubernetes-node ntp flannel docker
# Time synchronisation on every node.
#sudo systemctl start ntpd
#sudo systemctl enable ntpd
#sudo ntpdate ntp1.aliyun.com
#sudo hwclock -w
# etcd configuration on the master (single-node cluster, default name).
[web@m01 yum.repos.d]$ grep -v '^#' /etc/etcd/etcd.conf
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_CLIENT_URLS="http://11.0.0.11:2379,http://127.0.0.1:2379"
ETCD_NAME="default"
ETCD_ADVERTISE_CLIENT_URLS=http://11.0.0.11:2379
# Start etcd and enable it on boot.
sudo systemctl start etcd
sudo systemctl enable etcd
# Check cluster health.
etcdctl cluster-health
# List cluster members; 0.0.0.0 means "listening on all interfaces of this host".
[web@m01 yum.repos.d]$ etcdctl member list
8e9e05c52164694d: name=default peerURLs=http://localhost:2380 clientURLs=http://0.0.0.0:2379 isLeader=true
# Common kubernetes settings shared by all components on the master.
[web@m01 yum.repos.d]$ grep -v '^#' /etc/kubernetes/config
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=0"
KUBE_ALLOW_PRIV="--allow-privileged=false"
KUBE_MASTER="--master=http://11.0.0.11:8080"

# API server: insecure HTTP on 8080, services on 10.254.0.0/16.
[web@m01 yum.repos.d]$ grep -v '^#' /etc/kubernetes/apiserver
KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0"
KUBE_API_PORT="--port=8080"
KUBELET_PORT="--kubelet-port=10250"
KUBE_ETCD_SERVERS="--etcd-servers=http://11.0.0.11:2379"
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"
KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,ResourceQuota"
KUBE_API_ARGS=""

# Controller manager: defaults.
[web@m01 yum.repos.d]$ grep -v '^#' /etc/kubernetes/controller-manager
KUBE_CONTROLLER_MANAGER_ARGS=""

# Scheduler: listen on all interfaces.
[web@m01 yum.repos.d]$ grep -v '^#' /etc/kubernetes/scheduler
KUBE_SCHEDULER_ARGS="--address=0.0.0.0"
# Restart and enable every master-side service, in dependency order
# (etcd before the API server, which the other components talk to).
for i in etcd kube-apiserver kube-controller-manager kube-scheduler flanneld; do
  sudo systemctl restart "$i"
  sudo systemctl enable "$i"
done
# 在master上執行一次,把flannel覆蓋網絡配置寫入etcd:
#etcdctl set /kube-centos/network/config '{"Network": "172.16.0.0/16"}'
# flannel configuration on a worker node; the etcd prefix is the path the
# master published the network config under (see the etcdctl set step above).
[web@s01 ~]$ grep -v '^#' /etc/sysconfig/flanneld
FLANNEL_ETCD_ENDPOINTS="http://11.0.0.11:2379"
FLANNEL_ETCD_PREFIX="/kube-centos/network"
# On the master, inspect the subnet leases flannel wrote into etcd.
[web@m01 ~]$ etcdctl ls /kube-centos/network/subnets
/kube-centos/network/subnets/172.30.52.0-24
/kube-centos/network/subnets/172.30.53.0-24
[web@m01 ~]$ etcdctl get /kube-centos/network/subnets/172.30.52.0-24
{"PublicIP":"11.0.0.21","BackendType":"vxlan","BackendData":{"VtepMAC":"ea:bb:6e:be:bb:7e"}}
[web@m01 ~]$ etcdctl get /kube-centos/network/subnets/172.30.53.0-24
{"PublicIP":"11.0.0.11","BackendType":"vxlan","BackendData":{"VtepMAC":"ae:63:3a:83:34:31"}}
# Worker-node kubernetes settings: same common config as the master.
[web@s01 ~]$ grep -v '^#' /etc/kubernetes/config
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=0"
KUBE_ALLOW_PRIV="--allow-privileged=false"
KUBE_MASTER="--master=http://11.0.0.11:8080"

# kube-proxy: bind on all interfaces.
[web@s01 ~]$ grep -v '^#' /etc/kubernetes/proxy
KUBE_PROXY_ARGS="--bind-address=0.0.0.0"

# kubelet: this node's address/hostname, the master API server, and the
# pause (pod-infra) image pulled from the Red Hat registry.
[web@s01 ~]$ grep -v '^#' /etc/kubernetes/kubelet
KUBELET_ADDRESS="--address=11.0.0.21"
KUBELET_PORT="--port=10250"
KUBELET_HOSTNAME="--hostname-override=11.0.0.21"
KUBELET_API_SERVER="--api-servers=http://11.0.0.11:8080"
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest"
KUBELET_ARGS="--logtostderr=false --v=0 --log-dir=/data/logs/kubernetes"
# Restart and enable the worker-side services, printing each one's status
# so failures are visible immediately.
for i in flanneld kube-proxy kubelet docker; do
  sudo systemctl restart "$i"
  sudo systemctl enable "$i"
  sudo systemctl status "$i"
done
# Point kubectl at the (insecure, port 8080) API server and make that
# context the default.
kubectl config set-cluster default-cluster --server=http://11.0.0.11:8080
# NOTE(review): --user=default-admin is never defined via
# "kubectl config set-credentials"; harmless against an insecure port,
# but confirm if authentication is ever enabled.
kubectl config set-context default-context --cluster=default-cluster --user=default-admin
kubectl config use-context default-context
# Smoke test: list pods in the default namespace.
kubectl get pod
# A dedicated directory holds the k8s deployment manifests.
[web@m01 k8s]$ pwd
/devops/k8s
# Pod manifest for nginx.
[web@m01 k8s]$ cat nginx-pod.yaml
apiVersion: v1
kind: Pod
metadata:
  name: nginx
  labels:
    app: nginx
spec:
  containers:
  - name: nginx
    image: nginx
    imagePullPolicy: IfNotPresent
    ports:
    - containerPort: 80
  restartPolicy: Always
# Create the nginx pod.
[web@m01 k8s]$ kubectl create -f nginx-pod.yaml
# NodePort service exposing the pod on port 30080 of every node,
# with client-IP session affinity.
[web@m01 k8s]$ cat nginx-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: nginx-service
spec:
  type: NodePort
  sessionAffinity: ClientIP
  selector:
    app: nginx
  ports:
  - port: 80
    nodePort: 30080
# Create the nginx service.
[web@m01 k8s]$ kubectl create -f nginx-svc.yaml
[web@m01 k8s]$ kubectl get pod
NAME      READY     STATUS    RESTARTS   AGE
nginx     1/1       Running   0          6h
有不少人建立後,發現狀態是ContainerCreating,這表明/etc/docker/certs.d/registry.access.redhat.com/redhat-ca.crt證書沒有軟鏈接到/etc/rhsm/ca/redhat-uep.pem
# Simplest fix: download the rpm that ships the Red Hat CA certificate and
# extract just the key into place (rpm2cpio unpacks without installing).
wget http://mirror.centos.org/centos/7/os/x86_64/Packages/python-rhsm-certificates-1.19.10-1.el7_4.x86_64.rpm
rpm2cpio python-rhsm-certificates-1.19.10-1.el7_4.x86_64.rpm | cpio -iv --to-stdout ./etc/rhsm/ca/redhat-uep.pem | sudo tee /etc/rhsm/ca/redhat-uep.pem
# After these two steps the symlink target exists and the link resolves:
[web@m01 k8s]$ ll /etc/docker/certs.d/registry.access.redhat.com/redhat-ca.crt
lrwxrwxrwx. 1 root root 27 Mar 14 07:43 /etc/docker/certs.d/registry.access.redhat.com/redhat-ca.crt -> /etc/rhsm/ca/redhat-uep.pem
# Restart the affected services so the pod status changes to Running.