Setting up k8s is this simple: a genuine one-click deployment

1. The script installs Kubernetes with yum; the latest version at the time of writing is 1.12.1. If yum pulls in a newer or older kubelet version, this script will not work as-is!
2. This article only builds a Kubernetes cluster; it does not describe the individual Kubernetes components in detail.
3. The script sets up one master and any number (N) of node machines.
4. The OS is CentOS 7.4.
5. The script is organized into the following functions:
    def_get_master_ip        ### get the master node's IP; yields the variable MASTER_IP
    def_get_nodes_ip         ### get the node IPs; yields the array NODES_IP
    def_command              ### check whether the previous command succeeded
    def_create_key           ### create an ssh key pair
    def_send_key             ### send the public key to every node
    def_host                 ### add hostname resolution entries
    def_ntp                  ### synchronize time on the master and the nodes
    def_send_host            ### sync the hosts file to every node so they stay consistent
    def_selinux master       ### configure SELinux on the master
    def_selinux nodes        ### configure SELinux on the nodes
    def_firewalld master     ### configure the master firewall
    def_firewalld nodes      ### configure the node firewalls
    def_k8s_docker_repo      ### create the kubernetes and docker-ce yum repos
    def_send_repo_nodes      ### sync the kubernetes and docker-ce yum repos to every node
    def_install_service master   ### install the kubernetes components: kubelet docker-ce kubeadm kubectl
    def_install_service nodes    ### install the kubernetes components: kubelet docker-ce kubeadm kubectl
    def_start_service master     ### start the services
    def_start_service nodes      ### start the services
    def_swap_off             ### let kubelet tolerate enabled swap
    def_configurekernel      ### set the bridge-related kernel parameters
    def_master_image_download    ### download the images
    def_master_init          ### initialize the master node
    def_master_configure     ### create the files the master needs
    def_flannel              ### install the flannel network
    def_nodes_image_download ### download the images on the nodes
    def_join_master          ### join the nodes to the master
###Experts, please go easy on me; the script is crude. Pointers are very welcome!!
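Running it is interactive. A minimal sketch of a session (the file name k8s_onekey.sh is just an example name; the prompts come from the functions listed above):

    # run on the master node as root; all nodes are assumed to share the same root password
    bash k8s_onekey.sh
    please input the master host IP, e.g. '192.168.1.10': 10.10.10.10
    please input your node IPs, e.g. '192.168.1.11 192.168.1.12': 10.10.10.11 10.10.10.12 10.10.10.13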
Below is the content of the script file:

#!/bin/bash
#author:MR_xiaosu
#date=2018-10-22
############################################################################################################
####################### This script must be run on the master node ########################################
####################### It asks for two inputs: ###########################################################
####################### one is the master node's IP address; ##############################################
####################### the other is the node IPs; separate multiple nodes with spaces, ###################
####################### e.g. with three nodes you would enter 10.10.10.11 10.10.10.12 10.10.10.13 #########
############################################################################################################
##define the remote port for ssh/scp
PORT="22"
##define the services to start
VAR_START_SERVICE=(docker kubelet)
##define the packages installed with yum
VAR_INSTALL_SERVICE=(docker-ce kubelet kubectl kubeadm)
##function to get the master node's IP
def_get_master_ip (){
    read -p "please input the master host IP, e.g. '192.168.1.10': " MASTER_IP
}
##get the pool of node IP addresses
def_get_nodes_ip (){
    echo  -e  "please input your node IPs, e.g. '192.168.1.11 192.168.1.12': ";
    read -ra NODES_IP
}
##function to check whether the previous command succeeded
def_command (){
    if [ "$?" == "0" ];then
        echo -e "The $1 command succeeded"
    else
        echo -e "The $1 command failed"
    fi
}
##set up passwordless ssh login (create a key pair)
def_create_key (){
    KEY_PUB="/root/.ssh/id_rsa.pub"
    KEY="/root/.ssh/id_rsa"
    if [ ! -f "$KEY" -o ! -f $KEY_PUB ]; then
        ssh-keygen -t rsa -f ~/.ssh/id_rsa -N "" -q
    else
        echo "Key already exists"
    fi
}
##send the public key to every node
def_send_key (){
    which "sshpass" > /dev/null
    if [ $? -eq 0 ]
    then
            echo -e "sshpass command exists"
    else
            echo "sshpass command does not exist, installing it now!"
        yum -y install sshpass
    fi
    for ip in ${NODES_IP[@]}
    do
####sshpass: the password after -p is the node root password; every node must use the same root password. Here all of my nodes' root password is 1
        sshpass -p"1" ssh-copy-id -i /root/.ssh/id_rsa.pub -o StrictHostKeyChecking=no root@$ip
    done
}
###set up the hosts file
def_host (){
#####In production the following line can be commented out!
    echo -e "$MASTER_IP master" >> /etc/hosts
    hostnamectl set-hostname master
    A=1
    for ip in ${NODES_IP[@]}
    do
        ssh -p $PORT root@$ip "hostnamectl set-hostname node${A}"
##In production the following line can be commented out
        echo "$ip node${A}" >> /etc/hosts
        A=$[$A+1]
    done
}
#In production this function can be commented out
def_send_host (){
    for ip in ${NODES_IP[@]}
    do
        scp -P $PORT /etc/hosts root@$ip:/etc/
    done
}
###Time sync function; takes one argument, master or nodes. The time server is cn.pool.ntp.org (ntpdate cn.pool.ntp.org). Adjust as needed, but make sure every node's clock stays in sync with the master's.
def_ntp (){
    case $1 in
    nodes)
        for ip in ${NODES_IP[@]}
        do
            ssh -p $PORT root@$ip "yum -y install ntp"
            # run ntpdate before starting ntpd; ntpdate cannot bind port 123 once ntpd is running
            ssh -p $PORT root@$ip "ntpdate cn.pool.ntp.org"
            ssh -p $PORT root@$ip "systemctl start ntpd"
            ssh -p $PORT root@$ip "systemctl enable ntpd"
        done
    ;;
    master|*)
        yum -y install ntp
        ntpdate cn.pool.ntp.org
        systemctl start ntpd
        systemctl enable ntpd
    esac
}
##Disable SELinux; pass master or nodes to indicate where to run
def_selinux (){
    case $1 in
    nodes)
    echo "+++++++++++++++++++"
        for ip in ${NODES_IP[@]}
        do
            selinux=`ssh -p $PORT root@$ip "getenforce"`
            case ${selinux} in
                    Enforcing)
                            echo -e "The $ip selinux is running,Please down it!"
                            ssh -p $PORT root@$ip "setenforce 0"
                            MESSAGE1="Modify_the_state_of_SELinux_from_$ip"
                            ssh -p $PORT root@$ip "sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config  2>&1"
                            def_command ${MESSAGE1}
                    ;;
                    Permissive)
                            echo -e "The $ip SELinux state is permissive."
                            STATE=`ssh -p $PORT root@$ip "grep 'SELINUX=enforcing' /etc/selinux/config"`
                            echo $STATE
                            if [ -z "${STATE}" ]; then
                                    echo -e "SELinux on host $ip is already disabled in /etc/selinux/config."
                            else
                                    echo "disabling SELinux in /etc/selinux/config now!"
                                    MESSAGE1="Modify_the_state_of_SELinux_from_$ip"
                                    ssh -p $PORT root@$ip "sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config  2>&1"
                                    def_command ${MESSAGE1}
                            fi
                ;;
                Disabled)
                        echo -e "the host $ip SELinux has been closed."
                ;;
                *)
                        echo -e "ERROR:Parameters for incoming error of control flow,you should incoming enforcing or permissive!"
                esac
        done
        ;;
    master)
        selinux=`getenforce`
        case ${selinux} in
        Enforcing)
            echo -e "The $1 selinux is running,Please down it!"
            setenforce 0 
            MESSAGE1="Modify_${1}_the_state_of_SELinux"
            sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config  2>&1
            def_command ${MESSAGE1}
        ;;
        Permissive)
            echo -e "The ${1} SELinux state is permissive."
            STATE=`grep 'SELINUX=enforcing' /etc/selinux/config`
            if [ -z "${STATE}" ]; then
                echo -e "${1} SELinux is already disabled in /etc/selinux/config."
            else
                echo "disabling SELinux in the ${1} config now!"
                MESSAGE1="Modify_${1}_the_state_of_SELinux"
                sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config  2>&1
                def_command ${MESSAGE1}
            fi
        ;;
        Disabled)
            echo -e "${1} SELinux has been closed."
        ;;
        *)
            echo -e "Parameters for incoming error of control flow,you should incoming enforcing or permissive!"
        esac
    esac
}
####Disable the firewall; pass master or nodes
def_firewalld (){
    case $1 in
    master)
        FIREWALL_STATE=`systemctl status firewalld| grep  "Active"| awk '{print $3}'`
        echo -e $FIREWALL_STATE
        if [ "${FIREWALL_STATE}" == "(dead)" ];then
            echo -e "The ${1} firewalld had been done!"
            systemctl disable firewalld
        elif [ "${FIREWALL_STATE}" == "(running)" ];then
            echo -e "The ${1} firewalld having running.Now, we will down it! "
            sleep 3
            MESSAGE1="Modify_${1}firewall_status"
            systemctl stop firewalld
            def_command ${MESSAGE1}
            systemctl disable firewalld
        else
            echo -e "Unable to getI ${1} firewall status!"
        fi
    ;;
    nodes)
        for ip in ${NODES_IP[@]}
        do
            FIREWALL_STATE=`ssh -p $PORT root@$ip "systemctl status firewalld| grep  'Active'"| awk '{print $3}'`
            echo -e $FIREWALL_STATE
            if [ "${FIREWALL_STATE}" == "(dead)" ];then
                            echo -e "The ${ip} firewalld had been done!"
                            ssh -p $PORT root@$ip "systemctl disable firewalld"
                    elif [ "${FIREWALL_STATE}" == "(running)" ];then
                            echo -e "The ${ip} firewalld having running.Now, we will down it! "
                            sleep 3
                            MESSAGE1="Modify${ip}_firewall_status"
                            ssh -p $PORT root@$ip "systemctl stop firewalld"
                            def_command ${MESSAGE1}
                            ssh -p $PORT root@$ip "systemctl disable firewalld"
                    else
                            echo -e "Unable to get ${ip} firewall status!"
                    fi
        done
    esac
}
###Create the kubernetes.repo and docker-ce.repo files
def_k8s_docker_repo (){
    MESSAGE1="The_yum_source_with_kubernetes_added_is_successful"
    cat > /etc/yum.repos.d/kubernetes.repo <<EOF 
[kubernetes]
name=kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
enabled=1
EOF
    def_command ${MESSAGE1}
    sleep 3
    MESSAGE2="The_yum_source_with_docker_added_is_successful"
    MESSAGE3="DOWMLOAD_GPGKEY"
    MESSAGE4="Gpckey_successfully_added_docker"
    which "wget" > /dev/null
    if [ $? -eq 0 ]
    then
            echo -e "command is exist"
    else
            echo "command not exist"
        yum -y install wget
    fi
    wget -O /etc/yum.repos.d/docker-ce.repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
    def_command ${MESSAGE2}
    sleep 3
    wget -O /tmp/rpm-package-key.gpg  https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
    def_command ${MESSAGE3}
    sleep 3
    rpm --import /tmp/rpm-package-key.gpg
    def_command ${MESSAGE4}
}
##Send the kubernetes.repo and docker-ce.repo files to every node
def_send_repo_nodes (){
    for ip in ${NODES_IP[@]}
    do
        scp -P $PORT /etc/yum.repos.d/docker-ce.repo root@$ip:/etc/yum.repos.d/
        scp -P $PORT /etc/yum.repos.d/kubernetes.repo root@$ip:/etc/yum.repos.d/
        scp -P $PORT /tmp/rpm-package-key.gpg root@$ip:/tmp/
        ssh -p $PORT root@$ip "rpm --import /tmp/rpm-package-key.gpg"
    done
}
###Pass master or nodes; installs docker-ce, kubelet, kubectl, kubeadm from the array VAR_INSTALL_SERVICE
def_install_service (){
    case $1 in
    master)
        for service in ${VAR_INSTALL_SERVICE[@]}
        do
            echo "now ,we are in $1 to install $service"
            sleep 2
            yum -y install $service
        done
    ;;
    nodes)
        for ip in ${NODES_IP[@]}
        do
            for service in ${VAR_INSTALL_SERVICE[@]}
            do
                echo "now ,we are in $1 $ip to install $service"
                sleep 2
                ssh -p $PORT root@$ip "yum -y install $service"
            done
        done
    esac
}
##Pass master or nodes; starts the docker and kubelet services from the array VAR_START_SERVICE
def_start_service (){
    case $1 in
    master)
        for service in ${VAR_START_SERVICE[@]}
        do
            echo -e "This is master,Now,we will start $service"
            systemctl enable $service
            systemctl restart $service
            def_command
            sleep 3
        done
    ;;
    nodes)
        for ip in ${NODES_IP[@]}
        do
            for service in ${VAR_START_SERVICE[@]}
                    do
                            echo -e "This is $ip nodes,Now,we will start $service"
                            ssh -p $PORT root@$ip "systemctl enable $service"
                            ssh -p $PORT root@$ip "systemctl restart $service"
                def_command
                            sleep 3
                    done
        done
    esac    
}
###Let kubelet tolerate enabled swap (--fail-swap-on=false)
def_swap_off (){
    cat > /etc/sysconfig/kubelet << EOF
KUBELET_EXTRA_ARGS="--fail-swap-on=false"
EOF
}
#Send the kubelet swap setting to every node
def_send_swap (){
    for ip in ${NODES_IP[@]}
    do
        scp -P $PORT /etc/sysconfig/kubelet root@$ip:/etc/sysconfig/
    done
}
#Set the bridge-related kernel parameters required by kubernetes
def_configurekernel (){
    echo "net.bridge.bridge-nf-call-iptables = 1" > /etc/sysctl.d/k8s.conf; 
    echo "net.bridge.bridge-nf-call-ip6tables = 1" >> /etc/sysctl.d/k8s.conf; 
    sysctl -p /etc/sysctl.d/k8s.conf
}
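###Note (an assumption about the target hosts, not something the original script handles): the two
###bridge sysctls above only take effect when the br_netfilter kernel module is loaded; if sysctl -p
###complains about unknown keys, run "modprobe br_netfilter" on the host first.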
#Send the kernel parameter file to every node and apply it there
def_send_configurekernel (){
    for ip in ${NODES_IP[@]}
    do
        scp -P $PORT /etc/sysctl.d/k8s.conf root@$ip:/etc/sysctl.d/
        ssh -p $PORT root@$ip "sysctl -p /etc/sysctl.d/k8s.conf"
    done
}
###Because the official registry (k8s.gcr.io) is not reachable from here, we first pull the required component images from a mirror; readers with unrestricted internet access can comment this function out
def_master_image_download (){
###define the image list
    IMAGES=(kube-apiserver-amd64:v1.12.1 kube-controller-manager-amd64:v1.12.1 kube-scheduler-amd64:v1.12.1 kube-proxy-amd64:v1.12.1 pause:3.1 etcd-amd64:3.2.24 coredns:1.2.2)
##this array is used when tagging the images
    Images=(kube-apiserver:v1.12.1 kube-controller-manager:v1.12.1 kube-scheduler:v1.12.1 kube-proxy:v1.12.1 pause:3.1 etcd:3.2.24 coredns:1.2.2)
    B="0"
    for image in ${IMAGES[@]}
    do
        echo -e "now we will pull $image"
        docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/$image
        docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/$image k8s.gcr.io/${Images[$B]}
        docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/$image
        B=$[$B+1]
    done
}
###Initialize the master with kubeadm
def_master_init (){
    kubeadm init --kubernetes-version=v1.12.1 --pod-network-cidr=10.244.0.0/16 --service-cidr=10.96.0.0/12 --ignore-preflight-errors=Swap 
}
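###On success, kubeadm init prints a ready-made 'kubeadm join ...' command at the end of its output;
###this script does not capture that output and instead re-derives the token and CA cert hash in
###def_join_master below.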
###Set up kubectl access for the current user on the master
def_master_configure (){
    mkdir -p $HOME/.kube
    cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
    chown $(id -u):$(id -g) $HOME/.kube/config
}
###Install the flannel network add-on; it lets containers on different hosts reach each other
def_flannel (){
    kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
}
###Preparation on the nodes before joining: pull the images they need
def_nodes_image_download (){
    for IP in ${NODES_IP[@]}
    do
        NODES_IMAGES=(kube-proxy-amd64:v1.12.1 pause:3.1)
        NODES_Images=(kube-proxy:v1.12.1 pause:3.1)
        C="0"
        for NODES_IMAGE in ${NODES_IMAGES[@]}
        do
            echo -e "now we will puu $NODES_IMAGE"
            ssh -p $PORT root@$IP "docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/$NODES_IMAGE"
            ssh -p $PORT root@$IP "docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/$NODES_IMAGE k8s.gcr.io/${NODES_Images[$C]}"
            ssh -p $PORT root@$IP "docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/$NODES_IMAGE"
                    C=$[$C+1]
        done
    done
}
###Join the nodes to the master
def_join_master (){
    ###Get the token; it is valid for 24 hours from when the master was set up
    GET_TOKEN=`kubeadm token list | awk '{print $1}' | sed "1d"`
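    ###The pipeline above assumes the usual 'kubeadm token list' output: a header line followed by rows
    ###whose first column is the token, for example:
    ###  TOKEN                     TTL   EXPIRES   USAGES                   DESCRIPTION   EXTRA GROUPS
    ###  abcdef.0123456789abcdef   23h   ...       authentication,signing   ...           system:bootstrappers:...
    ###awk keeps the first column and sed "1d" drops the header line.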
####Compute the discovery token CA cert hash from the cluster CA certificate
    GET_TOKEN_CA=`openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'`
    for ip in ${NODES_IP[@]}
    do
        ssh -p $PORT root@$ip "kubeadm join $MASTER_IP:6443 --token ${GET_TOKEN} --discovery-token-ca-cert-hash sha256:${GET_TOKEN_CA} --ignore-preflight-errors=Swap"
    done
}
##Run all of the functions above
def_do_install_cluster (){
    def_get_master_ip
    def_get_nodes_ip
    def_create_key
    def_send_key
    def_host
    def_send_host
    def_ntp master
    def_ntp nodes
    def_selinux master
    def_selinux nodes
    def_firewalld master
    def_firewalld nodes
    def_k8s_docker_repo
    def_send_repo_nodes
    def_install_service master
    def_install_service nodes
    def_start_service master
    def_start_service nodes
    def_swap_off
    def_send_swap    
    def_configurekernel
    def_send_configurekernel
    def_master_image_download
    def_master_init
    def_master_configure
    def_flannel
    def_nodes_image_download
    def_join_master
}
echo -e "####################################################################################################"
echo -e "Now,we will do that the script builds the kubernetes cluster"
def_do_install_cluster
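
Once the script finishes, a quick sanity check from the master (this is not part of the script itself; it just verifies what the setup above is expected to produce):

    kubectl get nodes                    # master and node1..nodeN should eventually show Ready
    kubectl get pods -n kube-system      # the kube-proxy, coredns and kube-flannel pods should reach Running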