keepalived+lvs基於http檢測

keepalived+lvs基於http檢測

keepalived+lvs基於tcp檢測無法探測到後端的Java程序是否假死,因此此時就需要用到基於http的檢測方法。
基於http檢測的原理是檢測後端服務器上的某個頁面,如果能獲取到則表示後端服務器存活,否則表示後端服務器故障。
語法格式

HTTP_GET {                      #基於http作後端服務器的健康狀態檢測
    url {                       #
        path /path/to/page      #指定所要檢測頁面所在的位置
        status_code XXX         #狀態碼通常爲200
    }
    connect_timeout 5           #鏈接超時時間5秒
    nb_get_retry 3              #重試次數3次
    delay_before_retry 3        #每次重試的間隔時間
}

基於http檢測的實現

準備主機4臺

server hostname ip
keepalived s1 172.20.27.10
keepalived s2 172.20.27.11
nginx web1 172.20.27.20
nginx web2 172.20.27.21

s1節點配置

1.修改keepalived配置文件

[root@s1 ~]# cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived

global_defs {
   notification_email {
    root@mylinuxops.com
   }
   notification_email_from root@mylinuxops.com
   smtp_server 127.0.0.1
   smtp_connect_timeout 30
   # Unique identifier for this keepalived node.
   router_id s1.mylinuxops.com
   vrrp_skip_check_adv_addr
   # vrrp_strict is intentionally disabled: in strict mode keepalived
   # installs DROP rules for the VIP and rejects this unicast setup.
   #vrrp_strict
   vrrp_iptables
   vrrp_garp_interval 0
   vrrp_gna_interval 0
}

# VRRP instance: s1 is the preferred (higher-priority) holder of the VIP.
vrrp_instance VI_1 {
    # Fixed: the keyword must be the uppercase MASTER — keepalived's parser
    # does not recognize "Master" and would silently fall back to BACKUP.
    state MASTER
    interface ens33
    virtual_router_id 27
    priority 100
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    # Unicast VRRP between the two directors instead of multicast.
    unicast_src_ip 172.20.27.10
    unicast_peer {
        172.20.27.11
    }
    virtual_ipaddress {
        172.20.27.100 dev ens33 label ens33:0
    }
}

# LVS virtual server; real servers are health-checked over HTTP and are
# only added to the IPVS table while the check page returns 200.
virtual_server 172.20.27.100 80 {
    delay_loop 6                 # seconds between health-check rounds
    lb_algo wrr                  # weighted round-robin scheduler
    lb_kind DR                   # direct-routing forwarding mode
    protocol TCP

    real_server 172.20.27.20 80 {
        weight 1
        HTTP_GET {               # HTTP-based health check
            url {
                path /monitor-page/index.html
                status_code 200
            }
            nb_get_retry 3       # retries before marking the server down
            delay_before_retry 3 # seconds between retries
            connect_timeout 5    # TCP connect timeout in seconds
        }
    }
    real_server 172.20.27.21 80 {
        weight 1
        HTTP_GET {
            url {
                path /monitor-page/index.html
                status_code 200
            }
            nb_get_retry 3
            delay_before_retry 3
            connect_timeout 5
        }
    }
}

2.重啓服務查看lvs規則

[root@s1 ~]# systemctl restart keepalived
[root@s1 ~]# ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  172.20.27.100:80 wrr
#因爲後端的web服務器沒有檢測頁面,因此沒有後端的realserver

s2節點配置

1.修改keepalived配置文件

[root@s2 ~]# cat /etc/keepalived/keepalived.conf 
! Configuration File for keepalived

global_defs {
   notification_email {
    root@mylinuxops.com
   }
   notification_email_from root@mylinuxops.com
   smtp_server 127.0.0.1
   smtp_connect_timeout 30
   # Unique identifier for this keepalived node.
   router_id s2.mylinuxops.com
   vrrp_skip_check_adv_addr
   # vrrp_strict is intentionally disabled: in strict mode keepalived
   # installs DROP rules for the VIP and rejects this unicast setup.
   #vrrp_strict
   vrrp_iptables
   vrrp_garp_interval 0
   vrrp_gna_interval 0
}

# VRRP instance: s2 is the standby node (lower priority than s1's 100).
vrrp_instance VI_1 {
    state BACKUP
    interface ens33
    virtual_router_id 27
    priority 80
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    # Unicast VRRP between the two directors instead of multicast.
    unicast_src_ip 172.20.27.11
    unicast_peer {
    172.20.27.10
    }
    virtual_ipaddress {
    172.20.27.100 dev ens33 label ens33:0
    }
}

# LVS virtual server; real servers are health-checked over HTTP and are
# only added to the IPVS table while the check page returns 200.
virtual_server 172.20.27.100 80 {
    # NOTE(review): delay_loop is 5 here but 6 on s1 — presumably an
    # unintentional mismatch between the two directors; confirm.
    delay_loop 5
    lb_algo wrr
    lb_kind DR
    protocol TCP

    real_server 172.20.27.20 80 {
        weight 1
        HTTP_GET {
            url {
                path /monitor-page/index.html
                status_code 200
            }
            nb_get_retry 3
            delay_before_retry 3
            connect_timeout 5
        }   
    }
    real_server 172.20.27.21 80 {
        weight 1
        HTTP_GET {
            url {
                path /monitor-page/index.html
                status_code 200
            }
            nb_get_retry 3
            delay_before_retry 3
            connect_timeout 5
        }
    }
}

2.重啓服務後查看lvs規則

[root@s2 ~]# systemctl restart keepalived
[root@s2 ~]# ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  172.20.27.100:80 wrr
#s1節點相同沒有後端的服務器

配置後端用於檢測的頁面

在web1和web2上建立檢測頁面

[root@localhost ~]# mkdir /apps/nginx/html/monitor-page
[root@localhost ~]# echo "ojbk" > /apps/nginx/html/monitor-page/index.html

在web1和web2上分別執行lvs-rs腳本

[root@localhost ~]# bash lvs_dr_rs.sh start

腳本內容

#!/usr/bin/env bash
# lvs_dr_rs.sh — configure this host as an LVS-DR real server.
#
# start: tune arp_ignore/arp_announce and bind the VIP to a loopback
#        alias, so the real server owns the VIP without answering ARP
#        requests for it (required by LVS direct routing).
# stop:  remove the alias and restore the kernel ARP defaults.
#
# Usage: lvs_dr_rs.sh start|stop   (run as root: writes /proc, runs ifconfig)

vip=172.20.27.100
mask='255.255.255.255'   # /32 host mask: the VIP is a single host, not a subnet
dev=lo:1

case "${1:-}" in
start)
    # arp_ignore=1: reply only to ARP requests for addresses configured
    # on the receiving interface, so the lo-bound VIP is never answered.
    echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore
    echo 1 > /proc/sys/net/ipv4/conf/lo/arp_ignore
    # arp_announce=2: always announce the best local source address,
    # never the VIP, in outgoing ARP.
    echo 2 > /proc/sys/net/ipv4/conf/all/arp_announce
    echo 2 > /proc/sys/net/ipv4/conf/lo/arp_announce
    # With the /32 netmask no explicit host route for the VIP is needed.
    ifconfig "$dev" "$vip" netmask "$mask"
    echo "The RS Server is Ready!"
    ;;
stop)
    ifconfig "$dev" down
    # Restore kernel ARP defaults.
    echo 0 > /proc/sys/net/ipv4/conf/all/arp_ignore
    echo 0 > /proc/sys/net/ipv4/conf/lo/arp_ignore
    echo 0 > /proc/sys/net/ipv4/conf/all/arp_announce
    echo 0 > /proc/sys/net/ipv4/conf/lo/arp_announce
    echo "The RS Server is Canceled!"
    ;;
*)
    echo "Usage: ${0##*/} start|stop" >&2
    exit 1
    ;;
esac

測試

再次查看s1,s2節點上的lvs規則

[root@s1 ~]# ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  172.20.27.100:80 wrr
  -> 172.20.27.20:80              Route   1      0          0         
  -> 172.20.27.21:80              Route   1      0          0
[root@s2 ~]# ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  172.20.27.100:80 wrr
  -> 172.20.27.20:80              Route   1      0          0         
  -> 172.20.27.21:80              Route   1      0          0
相關文章
相關標籤/搜索