Linux集羣架構(二)

Linux集羣架構(二)

目錄

8、LVS DR模式搭建
9、keepalived + LVS
10、擴展


8、LVS DR模式搭建

一、實驗環境:

四臺機器:

client: 10.0.1.50

Director節點: (ens32 10.0.1.55 vip ens32:0 10.0.1.58)

Real server1: (ens32 10.0.1.56 vip lo:0 10.0.1.58)

Real server2: (ens32 10.0.1.57 vip lo:0 10.0.1.58)

二、安裝

//兩臺real server需安裝web服務。以前已經裝過,略過
//在director安裝ipvsadm軟件包,可參考lvs nat部分
[root@lvs-dr ~]# yum -y install ipvsadm

三、在director上配置腳本

[root@lvs-dr1 ~]# vim /usr/local/sbin/lvs-dr.sh
#!/bin/bash
# Configure this host as the LVS DR-mode director:
# bind the VIP on alias ens32:0 and register both real servers via ipvsadm.
echo 1 > /proc/sys/net/ipv4/ip_forward
ipv=/usr/sbin/ipvsadm
vip=10.0.1.58
rs1=10.0.1.56
rs2=10.0.1.57
# Bring the VIP up with a /32 mask and pin a host route through the alias.
ifconfig ens32:0 "$vip" broadcast "$vip" netmask 255.255.255.255 up
route add -host "$vip" dev ens32:0
# Flush any existing virtual-server table, then add the round-robin
# service on VIP:80 and both real servers in direct-routing (-g) mode
# with weights 3 and 1.
"$ipv" -C
"$ipv" -A -t "$vip":80 -s rr
"$ipv" -a -t "$vip":80 -r "$rs1":80 -g -w 3
"$ipv" -a -t "$vip":80 -r "$rs2":80 -g -w 1

//賦予755權限,
[root@lvs-dr1 ~]# chmod 755 /usr/local/sbin/lvs-dr.sh

//執行腳本 
[root@lvs-dr1 ~]# /usr/local/sbin/lvs-dr.sh

//查看狀態
[root@lvs-dr1 ~]# ipvsadm -L -n
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  10.0.1.58:80 rr
  -> 10.0.1.56:80                 Route   3      0          0         
  -> 10.0.1.57:80                 Route   1      0          0

四、在兩臺real server配置腳本

[root@lvs-backend1 ~]# vim /usr/local/sbin/lvs-dr-rs.sh
#!/bin/bash
# Configure this real server for LVS DR mode: bind the VIP on lo:0 and
# suppress ARP for it so that only the director answers ARP requests
# for the VIP.
vip=10.0.1.58
ifconfig lo:0 "$vip" broadcast "$vip" netmask 255.255.255.255 up
route add -host "$vip" lo:0
# arp_ignore=1: reply only if the target IP is configured on the
#   interface that received the request (lo holds the VIP, so no reply).
# arp_announce=2: always choose the best local source address for ARP
#   announcements, never the VIP on lo.
echo "1" >/proc/sys/net/ipv4/conf/lo/arp_ignore
echo "2" >/proc/sys/net/ipv4/conf/lo/arp_announce
echo "1" >/proc/sys/net/ipv4/conf/all/arp_ignore
echo "2" >/proc/sys/net/ipv4/conf/all/arp_announce

//賦予755權限,而後執行
[root@lvs-backend1 ~]# chmod 755 /usr/local/sbin/lvs-dr-rs.sh  

//執行
[root@lvs-backend1 ~]# /usr/local/sbin/lvs-dr-rs.sh

五、測試

//當前採用的是rr調度算法
Last login: Mon Jul 23 14:47:55 2018
[root@localhost ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: ens32: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 00:0c:29:a2:07:b1 brd ff:ff:ff:ff:ff:ff
    inet 10.0.1.50/24 brd 10.0.1.255 scope global ens32
       valid_lft forever preferred_lft forever
    inet6 fe80::20c:29ff:fea2:7b1/64 scope link 
       valid_lft forever preferred_lft forever
[root@localhost ~]# curl 10.0.1.58
I am Lvs-backend1!!!
[root@localhost ~]# curl 10.0.1.58
I am lvs-backend2!!!
[root@localhost ~]# curl 10.0.1.58
I am Lvs-backend1!!!
[root@localhost ~]# curl 10.0.1.58
I am lvs-backend2!!!
[root@localhost ~]# curl 10.0.1.58
I am Lvs-backend1!!!
[root@localhost ~]# curl 10.0.1.58
I am lvs-backend2!!!
[root@localhost ~]# curl 10.0.1.58
I am Lvs-backend1!!!
[root@localhost ~]# curl 10.0.1.58
I am lvs-backend2!!!
[root@localhost ~]# curl 10.0.1.58
I am Lvs-backend1!!!
[root@localhost ~]# curl 10.0.1.58
I am lvs-backend2!!!
[root@localhost ~]#


9、keepalived + LVS

LVS能夠實現負載均衡,可是不可以進行健康檢查,如一個rs出現故障,LVS 仍然會把請求轉發給故障的rs服務器,這就會致使請求的無效性。keepalived 軟件能夠進行健康檢查,並且能同時實現 LVS 的高可用性,解決 LVS 單點故障的問題,其實 keepalived 就是爲 LVS 而生的。

一、實驗環境

4臺節點

Keepalived1 + lvs1(Director1):10.0.1.55
Keepalived2 + lvs2(Director2):10.0.1.59
Real server1:10.0.1.56
Real server2:10.0.1.57
VIP: 10.0.1.58

2.軟件安裝

//Keepalived + lvs兩個節點安裝
[root@localhost ~]# yum install ipvsadm keepalived -y

//兩臺real server安裝nginx, 以前環境已經安裝過,此處略

3.設置配置腳本

//兩臺real server節點創建腳本
[root@lvs-backend1 ~]# vim /usr/local/sbin/lvs-dr-rs.sh
#!/bin/bash
# Configure this real server for LVS DR mode: bind the VIP on lo:0 and
# suppress ARP for it so that only the director answers ARP requests
# for the VIP.
vip=10.0.1.58
ifconfig lo:0 "$vip" broadcast "$vip" netmask 255.255.255.255 up
route add -host "$vip" lo:0
# arp_ignore=1: reply only if the target IP is configured on the
#   interface that received the request (lo holds the VIP, so no reply).
# arp_announce=2: always choose the best local source address for ARP
#   announcements, never the VIP on lo.
echo "1" >/proc/sys/net/ipv4/conf/lo/arp_ignore
echo "2" >/proc/sys/net/ipv4/conf/lo/arp_announce
echo "1" >/proc/sys/net/ipv4/conf/all/arp_ignore
echo "2" >/proc/sys/net/ipv4/conf/all/arp_announce

//賦予755權限,而後執行
[root@lvs-backend1 ~]# chmod 755 /usr/local/sbin/lvs-dr-rs.sh  

//執行
[root@lvs-backend1 ~]# /usr/local/sbin/lvs-dr-rs.sh

//兩臺keepalived節點配置
//master節點配置文件
[root@lvs-dr1 ~]# vim /etc/keepalived/keepalived.conf
vrrp_instance VI_1 {
    # On the backup server this is BACKUP
    state MASTER
    interface ens32
    virtual_router_id 51
    # On the backup server this is 90
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass aminglinux
    }
    virtual_ipaddress {
        10.0.1.58
    }
}
virtual_server 10.0.1.58 80 {
    # Poll real-server health every 10 seconds
    delay_loop 10
    # LVS scheduling algorithm — must match the backup node (rr),
    # otherwise the distribution changes silently after a failover
    lb_algo rr
    # Direct-routing (DR) mode
    lb_kind DR
    # Keep connections from the same client IP on one real server for 60s.
    # Commented out in this lab, otherwise the rr effect is not visible.
    #persistence_timeout 60
    # Health-check real servers over TCP
    protocol TCP

    real_server 10.0.1.56 80 {
        # weight
        weight 1
        TCP_CHECK {
            # 10-second connect timeout counts as failure
            connect_timeout 10
            nb_get_retry 3
            delay_before_retry 3
            connect_port 80
        }
    }
    real_server 10.0.1.57 80 {
        weight 1
        TCP_CHECK {
            connect_timeout 10
            nb_get_retry 3
            delay_before_retry 3
            connect_port 80
        }
    }
}

//backup節點
[root@lvs-backend2 ~]# vim /etc/keepalived/keepalived.conf
vrrp_instance VI_1 {
    # This is the backup node (the master uses MASTER)
    state BACKUP
    interface ens32
    virtual_router_id 51
    priority 90
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass aminglinux
    }
    virtual_ipaddress {
        10.0.1.58
    }
}
virtual_server 10.0.1.58 80 {
    # Poll real-server health every 10 seconds
    delay_loop 10
    # LVS scheduling algorithm (round-robin)
    lb_algo rr
    # Direct-routing (DR) mode
    lb_kind DR
    # Keep connections from the same client IP on one real server for 60s;
    # commented out so the rr effect is visible in the lab
    #persistence_timeout 60
    # Health-check real servers over TCP
    protocol TCP

    real_server 10.0.1.56 80 {
        # weight
        weight 1
        TCP_CHECK {
        # 10-second connect timeout counts as failure
        connect_timeout 10
        nb_get_retry 3
        delay_before_retry 3
        connect_port 80
        }
    }
    real_server 10.0.1.57 80 {
        weight 1
        TCP_CHECK {
        connect_timeout 10
        nb_get_retry 3
        delay_before_retry 3
        connect_port 80
        }
     }
}

4.在keepalived兩個節點開啓轉發功能

[root@lvs-dr1 ~]# echo 1 > /proc/sys/net/ipv4/ip_forward

5.在兩個節點啓動keepalived

[root@lvs-dr1 ~]# systemctl start keepalived.service
[root@lvs-dr2 ~]# systemctl start keepalived.service

6.測試

//測試1:手動關閉10.0.1.56節點的nginx,在客戶端上去測試訪問

//在10.0.1.56上操做
[root@lvs-backend1 ~]# /usr/local/nginx/sbin/nginx -s stop
[root@lvs-backend1 ~]# lsof -i :80

//在10.0.1.50客戶端上測試
Last login: Mon Jul 23 14:49:10 2018 from 10.0.1.229
[root@localhost ~]# curl 10.0.1.58
I am lvs-backend2!!!
[root@localhost ~]# curl 10.0.1.58
I am lvs-backend2!!!
[root@localhost ~]# curl 10.0.1.58
I am lvs-backend2!!!
[root@localhost ~]# curl 10.0.1.58
I am lvs-backend2!!!
[root@localhost ~]# curl 10.0.1.58
I am lvs-backend2!!!
//結果正常,不會出現訪問10.0.1.56節點,一直訪問的是10.0.1.57節點的內容。

//測試2 手動從新開啓 10.0.1.56 節點的nginx, 在客戶端上去測試訪問
//在10.0.1.56上操做
[root@lvs-backend1 ~]# lsof -i :80
[root@lvs-backend1 ~]# /usr/local/nginx/sbin/nginx
[root@lvs-backend1 ~]# lsof -i :80
COMMAND  PID  USER   FD   TYPE DEVICE SIZE/OFF NODE NAME
nginx   2969  root    6u  IPv4  48805      0t0  TCP *:http (LISTEN)
nginx   2970 nginx    6u  IPv4  48805      0t0  TCP *:http (LISTEN)
nginx   2971 nginx    6u  IPv4  48805      0t0  TCP *:http (LISTEN)
//在10.0.1.50上測試
I am Lvs-backend1!!!
[root@localhost ~]# curl 10.0.1.58
I am lvs-backend2!!!
[root@localhost ~]# curl 10.0.1.58
I am Lvs-backend1!!!
[root@localhost ~]# curl 10.0.1.58
I am lvs-backend2!!!
[root@localhost ~]# curl 10.0.1.58
I am Lvs-backend1!!!
[root@localhost ~]# curl 10.0.1.58
I am lvs-backend2!!!
[root@localhost ~]# curl 10.0.1.58
I am Lvs-backend1!!!
[root@localhost ~]# curl 10.0.1.58
I am lvs-backend2!!!
[root@localhost ~]# curl 10.0.1.58
I am Lvs-backend1!!!
[root@localhost ~]# curl 10.0.1.58
I am lvs-backend2!!!
//結果正常,按照 rr 調度算法訪問10.0.1.56節點和10.0.1.57節點內容。

//測試 keepalived 的HA特性
//ip addr查看,此時10.0.1.58的vip在主上
[root@lvs-dr1 ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: ens32: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 00:0c:29:85:24:8c brd ff:ff:ff:ff:ff:ff
    inet 10.0.1.55/24 brd 10.0.1.255 scope global ens32
       valid_lft forever preferred_lft forever
    inet 10.0.1.58/32 scope global ens32
       valid_lft forever preferred_lft forever
    inet6 fe80::20c:29ff:fe85:248c/64 scope link 
       valid_lft forever preferred_lft forever
//中止master上的keepalived
[root@lvs-dr1 ~]# systemctl stop keepalived.service 
[root@lvs-dr1 ~]# 

//在dr2上查看,vip搶佔過來了
[root@lvs-dr2 ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: ens32: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 00:0c:29:dd:53:4e brd ff:ff:ff:ff:ff:ff
    inet 10.0.1.59/24 brd 10.0.1.255 scope global ens32
       valid_lft forever preferred_lft forever
    inet 10.0.1.58/32 scope global ens32
       valid_lft forever preferred_lft forever
    inet6 fe80::c388:e67a:4ac3:6566/64 scope link 
       valid_lft forever preferred_lft forever

//在10.0.1.50上測試
I am Lvs-backend1!!!
[root@localhost ~]# curl 10.0.1.58
I am lvs-backend2!!!
[root@localhost ~]# curl 10.0.1.58
I am Lvs-backend1!!!
[root@localhost ~]# curl 10.0.1.58
I am lvs-backend2!!!
[root@localhost ~]# curl 10.0.1.58
I am Lvs-backend1!!!
[root@localhost ~]# curl 10.0.1.58
I am lvs-backend2!!!
[root@localhost ~]# curl 10.0.1.58
I am Lvs-backend1!!!
[root@localhost ~]# curl 10.0.1.58
I am lvs-backend2!!!
[root@localhost ~]# curl 10.0.1.58
I am Lvs-backend1!!!
[root@localhost ~]# curl 10.0.1.58
I am lvs-backend2!!!
[root@localhost ~]# curl 10.0.1.58
I am Lvs-backend1!!!
[root@localhost ~]# curl 10.0.1.58
I am lvs-backend2!!!
[root@localhost ~]# curl 10.0.1.58
//能夠正常訪問後端的網站,驗證了keepalived的特性

//從新開啓master上的keepalived
[root@lvs-dr1 ~]# systemctl start keepalived.service
[root@lvs-dr1 ~]# ip add
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: ens32: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 00:0c:29:85:24:8c brd ff:ff:ff:ff:ff:ff
    inet 10.0.1.55/24 brd 10.0.1.255 scope global ens32
       valid_lft forever preferred_lft forever
    inet 10.0.1.58/32 scope global ens32
       valid_lft forever preferred_lft forever
    inet6 fe80::20c:29ff:fe85:248c/64 scope link 
       valid_lft forever preferred_lft forever


10、擴展

heartbeat和keepalived比較

http://blog.csdn.net/yunhua_lee/article/details/9788433

DRBD工做原理和配置

http://502245466.blog.51cto.com/7559397/1298945

mysql+keepalived

http://lizhenliang.blog.51cto.com/7876557/1362313

lvs 三種模式詳解

http://www.it165.net/admin/html/201401/2248.html

lvs幾種算法

http://www.aminglinux.com/bbs/thread-7407-1-1.html

關於arp_ignore和 arp_announce

http://www.cnblogs.com/lgfeng/archive/2012/10/16/2726308.html

lvs原理相關的

http://blog.csdn.net/pi9nc/article/details/23380589

haproxy+keepalived

http://blog.csdn.net/xrt95050/article/details/40926255

nginx、lvs、haproxy比較

http://www.csdn.net/article/2014-07-24/2820837

keepalived中自定義腳本 vrrp_script

http://my.oschina.net/hncscwc/blog/158746

lvs dr模式只使用一個公網ip的實現方法

http://storysky.blog.51cto.com/628458/338726

相關文章
相關標籤/搜索