LNMT/LAMT: Static/Dynamic Separation, Load Balancing, and Session Persistence

This lab covers:

1. Proxying dynamic requests to Tomcat through nginx;

2. Proxying dynamic requests to Tomcat through httpd;

3. Binding sessions (session affinity) with httpd and Tomcat;

4. Preserving sessions (session replication) with httpd and Tomcat;

5. Load balancing Tomcat with httpd.

Part 1: LNMT Lab Setup

LNMT:

Host            IP               Hostname
haproxy         192.168.0.111    node1.soul.com
varnish         192.168.0.112    node2.soul.com
nginx+tomcat    192.168.0.113    node3.soul.com
httpd           192.168.0.114    node4.soul.com

[Figure: LNMT topology]

1) Configure haproxy

#haproxy can be installed directly with yum
[root@node1 ~]# vim /etc/haproxy/haproxy.cfg
frontend  main *:80
    acl url_static       path_beg       -i /static /images /javascript /stylesheets
    acl url_static       path_end       -i .jpg .gif .png .css .js .html .htm
    acl url_dynamic      path_end       -i .jsp .do
    use_backend static          if url_static
    use_backend dynamic         if url_dynamic
    default_backend             static
#---------------------------------------------------------------------
# static backend for serving up images, stylesheets and such
#---------------------------------------------------------------------
backend static
    balance     roundrobin
    server      static 192.168.0.112:80 check
#---------------------------------------------------------------------
# round robin balancing between the various backends
#---------------------------------------------------------------------
backend dynamic
    balance     roundrobin
    server  node3 192.168.0.113:80 check
[root@node1 ~]# service haproxy start
Starting haproxy:                                          [  OK  ]
[root@node1 ~]# ss -tunl | grep 80
tcp    LISTEN     0      128                    *:80                    *:* 
#haproxy started and is listening normally
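How the ACLs route a few illustrative requests (the paths are examples, not part of the config):

GET /images/logo.png  -> url_static (path_beg /images)  -> backend static  (varnish, 192.168.0.112)
GET /style/main.css   -> url_static (path_end .css)     -> backend static
GET /dynamic.jsp      -> url_dynamic (path_end .jsp)    -> backend dynamic (nginx+tomcat, 192.168.0.113)
GET /                 -> no ACL match                   -> default_backend static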

2) Configure varnish:

#Installation is not covered here; it was described in an earlier post
[root@node2 ~]# vim /etc/sysconfig/varnish
VARNISH_LISTEN_PORT=80    # change the listen port to 80
#
# # Telnet admin interface listen address and port
VARNISH_ADMIN_LISTEN_ADDRESS=127.0.0.1
VARNISH_ADMIN_LISTEN_PORT=6082
#
# # Shared secret file for admin interface
VARNISH_SECRET_FILE=/etc/varnish/secret
#
# # The minimum number of worker threads to start
VARNISH_MIN_THREADS=50
#
# # The Maximum number of worker threads to start
VARNISH_MAX_THREADS=1000
#
# # Idle timeout for worker threads
VARNISH_THREAD_TIMEOUT=120
#
# # Cache file location
VARNISH_STORAGE_FILE=/var/lib/varnish/varnish_storage.bin
#
# # Cache file size: in bytes, optionally using k / M / G / T suffix,
# # or in percentage of available disk space using the % suffix.
VARNISH_STORAGE_SIZE=1G
#
# # Backend storage specification
#VARNISH_STORAGE="file,${VARNISH_STORAGE_FILE},${VARNISH_STORAGE_SIZE}"
VARNISH_STORAGE="malloc,100M"    更改存儲類型

3) Configure the VCL file:

[root@node2 ~]# vim /etc/varnish/test.vcl
backend static {
  .host = "192.168.0.114";
  .port = "80";
}
acl purgers {
  "127.0.0.1";
  "192.168.0.0"/24;
}
sub vcl_recv {
  if(req.request == "PURGE") {
    if(!client.ip ~ purgers)  {
    error 405 "Method not allowed.";
    }
  }
   if (req.restarts == 0) {
      if (req.http.x-forwarded-for) {
          set req.http.X-Forwarded-For = req.http.X-Forwarded-For + ", " + client.ip;
      } else {
          set req.http.X-Forwarded-For = client.ip;
      }
   }
   return(lookup);
}
sub vcl_hit {
  if(req.request == "PURGE") {
    purge;
    error 200 "Purged Success.";
  }
}
sub vcl_miss {
  if(req.request == "PURGE") {
    purge;
    error 404 "Not in cache.";
  }
}
sub vcl_pass {
  if(req.request == "PURGE") {
    error 502 "Purged on a passed object.";
  }
}
sub vcl_fetch {
  if(req.url ~ "\.(jpg|png|gif|jpeg)$") {
     set beresp.ttl = 7200s;
  }
  if(req.url ~ "\.(html|htm|css|js)$") {
     set beresp.ttl = 1200s;
  }
}
sub vcl_deliver {
  if (obj.hits > 0) {
    set resp.http.X-Cache = "HIT from " + server.ip;
  }else {
    set resp.http.X-Cache = "MISS";
  }
}

4) Compile and activate:

[root@node2 ~]# varnishadm -S /etc/varnish/secret -T 127.0.0.1:6082
varnish> vcl.load test2 test.vcl
200      
VCL compiled.
varnish> vcl.use test2
200      
varnish>
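Once the static page below is in place, the purge ACL can be spot-checked with an HTTP PURGE request (a sketch; run from a host inside 192.168.0.0/24):

[root@node2 ~]# curl -i -X PURGE http://192.168.0.112/index.html
# expect "200 Purged Success." for a cached object and "404 Not in cache." otherwise;
# a client outside the purgers ACL should get "405 Method not allowed."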

Provide the static page:

[root@node4 ~]# service httpd start
Starting httpd:                                            [  OK  ]
[root@node4 ~]# vim /var/www/html/index.html
<h1>This is static page!IP:192.168.0.114</h1>

5) Configure nginx and Tomcat:

#Installation is not covered; configure nginx:

[root@node3 ~]# vim /etc/nginx/conf.d/default.conf
#defining proxy_pass in the location block is all that is needed

    location / {
#        root   /usr/share/nginx/html;
#        index  index.html index.htm;
         proxy_pass http://192.168.0.113:8080;    # proxy everything to the backend on port 8080
    }
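Everything that reaches nginx is relayed to Tomcat, which is fine here because haproxy has already peeled off the static traffic. If nginx itself had to do the split, an extension match would work instead (a sketch, not used in this lab):

    location ~ \.(jsp|do)$ {
        proxy_pass http://192.168.0.113:8080;
    }
    location / {
        root   /usr/share/nginx/html;
        index  index.html index.htm;
    }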

Configure Tomcat

#Installation is not covered, and no configuration is needed either; just start it once installed
[root@node3 conf]# ll /etc/rc.d/init.d/tomcat
-rwxr-xr-x 1 root root 1288 May 11 18:28 /etc/rc.d/init.d/tomcat
[root@node3 conf]#
[root@node3 conf]# service tomcat start
Using CATALINA_BASE:   /usr/local/tomcat
Using CATALINA_HOME:   /usr/local/tomcat
Using CATALINA_TMPDIR: /usr/local/tomcat/temp
Using JRE_HOME:        /usr/java/latest
Using CLASSPATH:       /usr/local/tomcat/bin/bootstrap.jar:/usr/local/tomcat/bin/tomcat-juli.jar
[root@node3 ~]# ss -tunl | grep 8080
tcp    LISTEN     0      100                   :::8080                 :::*

Provide a JSP page file

[root@node3 ROOT]# pwd
/usr/local/tomcat/webapps/ROOT
[root@node3 ROOT]# vim dynamic.jsp
<%@ page language="java" %>
<%@ page import="java.util.*" %>
<html>
  <head>
    <title>JSP test page.</title>
  </head>
  <body>
    <% out.println("This is dynamic page!"); %>
  </body>
</html>

6) Test access:

[Screenshot: static page test]

[Screenshot: dynamic page test]
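The same checks can be scripted with curl (a sketch; the X-Cache header comes from the VCL above):

[root@node1 ~]# curl -I http://192.168.0.111/index.html    # run twice; the repeat should show "X-Cache: HIT from ..."
[root@node1 ~]# curl http://192.168.0.111/dynamic.jsp      # should return the "This is dynamic page!" markup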

Both tests pass and static/dynamic separation works, which completes the LNMT setup. MySQL could be installed and tested much as in a LAMP stack.

Part 2: LAMT Configuration

Overall plan:

Host            IP               Hostname
haproxy         192.168.0.111    node1.soul.com
varnish         192.168.0.112    node2.soul.com
httpd           192.168.0.113    node3.soul.com
httpd+tomcat    192.168.0.114    node4.soul.com
tomcat          192.168.0.115    node5.soul.com

[Figure: LAMT topology]

This continues from the setup above, so the haproxy and varnish configuration is not described again; the focus is on integrating httpd with Tomcat and on load balancing. The relevant IPs do need to be changed:

haproxy:

[root@node1 ~]# vim /etc/haproxy/haproxy.cfg
#---------------------------------------------------------------------
# round robin balancing between the various backends
#---------------------------------------------------------------------
backend dynamic
    balance     roundrobin
    server  node4 192.168.0.114:80 check    # the dynamic backend now points to node4

varnish on node2:

[root@node2 ~]# vim /etc/varnish/test.vcl
backend static {
  .host = "192.168.0.113";
  .port = "80";
}

Recompile and activate the VCL as before, then test.

With Apache in front of Tomcat, Apache exchanges data with the backend Tomcat through the mod_jk, mod_jk2, or mod_proxy module. On the Tomcat side, every web container instance has a connector component written in Java; in Tomcat 6 this is the org.apache.catalina.Connector class. Its constructor can build two kinds of connectors: HTTP/1.1, which answers requests over HTTP/HTTPS, and AJP/1.3, which answers requests over AJP. A connector is created simply by declaring it in the server.xml configuration file, although the class actually used differs depending on whether the system supports APR (the Apache Portable Runtime).
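For reference, both connectors are declared in the stock server.xml that ships with Tomcat (these are the default entries):

<Connector port="8080" protocol="HTTP/1.1"
           connectionTimeout="20000"
           redirectPort="8443" />
<Connector port="8009" protocol="AJP/1.3" redirectPort="8443" />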

APR is a collection of native libraries layered on top of the operating system that expose a common, standardized API; applications built on APR gain better scalability and performance when exchanging data with Apache.

Note also that mod_jk2 is no longer supported, and while mod_jk is still maintained by Apache, its project activity has dropped considerably. The more common approach today is therefore the mod_proxy module.

This experiment uses the mod_proxy module:

1) Configure httpd and Tomcat on node4:

#Configure httpd to talk to the backend Tomcat through mod_proxy; write a dedicated configuration file for it:
[root@node4 conf.d]# pwd
/etc/httpd/conf.d
[root@node4 conf.d]# vim mod_proxy.conf
ProxyVia on
ProxyRequests off
ProxyPreserveHost on
<proxy balancer://lb>    #定義一個組
BalancerMember ajp://192.168.0.114:8009 loadfactor=1 route=TomcatA
BalancerMember ajp://192.168.0.115:8009 loadfactor=1 route=TomcatB
</proxy>    #組內使用ajp協議進行後端代理
ProxyPass / balancer://lb/        #代理到後端的組
ProxyPa***everse / balancer://lb/
                                                                                                                                                                                                                                                                                                                                                                                                                                
#tomcat的安裝就再也不贅述;提供node4和node5上的頁面文件:
[root@node4 ~]# vim /usr/local/tomcat/webapps/ROOT/dynamic.jsp
<%@ page language="java" %>
<html>
  <head><title>TomcatA</title></head>
  <body>
    <h1><font color="red">TomcatA </font></h1>
    <table align="centre" border="1">
      <tr>
        <td>Session ID</td>
    <% session.setAttribute("abc","abc"); %>
        <td><%= session.getId() %></td>
      </tr>
      <tr>
        <td>Created on</td>
        <td><%= session.getCreationTime() %></td>
     </tr>
    </table>
  </body>
</html>
                                                                                                                                                                                                                                                                                                                                                                                                                                
------------------------------------------------------------------
                                                                                                                                                                                                                                                                                                                                                                                                                                
[root@node5 ~]# vim /usr/local/tomcat/webapps/ROOT/dynamic.jsp
<%@ page language="java" %>
<html>
  <head><title>TomcatB</title></head>
  <body>
    <h1><font color="blue">TomcatB </font></h1>
    <table align="centre" border="1">
      <tr>
        <td>Session ID</td>
    <% session.setAttribute("abc","abc"); %>
        <td><%= session.getId() %></td>
      </tr>
      <tr>
        <td>Created on</td>
        <td><%= session.getCreationTime() %></td>
     </tr>
    </table>
  </body>
</html>
                                                                                                                                                                                                                                                                                                                                                                                                                                
#完成後重啓httpd測試。
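A quick round-robin check from the shell (a sketch; it grabs the first TomcatA/TomcatB marker in each response):

[root@node1 ~]# for i in 1 2 3 4; do curl -s http://192.168.0.114/dynamic.jsp | grep -m1 -o 'Tomcat[AB]'; done
# with loadfactor=1 on both members the output should alternate: TomcatA, TomcatB, TomcatA, TomcatB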

[Screenshot: response served by TomcatA]

[Screenshot: response served by TomcatB]

Testing against the haproxy address 192.168.0.111 works as well:

[Screenshot: the same test through haproxy]

2) Bind sessions and enable the balancer manager interface:

#Edit the httpd proxy configuration file
[root@node4 ~]# vim /etc/httpd/conf.d/mod_proxy.conf
ProxyVia on
ProxyRequests off
ProxyPreserveHost on
<proxy balancer://lb>
BalancerMember ajp://192.168.0.114:8009 loadfactor=1 route=TomcatA
BalancerMember ajp://192.168.0.115:8009 loadfactor=1 route=TomcatB
</proxy>
<Location /lbmanager>    #定義負載均衡管理界面
        SetHandler balancer-manager
</Location>
ProxyPass /lbmanager !    #該界面是不作代理
ProxyPass / balancer://lb/ stickysession=JSESSIONID    #開啓session綁定
ProxyPa***everse / balancer://lb/
                                                                                                                                                                                                                                                                                                                                                                                       
#除了httpd配置文件;還須要更改tomcat的配置文件;後端兩個節點都須要對應的更改:
[root@node4 ~]# vim /usr/local/tomcat/conf/server.xml
<Engine name="Catalina" defaultHost="localhost" jvmRoute="TomcatA">
#add jvmRoute="TomcatA" to this line; on node5 add jvmRoute="TomcatB" instead
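Why this binds sessions: with jvmRoute set, Tomcat appends the route name to every session ID it issues, so the cookie looks like this (the ID value is illustrative):

JSESSIONID=9AB7C1D2E3F4056789ABCDEF01234567.TomcatA

mod_proxy_balancer compares the suffix after the dot with each member's route= value and keeps sending that client to the matching BalancerMember.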

Restart httpd and Tomcat, then test:

[Screenshot: session ID now carries a route suffix]

The response now carries session information, and refreshing the page no longer moves the client to the other Tomcat node.

[Screenshot: balancer manager page]

The manager interface works as well; simple management operations can be performed from it.

3) Next, configure a Tomcat session cluster:

Details vary across versions, so consult the official documentation: http://tomcat.apache.org/tomcat-7.0-doc/cluster-howto.html

#Edit Tomcat's configuration and add the following inside the Engine element (228.0.40.4 below is the multicast membership address):
[root@node4 ~]# vim /usr/local/tomcat/conf/server.xml
    <Engine name="Catalina" defaultHost="localhost" jvmRoute="TomcatA">
    <Cluster className="org.apache.catalina.ha.tcp.SimpleTcpCluster"
                 channelSendOptions="8">
          <Manager className="org.apache.catalina.ha.session.DeltaManager"
                   expireSessionsOnShutdown="false"
                   notifyListenersOnReplication="true"/>
          <Channel className="org.apache.catalina.tribes.group.GroupChannel">
            <Membership className="org.apache.catalina.tribes.membership.McastService"
                        address="228.0.40.4"    #廣播地址
                        port="45564"
                        frequency="500"
                        dropTime="3000"/>
            <Receiver className="org.apache.catalina.tribes.transport.nio.NioReceiver"
                      address="auto"
                      port="4000"
                      autoBind="100"
                      selectorTimeout="5000"
                      maxThreads="6"/>
            <Sender className="org.apache.catalina.tribes.transport.ReplicationTransmitter">
              <Transport className="org.apache.catalina.tribes.transport.nio.PooledParallelSender"/>
            </Sender>
            <Interceptor className="org.apache.catalina.tribes.group.interceptors.TcpFailureDetector"/>
          </Channel>
          <Valve className="org.apache.catalina.ha.tcp.ReplicationValve"
                 filter=""/>
          <Valve className="org.apache.catalina.ha.session.JvmRouteBinderValve"/>
          <Deployer className="org.apache.catalina.ha.deploy.FarmWarDeployer"
                    tempDir="/tmp/war-temp/"
                    deployDir="/tmp/war-deploy/"
                    watchDir="/tmp/war-listen/"
                    watchEnabled="false"/>
          <ClusterListener className="org.apache.catalina.ha.session.JvmRouteSessionIDBinderListener"/>
          <ClusterListener className="org.apache.catalina.ha.session.ClusterSessionListener"/>
        </Cluster>
                                                                                                                                                                                                                                                                                                                             
#完成後複製一份到node5上;對應的jvmRoute更改便可;
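Cluster traffic must also be allowed through any host firewall. A sketch of the iptables rules implied by the settings above (adjust to your own policy):

[root@node4 ~]# iptables -A INPUT -d 228.0.40.4 -p udp --dport 45564 -j ACCEPT    # multicast membership heartbeats
[root@node4 ~]# iptables -A INPUT -p tcp --dport 4000 -j ACCEPT                   # NioReceiver session replication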

web.xml also needs an addition:

[root@node4 ~]# cp /usr/local/tomcat/conf/web.xml /usr/local/tomcat/webapps/ROOT/WEB-INF/
#The default webapp directory is used in this lab, so the file is copied there; adjust the path to your own deployment
[root@node4 ~]# vim /usr/local/tomcat/webapps/ROOT/WEB-INF/web.xml
<distributable/>
#Add the line above anywhere inside the body of web.xml; copy it to node5 as well
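For reference, <distributable/> sits directly under the root element (a sketch of the relevant part of web.xml):

<web-app xmlns="http://java.sun.com/xml/ns/javaee"
         version="2.5">
  <distributable/>
  ...
</web-app>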
                                                                                                                                                                                                                                                                                                                     
#Remove the scheduler's stickiness
[root@node4 ~]# vim /etc/httpd/conf.d/mod_proxy.conf
# delete the stickysession=JSESSIONID parameter so the line reads:
ProxyPass / balancer://lb/
#Restart httpd and tomcat, then test.

[Screenshot: request served by one Tomcat node]

[Screenshot: after a refresh, served by the other node with the same session ID]

Although the serving node changed, the session ID stayed the same; session persistence is now in place.




If there are any mistakes, corrections are welcome.
