# Inside the http block: per-IP connection limiting (ngx_http_limit_conn_module)
http {
    include       mime.types;
    default_type  application/octet-stream;

    log_format main '[$time_local][$msec]$status';

    sendfile          on;
    keepalive_timeout 65;

    proxy_cache_path /var/nginx/cache keys_zone=one:10m levels=1:2 inactive=6h max_size=1g;

    ### Limiting setup: one 10MB shared zone keyed by the client address
    ### ($binary_remote_addr is the compact form, saving zone memory).
    limit_conn_zone $binary_remote_addr zone=perip:10m;
    limit_conn_log_level info;
    limit_conn_status 503;

    include conf.d/*.conf;
}
# Inside the server block: apply the per-IP connection limit
server {
    listen      80;
    server_name _;
    root        /opt/openresty/nginx/html;
    charset     utf-8;

    proxy_send_timeout   60;
    proxy_read_timeout   1800s;
    client_max_body_size 300M;

    proxy_set_header X-Forwarded-Host   $host;
    proxy_set_header X-Forwarded-Server $host;
    proxy_set_header Host               $host:$server_port;
    proxy_set_header X-Real-IP          $remote_addr;
    proxy_set_header X-Forwarded-For    $proxy_add_x_forwarded_for;

    #--- limiting ---#
    # At most 2 concurrent connections per client IP; the "perip" zone
    # must be declared with limit_conn_zone in the http block.
    location /limit {
        limit_conn perip 2;
        proxy_pass http://backend/cache;
    }
    #----------------#

    # FIX: the location previously matched /40x.html while error_page 404
    # points at /404.html, so the dedicated location was never used.
    error_page 404 /404.html;
    location = /404.html {
    }

    error_page 500 502 503 504 /50x.html;
    location = /50x.html {
    }
}
# Inside the http block: per-IP request-rate limiting (ngx_http_limit_req_module)
http {
    include       mime.types;
    default_type  application/octet-stream;

    log_format main '[$time_local][$msec]$status';

    sendfile          on;
    keepalive_timeout 65;

    proxy_cache_path /var/nginx/cache keys_zone=one:10m levels=1:2 inactive=6h max_size=1g;

    ### Limiting setup: process at most 1 request per second per client IP.
    limit_req_zone $binary_remote_addr zone=req:10m rate=1r/s;
    # FIX: this snippet configures limit_req, so the log level / status
    # must be set with the limit_req_* directives; the limit_conn_*
    # directives below were copy-pasted from the limit_conn example and
    # do not affect limit_req rejections (kept for compatibility).
    limit_req_log_level info;
    limit_req_status 503;
    limit_conn_log_level info;
    limit_conn_status 503;

    include conf.d/*.conf;
}
server {
    listen      80;
    server_name _;
    root        /opt/openresty/nginx/html;
    charset     utf-8;

    proxy_send_timeout   60;
    proxy_read_timeout   1800s;
    client_max_body_size 300M;

    proxy_set_header X-Forwarded-Host   $host;
    proxy_set_header X-Forwarded-Server $host;
    proxy_set_header Host               $host:$server_port;
    proxy_set_header X-Real-IP          $remote_addr;
    proxy_set_header X-Forwarded-For    $proxy_add_x_forwarded_for;

    # zone=req   : which shared zone to use — matches the name given to
    #              limit_req_zone in the http block.
    # burst=3    : a waiting room of 3 slots; requests that exceed the rate
    #              are queued here, and anything beyond the 3 slots is
    #              rejected immediately with the configured status (503).
    # nodelay    : if set, up to (burst + rate) requests are served at once
    #              and anything beyond that is rejected immediately — queued
    #              requests never wait.  Without it, excess requests are
    #              drained from the queue at the configured rate (r/s).
    location /limit_req {
        limit_req zone=req burst=3 nodelay;
        proxy_pass http://backend/cache;
    }

    # FIX: the location previously matched /40x.html while error_page 404
    # points at /404.html, so the dedicated location was never used.
    error_page 404 /404.html;
    location = /404.html {
    }

    error_page 500 502 503 504 /50x.html;
    location = /50x.html {
    }
}
# Inside the http block: shared memory for Lua-based connection limiting
http {
    include       mime.types;
    default_type  application/octet-stream;

    log_format main '[$time_local][$msec]$status';

    sendfile          on;
    keepalive_timeout 65;

    # 100MB shared dict consumed by resty.limit.conn in the server block
    lua_shared_dict my_limit_conn_store 100m;
    limit_conn_log_level info;
    limit_conn_status 503;

    include conf.d/*.conf;
}
server {
    listen      80;
    server_name _;
    root        /opt/openresty/nginx/html;
    charset     utf-8;

    proxy_send_timeout   60;
    proxy_read_timeout   1800s;
    client_max_body_size 300M;

    proxy_set_header X-Forwarded-Host   $host;
    proxy_set_header X-Forwarded-Server $host;
    proxy_set_header Host               $host:$server_port;
    proxy_set_header X-Real-IP          $remote_addr;
    proxy_set_header X-Forwarded-For    $proxy_add_x_forwarded_for;

    # Limit total concurrency on this endpoint with lua-resty-limit-traffic.
    location /limit_lua_conn {
        access_by_lua_block {
            local limit_conn = require "resty.limit.conn"
            -- new(dict, conn, burst, default_conn_delay):
            -- allow 2 concurrent requests per client IP, plus a burst of 1
            -- that is delayed instead of rejected; 0.5s is the estimated
            -- time a request holds its slot (drives the leaky-bucket delay).
            local lim, err = limit_conn.new("my_limit_conn_store", 2, 1, 0.5)
            if not lim then
                ngx.log(ngx.ERR, "限流:", err)
                return ngx.exit(503)
            end
            local key = ngx.var.binary_remote_addr
            local delay, err = lim:incoming(key, true)
            if not delay then
                if err == "rejected" then
                    return ngx.exit(503)
                end
                ngx.log(ngx.ERR, "failed to limit req:", err)
                return ngx.exit(500)
            end
            -- FIX: stash state so log_by_lua can decrement the counter,
            -- and honor the delay computed for burst requests.
            if lim:is_committed() then
                ngx.ctx.limit_conn = lim
                ngx.ctx.limit_conn_key = key
                ngx.ctx.limit_conn_delay = delay
            end
            if delay >= 0.001 then
                ngx.sleep(delay)
            end
        }
        log_by_lua_block {
            -- FIX: without lim:leaving() the per-IP counter in the shared
            -- dict never decreases, so every client is eventually rejected
            -- forever.  resty.limit.conn requires this in the log phase.
            local lim = ngx.ctx.limit_conn
            if lim then
                local key = ngx.ctx.limit_conn_key
                local latency = tonumber(ngx.var.request_time) - ngx.ctx.limit_conn_delay
                local conn, err = lim:leaving(key, latency)
                if not conn then
                    ngx.log(ngx.ERR, "failed to record the connection leaving: ", err)
                end
            end
        }
        proxy_pass http://backend/cache;
    }

    # FIX: on the original single physical line, the '#' before error_page
    # commented out everything to end-of-line (including closing braces);
    # also the location previously matched /40x.html while error_page 404
    # points at /404.html.
    error_page 404 /404.html;
    location = /404.html {
    }

    error_page 500 502 503 504 /50x.html;
    location = /50x.html {
    }
}
# Active health checking (nginx_upstream_check_module): probe every node
# every 3000ms; 2 consecutive successes mark the realserver up, 5
# consecutive failures mark it down; each probe times out after 1000ms.
upstream name {
    server 192.168.0.21:80;
    server 192.168.0.22:80;
    check interval=3000 rise=2 fall=5 timeout=1000;
}
http {
    upstream backend {
        server 120.78.206.183:8080;
        server 14.116.196.138:8002;
    }

    lua_shared_dict healthcheck 1m;
    lua_socket_log_errors off;

    init_worker_by_lua_block {
        local hc = require "resty.upstream.healthcheck"
        local ok, err = hc.spawn_checker {
            shm = "healthcheck",
            -- FIX: must name an upstream defined in this configuration; it
            -- said "tomcat", but the upstream block above is "backend", so
            -- spawn_checker would fail with "no such upstream".
            upstream = "backend",
            type = "http",
            -- FIX: '#' is not a Lua comment character; inside
            -- init_worker_by_lua_block only '--' comments are valid.
            -- HTTP probe sent to each backend to test health.
            http_req = "GET /nginx HTTP/1.0\r\nHost: tomcat\r\n\r\n",
            interval = 2000,
            timeout = 5000,
            fall = 3,
            rise = 2,
            -- A 200 or 302 response marks the peer healthy.
            valid_statuses = {200, 302},
            concurrency = 1,
        }
        if not ok then
            ngx.log(ngx.ERR, "=======> failed to spawn health checker: ", err)
            return
        end
    }

    server {
        listen      80;
        server_name localhost;

        # NOTE(review): cache zone "one" must be declared with
        # proxy_cache_path in this http block or an included file — it is
        # not defined here; confirm against the deployed config.
        location ^~ /cache {
            proxy_cache one;
            proxy_no_cache $http_soapaction;
            proxy_cache_key $request_body;
            proxy_cache_valid 200 302 10m;
            proxy_cache_methods GET POST;
            proxy_ignore_headers Cache-Control Set-Cookie;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_pass http://backend/cache;
        }

        # Plain-text health status page rendered by the healthcheck module.
        location /server/status {
            access_log off;
            default_type text/plain;
            content_by_lua_block {
                local hc = require "resty.upstream.healthcheck"
                ngx.say("Nginx Worker PID: ", ngx.worker.pid())
                ngx.print(hc.status_page())
            }
        }
    }
}
# (removed stray HTML editor artifact left over from the original paste)