GitHub code repository: https://github.com/GuoZhaoran/spikeSystem
1
Large-Scale High-Concurrency System Architecture
#configure weighted load balancing
upstream load_rule {
   server 127.0.0.1:3001 weight=1;
   server 127.0.0.1:3002 weight=2;
   server 127.0.0.1:3003 weight=3;
   server 127.0.0.1:3004 weight=4;
}
...
server {
   listen       80;
   server_name  load_balance.com www.load_balance.com;
   location / {
      proxy_pass http://load_rule;
   }
}
package main

import (
	"net/http"
	"os"
	"strings"
)

func main() {
	http.HandleFunc("/buy/ticket", handleReq)
	http.ListenAndServe(":3001", nil)
}

//handleReq handles the request and records which instance served it in the log
func handleReq(w http.ResponseWriter, r *http.Request) {
	failedMsg := "handle in port:"
	writeLog(failedMsg, "./stat.log")
}

//writeLog appends a line to the log; the port number is used as the join
//separator, so this instance logs "handle in port:3001" (each of the four
//instances is built with its own port, 3001 through 3004)
func writeLog(msg string, logPath string) {
	fd, _ := os.OpenFile(logPath, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0644)
	defer fd.Close()
	content := strings.Join([]string{msg, "\r\n"}, "3001")
	buf := []byte(content)
	fd.Write(buf)
}
ab -n 1000 -c 100 http://www.load_balance.com/buy/ticket
Tallying the results in the log, ports 3001 through 3004 received 100, 200, 300, and 400 requests respectively, which matches the weights configured in nginx very well, and the traffic after balancing is even and random.
For the concrete implementation, you can refer to the source code of nginx's upstream module; here is a recommended article:
https://www.kancloud.cn/digest/understandingnginx/202607
2
Choosing an Approach for the Flash-Sale System
From the discussion above we know that the users' flash-sale traffic, after passing through layer upon layer of load balancing, is spread evenly across the servers. Even so, the QPS that a single machine in the cluster must bear is still very high. How do we push single-machine performance to its limit? To solve this problem, we first have to think one thing through:

A ticket-booking system typically handles three basic stages: creating the order, deducting stock, and taking the user's payment. Our system must guarantee that train tickets are neither oversold nor undersold, that every ticket sold is only valid once paid for, and that the system withstands extremely high concurrency.

In what order should these three stages be arranged to be most reasonable? Let's analyze:
When users' concurrent requests reach the server, first create the order, then deduct stock, and wait for the user to pay. This is the solution most of us would think of first, and it does guarantee that orders are not oversold, because stock is deducted right after the order is created, and that is an atomic operation.

But it also creates some problems.

First, under extreme concurrency the details of any in-memory operation are critical to performance, and logic like order creation usually has to be persisted to a disk-backed database, so the pressure on the database can be imagined.

Second, if users maliciously place orders without paying, stock shrinks and many tickets go unsold. The server can limit the number of orders per IP and per user, but that is hardly a good solution either.

If instead we deduct stock only after the user has paid for the order, the first feeling is that nothing will go unsold. But this is a cardinal sin of concurrent architecture: under extreme concurrency users may create a great many orders, and when stock drops to zero many users discover that the orders they grabbed cannot be paid for. This is the so-called "overselling". For example, with 100 tickets in stock, 10,000 users may create orders before anyone pays, and only the first 100 payments can succeed. It also does nothing to avoid concurrent database disk IO.
3
The Art of Deducting Stock
On a single machine under low concurrency, we usually implement stock deduction like this:
To keep the stock deduction and order creation atomic, we need a transaction: fetch the stock, check it, deduct it, and finally commit the transaction. The whole flow is full of IO, and the database operations block. This approach is fundamentally unsuited to a high-concurrency flash-sale system.
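To make the problem concrete, here is a minimal sketch of that transactional flow in Go. It is not from the author's repo: the stock and orders tables, their columns, and the go-sql-driver/mysql import are illustrative assumptions.

package main

import (
	"database/sql"
	"errors"

	_ "github.com/go-sql-driver/mysql" // hypothetical driver choice
)

//deductStockTx shows the naive flow: begin, read and lock, check, deduct,
//insert the order, commit. Every step is blocking database IO.
func deductStockTx(db *sql.DB, ticketID int64) error {
	tx, err := db.Begin()
	if err != nil {
		return err
	}
	defer tx.Rollback() // no-op once Commit has succeeded

	var count int64
	// SELECT ... FOR UPDATE locks the row, serializing all buyers of this ticket
	if err := tx.QueryRow("SELECT count FROM stock WHERE ticket_id = ? FOR UPDATE", ticketID).Scan(&count); err != nil {
		return err
	}
	if count <= 0 {
		return errors.New("sold out")
	}
	if _, err := tx.Exec("UPDATE stock SET count = count - 1 WHERE ticket_id = ?", ticketID); err != nil {
		return err
	}
	if _, err := tx.Exec("INSERT INTO orders (ticket_id) VALUES (?)", ticketID); err != nil {
		return err
	}
	return tx.Commit()
}

Under flash-sale load, every request queues behind that row lock, which is exactly the bottleneck the optimization below removes.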
Next we optimize the single-machine deduction scheme: local stock deduction. We allocate a share of the stock to each local machine, deduct it directly in memory, and then create orders asynchronously following the earlier logic. The improved single-machine system works as follows.
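The demo in the next section only shows the deduction path, so here is a hedged sketch of what "create orders asynchronously" could look like; the Order type, the channel size, and the persistOrder helper are my assumptions, not the author's code.

package main

import "log"

//Order is a hypothetical stand-in for the real order record
type Order struct {
	UserID   int64
	TicketID int64
}

//a buffered channel decouples the hot path from order persistence
var orderChan = make(chan Order, 1024)

func init() {
	// a single background goroutine drains the channel and persists orders
	go func() {
		for order := range orderChan {
			persistOrder(order)
		}
	}()
}

//persistOrder stands in for the real (slow, disk-bound) database write
func persistOrder(o Order) {
	log.Printf("order persisted: user=%d ticket=%d", o.UserID, o.TicketID)
}

After a successful in-memory deduction, the handler only needs to do orderChan <- Order{...}, so no request ever waits on the database.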
4
Code Demonstration
...
//struct definition in the localSpike package
package localSpike

type LocalSpike struct {
	LocalInStock     int64
	LocalSalesVolume int64
}
...

//remoteSpike: definition of the hash structure keys and the redis connection pool
package remoteSpike

//keys of the remote order store
type RemoteSpikeKeys struct {
	SpikeOrderHashKey  string //key of the flash-sale order hash in redis
	TotalInventoryKey  string //field holding the total stock in the hash
	QuantityOfOrderKey string //field holding the number of orders placed in the hash
}

//NewPool initializes the redis connection pool
func NewPool() *redis.Pool {
	return &redis.Pool{
		MaxIdle:   10000,
		MaxActive: 12000, // max number of connections
		Dial: func() (redis.Conn, error) {
			c, err := redis.Dial("tcp", ":6379")
			if err != nil {
				panic(err.Error())
			}
			return c, err
		},
	}
}
...

//the global variable declarations and imports are elided ("...") above
func init() {
	localSpike = localSpike2.LocalSpike{
		LocalInStock:     150,
		LocalSalesVolume: 0,
	}
	remoteSpike = remoteSpike2.RemoteSpikeKeys{
		SpikeOrderHashKey:  "ticket_hash_key",
		TotalInventoryKey:  "ticket_total_nums",
		QuantityOfOrderKey: "ticket_sold_nums",
	}
	redisPool = remoteSpike2.NewPool()
	done = make(chan int, 1)
	done <- 1
}
package localSpike

//LocalDeductionStock deducts stock locally and reports whether the sale
//succeeded; with 150 tickets allocated locally, sales 1 through 150 succeed
func (spike *LocalSpike) LocalDeductionStock() bool {
	spike.LocalSalesVolume = spike.LocalSalesVolume + 1
	return spike.LocalSalesVolume <= spike.LocalInStock
}
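Note that LocalDeductionStock mutates shared state and is not safe to call from multiple goroutines, which is why the handler further down serializes requests through a channel. An alternative sketch, my suggestion rather than the article's code, uses sync/atomic so the local deduction needs no global lock:

package localSpike

import "sync/atomic"

//LocalDeductionStockAtomic increments the sales counter atomically, so
//concurrent handlers can deduct local stock without a global mutex
func (spike *LocalSpike) LocalDeductionStockAtomic() bool {
	return atomic.AddInt64(&spike.LocalSalesVolume, 1) <= spike.LocalInStock
}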
package remoteSpike
......

const LuaScript = `
	local ticket_key = KEYS[1]
	local ticket_total_key = ARGV[1]
	local ticket_sold_key = ARGV[2]
	local ticket_total_nums = tonumber(redis.call('HGET', ticket_key, ticket_total_key))
	local ticket_sold_nums = tonumber(redis.call('HGET', ticket_key, ticket_sold_key))
	-- if tickets remain, increment the order count and return the new value
	if (ticket_total_nums > ticket_sold_nums) then
		return redis.call('HINCRBY', ticket_key, ticket_sold_key, 1)
	end
	return 0
`

//RemoteDeductionStock deducts stock in the central store; the Lua script
//runs atomically inside redis, so check and increment cannot interleave
func (RemoteSpikeKeys *RemoteSpikeKeys) RemoteDeductionStock(conn redis.Conn) bool {
	lua := redis.NewScript(1, LuaScript)
	result, err := redis.Int(lua.Do(conn, RemoteSpikeKeys.SpikeOrderHashKey, RemoteSpikeKeys.TotalInventoryKey, RemoteSpikeKeys.QuantityOfOrderKey))
	if err != nil {
		return false
	}
	return result != 0
}
hmset ticket_hash_key "ticket_total_nums" 10000 "ticket_sold_nums" 0
package main
...
func main() {
	http.HandleFunc("/buy/ticket", handleReq)
	http.ListenAndServe(":3005", nil)
}
package main

//handleReq handles the request and writes the outcome to the log
func handleReq(w http.ResponseWriter, r *http.Request) {
	redisConn := redisPool.Get()
	defer redisConn.Close()
	LogMsg := ""
	<-done //acquire the global lock (a buffered channel used as a mutex)
	if localSpike.LocalDeductionStock() && remoteSpike.RemoteDeductionStock(redisConn) {
		util.RespJson(w, 1, "ticket grabbed successfully", nil)
		LogMsg = LogMsg + "result:1,localSales:" + strconv.FormatInt(localSpike.LocalSalesVolume, 10)
	} else {
		util.RespJson(w, -1, "sold out", nil)
		LogMsg = LogMsg + "result:0,localSales:" + strconv.FormatInt(localSpike.LocalSalesVolume, 10)
	}
	done <- 1 //release the lock
	//write the ticket-grabbing outcome to the log
	writeLog(LogMsg, "./stat.log")
}

func writeLog(msg string, logPath string) {
	fd, _ := os.OpenFile(logPath, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0644)
	defer fd.Close()
	content := strings.Join([]string{msg, "\r\n"}, "")
	buf := []byte(content)
	fd.Write(buf)
}
ab -n 10000 -c 100 http://127.0.0.1:3005/buy/ticket
This is ApacheBench, Version 2.3 <$Revision: 1826891 $>
Copyright 1996 Adam Twiss, Zeus Technology Ltd, http://www.zeustech.net/
Licensed to The Apache Software Foundation, http://www.apache.org/

Benchmarking 127.0.0.1 (be patient)
Completed 1000 requests
Completed 2000 requests
Completed 3000 requests
Completed 4000 requests
Completed 5000 requests
Completed 6000 requests
Completed 7000 requests
Completed 8000 requests
Completed 9000 requests
Completed 10000 requests
Finished 10000 requests

Server Software:
Server Hostname:        127.0.0.1
Server Port:            3005

Document Path:          /buy/ticket
Document Length:        29 bytes

Concurrency Level:      100
Time taken for tests:   2.339 seconds
Complete requests:      10000
Failed requests:        0
Total transferred:      1370000 bytes
HTML transferred:       290000 bytes
Requests per second:    4275.96 [#/sec] (mean)
Time per request:       23.387 [ms] (mean)
Time per request:       0.234 [ms] (mean, across all concurrent requests)
Transfer rate:          572.08 [Kbytes/sec] received

Connection Times (ms)
              min  mean[+/-sd] median   max
Connect:        0    8  14.7      6     223
Processing:     2   15  17.6     11     232
Waiting:        1   11  13.5      8     225
Total:          7   23  22.8     18     239

Percentage of the requests served within a certain time (ms)
  50%     18
  66%     24
  75%     26
  80%     28
  90%     33
  95%     39
  98%     45
  99%     54
 100%    239 (longest request)
//stat.log
...
result:1,localSales:145
result:1,localSales:146
result:1,localSales:147
result:1,localSales:148
result:1,localSales:149
result:1,localSales:150
result:0,localSales:151
result:0,localSales:152
result:0,localSales:153
result:0,localSales:154
result:0,localSales:156
...
5
Summary and Review
整體來講,秒殺系統是很是複雜的。咱們這裏只是簡單介紹模擬了一下單機如何優化到高性能,集羣如何避免單點故障,保證訂單不超賣、很多賣的一些策略,完整的訂單系統還有訂單進度的查看,每臺服務器上都有一個任務,定時的從總庫存同步餘票和庫存信息展現給用戶,還有用戶在訂單有效期內不支付,釋放訂單,補充到庫存等等。
Author: 繪你一世傾城
Source: juejin.im/post/5d84e21f6fb9a06ac8248149