Let's start with the official sample code for calling resty.redis:
local redis = require "resty.redis"
local red = redis:new()

red:set_timeout(1000) -- 1 sec

local ok, err = red:connect("127.0.0.1", 6379)
if not ok then
    ngx.say("failed to connect: ", err)
    return
end

ok, err = red:set("dog", "an animal")
if not ok then
    ngx.say("failed to set dog: ", err)
    return
end

ngx.say("set result: ", ok)

-- put it into the connection pool of size 100,
-- with 10 seconds max idle time
local ok, err = red:set_keepalive(10000, 100)
if not ok then
    ngx.say("failed to set keepalive: ", err)
    return
end
This is a standard resty.redis call. If Redis is used only occasionally in your code, there is nothing wrong with it. But if your project depends heavily on Redis, large amounts of code end up repeating the same full sequence: create a connection --> perform the operation --> close the connection (or return it to the connection pool), often with different handling for the various return cases on top. You quickly find yourself with a great deal of duplication.
Lua has no built-in support for object orientation. Many people go to great lengths to simulate it with metatables. The creators of Lua, however, apparently did not want to encourage this: they reserved the length metamethod __len and the finalizer __gc for the C API, so pure Lua can only look on with envy.
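That said, the usual workaround is enough for our purposes: a plain table acts as the class, and instances are tables whose __index metamethod points back at it. The wrapper shown further down uses exactly this pattern (local mt = { __index = _M }). Here is a minimal sketch of the idea; the Animal and speak names are purely illustrative:

-- a plain table plays the role of a class; __index makes method
-- lookups on instances fall back to it
local Animal = {}
Animal.__index = Animal

function Animal.new(name)
    -- an instance is just a table carrying its own state
    return setmetatable({ name = name }, Animal)
end

function Animal:speak()
    return self.name .. " says woof"
end

local dog = Animal.new("dog")
print(dog:speak())  --> dog says woof

-- note: for plain tables like these, stock Lua 5.1 / default LuaJIT
-- ignores a __len metamethod and never calls __gc; those hooks are
-- reserved for userdata created through the C API, as noted above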
The code we would like to write looks like this:
local red = redis:new()

local ok, err = red:set("dog", "an animal")
if not ok then
    ngx.say("failed to set dog: ", err)
    return
end
ngx.say("set result: ", ok)

local res, err = red:get("dog")
if not res then
    ngx.say("failed to get dog: ", err)
    return
end

if res == ngx.null then
    ngx.say("dog not found.")
    return
end
ngx.say("dog: ", res)
而且他自身具有如下幾個特徵:app
No suspense, just the useful bits. Here is what we ended up with; the full code is also published as a gist:
-- file name: resty/redis_iresty.lua
local redis_c = require "resty.redis"

-- use table.new when available (LuaJIT) to pre-size the module table
local ok, new_tab = pcall(require, "table.new")
if not ok or type(new_tab) ~= "function" then
    new_tab = function (narr, nrec) return {} end
end

local _M = new_tab(0, 155)
_M._VERSION = '0.01'

-- commands that get an auto-generated proxy method; subscribe,
-- unsubscribe and punsubscribe are deliberately left out
local commands = {
    "append", "auth", "bgrewriteaof", "bgsave", "bitcount", "bitop",
    "blpop", "brpop", "brpoplpush", "client", "config", "dbsize",
    "debug", "decr", "decrby", "del", "discard", "dump",
    "echo", "eval", "exec", "exists", "expire", "expireat",
    "flushall", "flushdb", "get", "getbit", "getrange", "getset",
    "hdel", "hexists", "hget", "hgetall", "hincrby", "hincrbyfloat",
    "hkeys", "hlen", "hmget", "hmset", "hscan", "hset",
    "hsetnx", "hvals", "incr", "incrby", "incrbyfloat", "info",
    "keys", "lastsave", "lindex", "linsert", "llen", "lpop",
    "lpush", "lpushx", "lrange", "lrem", "lset", "ltrim",
    "mget", "migrate", "monitor", "move", "mset", "msetnx",
    "multi", "object", "persist", "pexpire", "pexpireat", "ping",
    "psetex", "psubscribe", "pttl", "publish",
    --[[ "punsubscribe", ]] "pubsub", "quit", "randomkey", "rename",
    "renamenx", "restore", "rpop", "rpoplpush", "rpush", "rpushx",
    "sadd", "save", "scan", "scard", "script", "sdiff",
    "sdiffstore", "select", "set", "setbit", "setex", "setnx",
    "setrange", "shutdown", "sinter", "sinterstore", "sismember",
    "slaveof", "slowlog", "smembers", "smove", "sort", "spop",
    "srandmember", "srem", "sscan", "strlen",
    --[[ "subscribe", ]] "sunion", "sunionstore", "sync", "time",
    "ttl", "type", --[[ "unsubscribe", ]] "unwatch", "watch",
    "zadd", "zcard", "zcount", "zincrby", "zinterstore", "zrange",
    "zrangebyscore", "zrank", "zrem", "zremrangebyrank",
    "zremrangebyscore", "zrevrange", "zrevrangebyscore", "zrevrank",
    "zscan", "zscore", "zunionstore", "evalsha"
}

local mt = { __index = _M }

-- a reply counts as null if it is ngx.null, nil, or a table that
-- contains nothing but ngx.null
local function is_redis_null( res )
    if type(res) == "table" then
        for k, v in pairs(res) do
            if v ~= ngx.null then
                return false
            end
        end
        return true
    elseif res == ngx.null then
        return true
    elseif res == nil then
        return true
    end

    return false
end

-- change connect address as you need
function _M.connect_mod( self, redis )
    redis:set_timeout(self.timeout)
    return redis:connect("127.0.0.1", 6379)
end

function _M.set_keepalive_mod( redis )
    -- put it into the connection pool of size 1000, with 60 seconds max idle time
    return redis:set_keepalive(60000, 1000)
end

function _M.init_pipeline( self )
    self._reqs = {}
end

function _M.commit_pipeline( self )
    local reqs = self._reqs
    if nil == reqs or 0 == #reqs then
        return {}, "no pipeline"
    else
        self._reqs = nil
    end

    local redis, err = redis_c:new()
    if not redis then
        return nil, err
    end

    local ok, err = self:connect_mod(redis)
    if not ok then
        return {}, err
    end

    redis:init_pipeline()
    for _, vals in ipairs(reqs) do
        local fun = redis[vals[1]]
        table.remove(vals, 1)
        fun(redis, unpack(vals))
    end

    local results, err = redis:commit_pipeline()
    if not results or err then
        return {}, err
    end

    if is_redis_null(results) then
        results = {}
        ngx.log(ngx.WARN, "is null")
    end
    -- table.remove(results, 1)

    self.set_keepalive_mod(redis)

    for i, value in ipairs(results) do
        if is_redis_null(value) then
            results[i] = nil
        end
    end

    return results, err
end

-- blocks for a single message on the channel, then unsubscribes
function _M.subscribe( self, channel )
    local redis, err = redis_c:new()
    if not redis then
        return nil, err
    end

    local ok, err = self:connect_mod(redis)
    if not ok or err then
        return nil, err
    end

    local res, err = redis:subscribe(channel)
    if not res then
        return nil, err
    end

    res, err = redis:read_reply()
    if not res then
        return nil, err
    end

    redis:unsubscribe(channel)
    self.set_keepalive_mod(redis)

    return res, err
end

-- every generated command goes through here: either buffered into the
-- current pipeline, or executed on a fresh connection that is returned
-- to the pool afterwards
local function do_command(self, cmd, ... )
    if self._reqs then
        table.insert(self._reqs, {cmd, ...})
        return
    end

    local redis, err = redis_c:new()
    if not redis then
        return nil, err
    end

    local ok, err = self:connect_mod(redis)
    if not ok or err then
        return nil, err
    end

    local fun = redis[cmd]
    local result, err = fun(redis, ...)
    if not result or err then
        -- ngx.log(ngx.ERR, "pipeline result:", result, " err:", err)
        return nil, err
    end

    if is_redis_null(result) then
        result = nil
    end

    self.set_keepalive_mod(redis)

    return result, err
end

function _M.new(self, opts)
    opts = opts or {}
    local timeout = (opts.timeout and opts.timeout * 1000) or 1000
    local db_index = opts.db_index or 0

    -- generate one proxy method per supported command
    for i = 1, #commands do
        local cmd = commands[i]
        _M[cmd] = function (self, ...)
            return do_command(self, cmd, ...)
        end
    end

    return setmetatable({
        timeout = timeout,
        db_index = db_index,
        _reqs = nil }, mt)
end

return _M
Example usage:
local redis = require "resty.redis_iresty"
local red = redis:new()

local ok, err = red:set("dog", "an animal")
if not ok then
    ngx.say("failed to set dog: ", err)
    return
end
ngx.say("set result: ", ok)
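The pipeline support of resty.redis is preserved as well: init_pipeline switches the object into buffering mode, and commit_pipeline replays the queued commands on a single pooled connection. A sketch of how that might look; the cat key is just for illustration:

local redis = require "resty.redis_iresty"
local red = redis:new()

-- start buffering commands instead of sending them one by one
red:init_pipeline()
red:set("cat", "an animal too")
red:get("dog")
red:get("cat")

-- the queued commands are sent in one batch; results come back as an
-- array, with Redis nulls already converted to nil by the wrapper
local results, err = red:commit_pipeline()
if not results or err then
    ngx.say("failed to commit pipeline: ", err)
    return
end

for i, res in ipairs(results) do
    ngx.say(i, ": ", res)
end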
As the final example shows, connection creation, teardown and the connection pool are all hidden away; we only need to write business logic. Mom no longer has to worry about me bringing Redis down.
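The same goes for the subscribe helper defined above: it creates and recycles its own connection internally and blocks, up to the configured timeout (1 second by default), for a single message before unsubscribing. A rough sketch, with a made-up channel name:

local redis = require "resty.redis_iresty"
local red = redis:new()

-- waits for one message on the channel (or times out), then the
-- wrapper unsubscribes and returns the raw read_reply() result
local res, err = red:subscribe("notice_channel")
if not res then
    ngx.say("failed to subscribe: ", err)
    return
end

-- the reply is an array like { "message", "notice_channel", "<payload>" }
ngx.say("got message: ", res[3])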
Todo list: resty.redis does not yet support the Redis 3.0 cluster API. Since all Redis input and output now goes through this single entry point, the redis_iresty module could be adjusted and extended to speak the Redis 3.0 cluster protocol. We have not adopted Redis Cluster ourselves yet, so patches or write-ups from anyone who uses it are very welcome.
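As a small illustration of why the unified entry point helps here: connect_mod is the only place an address is ever chosen, so a configurable or cluster-aware variant only has to replace that one function. The sketch below merely overrides it with an alternative hard-coded address (the host and port are invented for the example); a real Redis Cluster client would additionally need key-slot mapping and MOVED/ASK redirection handling:

local redis = require "resty.redis_iresty"

-- sketch only: point every call at a different Redis instance by
-- swapping the single connect entry point; 10.0.0.5:6380 is made up
function redis.connect_mod(self, redis_conn)
    redis_conn:set_timeout(self.timeout)
    return redis_conn:connect("10.0.0.5", 6380)
end

local red = redis:new()
local ok, err = red:set("dog", "an animal")
if not ok then
    ngx.say("failed to set dog: ", err)
    return
end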