All source code quoted in this article comes from Redis 2.8.2.
The implementation of Redis's AOF persistence mechanism lives in redis.c, redis.h, aof.c, bio.c, rio.c, and config.c.
Before reading this article, please read the earlier post analyzing the AOF configuration options ("Redis數據持久化機制AOF原理分析之配置詳解") to understand how the AOF-related parameters are parsed. Link:
http://blog.csdn.net/acceptedxukai/article/details/18135219
When reposting, please credit the original article: http://blog.csdn.net/acceptedxukai/article/details/18136903
The implementation of the AOF persistence mechanism is described below.
Loading AOF file data at server startup
The execution path for loading AOF data at startup is: main() -> initServerConfig() -> loadServerConfig() -> initServer() -> loadDataFromDisk(). initServerConfig() initializes the default AOF configuration; loadServerConfig() loads the AOF settings from redis.conf and overrides the server defaults, and if appendonly on is configured, AOF persistence is activated and server.aof_state is set to REDIS_AOF_ON. loadDataFromDisk() checks whether server.aof_state == REDIS_AOF_ON, and if so calls loadAppendOnlyFile() to load the data from the AOF file. Loading is simply a matter of reading the file: because the data stored in the AOF file has exactly the same format as client requests and fully conforms to the Redis protocol, the server creates a fake client (fakeClient), parses the AOF contents as if they were client requests, and invokes each command via cmd->proc(fakeClient), replaying the data in the AOF file back into the Redis server's databases.
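To make the file format concrete, here is a sketch of what a command such as SET foo bar issued against database 0 would look like inside the AOF (the key and value are just illustrative); every line shown is terminated by \r\n:

*2
$6
SELECT
$1
0
*3
$3
SET
$3
foo
$3
bar

loadAppendOnlyFile() below parses exactly this format: the "*" line gives the argument count and each "$" line gives the length of the bulk string that follows it.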
The server prefers loading the AOF file because the data in the AOF file is newer than the data in the RDB file.
/* Function called at startup to load RDB or AOF file in memory. */
void loadDataFromDisk(void) {
    long long start = ustime();

    if (server.aof_state == REDIS_AOF_ON) {
        if (loadAppendOnlyFile(server.aof_filename) == REDIS_OK)
            redisLog(REDIS_NOTICE,"DB loaded from append only file: %.3f seconds",(float)(ustime()-start)/1000000);
    } else {
        if (rdbLoad(server.rdb_filename) == REDIS_OK) {
            redisLog(REDIS_NOTICE,"DB loaded from disk: %.3f seconds",
                (float)(ustime()-start)/1000000);
        } else if (errno != ENOENT) {
            redisLog(REDIS_WARNING,"Fatal error loading the DB: %s. Exiting.",strerror(errno));
            exit(1);
        }
    }
}
The previous post on AOF configuration left one question open: how server.aof_current_size is initialized. That question is answered below.
int loadAppendOnlyFile(char *filename) {
    struct redisClient *fakeClient;
    FILE *fp = fopen(filename,"r");
    struct redis_stat sb;
    int old_aof_state = server.aof_state;
    long loops = 0;

    /* redis_fstat wraps fstat/fstat64: fileno(fp) yields the file descriptor and
     * the file status is stored in sb (see stat(2)); st_size is the size in bytes. */
    if (fp && redis_fstat(fileno(fp),&sb) != -1 && sb.st_size == 0) {
        server.aof_current_size = 0;
        fclose(fp);
        return REDIS_ERR;
    }

    if (fp == NULL) { /* failed to open the file */
        redisLog(REDIS_WARNING,"Fatal error: can't open the append log file for reading: %s",strerror(errno));
        exit(1);
    }

    /* Temporarily disable AOF, to prevent EXEC from feeding a MULTI
     * to the same file we're about to read. */
    server.aof_state = REDIS_AOF_OFF;

    fakeClient = createFakeClient(); /* create the fake client */
    startLoading(fp); /* defined in rdb.c; updates the server's loading state */

    while(1) {
        int argc, j;
        unsigned long len;
        robj **argv;
        char buf[128];
        sds argsds;
        struct redisCommand *cmd;

        /* Serve the clients from time to time.
         * ftello() returns the current offset into the file (an off_t). */
        if (!(loops++ % 1000)) {
            loadingProgress(ftello(fp)); /* record how far into the AOF we have read */
            aeProcessEvents(server.el, AE_FILE_EVENTS|AE_DONT_WAIT); /* process pending events */
        }

        /* read the AOF data line by line */
        if (fgets(buf,sizeof(buf),fp) == NULL) {
            if (feof(fp)) /* reached end of file */
                break;
            else
                goto readerr;
        }

        /* parse the command stored in the AOF file according to the Redis protocol */
        if (buf[0] != '*') goto fmterr;
        argc = atoi(buf+1); /* number of arguments */
        if (argc < 1) goto fmterr;

        argv = zmalloc(sizeof(robj*)*argc); /* argument vector */
        for (j = 0; j < argc; j++) {
            if (fgets(buf,sizeof(buf),fp) == NULL) goto readerr;
            if (buf[0] != '$') goto fmterr;
            len = strtol(buf+1,NULL,10); /* length of this bulk string */
            argsds = sdsnewlen(NULL,len); /* allocate an empty sds of that length */
            /* read exactly len bytes of the bulk payload */
            if (len && fread(argsds,len,1,fp) == 0) goto fmterr;
            argv[j] = createObject(REDIS_STRING,argsds);
            if (fread(buf,2,1,fp) == 0) goto fmterr; /* discard CRLF */
        }

        /* Command lookup */
        cmd = lookupCommand(argv[0]->ptr);
        if (!cmd) {
            redisLog(REDIS_WARNING,"Unknown command '%s' reading the append only file", (char*)argv[0]->ptr);
            exit(1);
        }

        /* Run the command in the context of a fake client */
        fakeClient->argc = argc;
        fakeClient->argv = argv;
        cmd->proc(fakeClient); /* execute the command */

        /* The fake client should not have a reply */
        redisAssert(fakeClient->bufpos == 0 && listLength(fakeClient->reply) == 0);
        /* The fake client should never get blocked */
        redisAssert((fakeClient->flags & REDIS_BLOCKED) == 0);

        /* Clean up. Command code may have changed argv/argc so we use the
         * argv/argc of the client instead of the local variables. */
        for (j = 0; j < fakeClient->argc; j++)
            decrRefCount(fakeClient->argv[j]);
        zfree(fakeClient->argv);
    }

    /* This point can only be reached when EOF is reached without errors.
     * If the client is in the middle of a MULTI/EXEC, log error and quit. */
    if (fakeClient->flags & REDIS_MULTI) goto readerr;

    fclose(fp);
    freeFakeClient(fakeClient);
    server.aof_state = old_aof_state;
    stopLoading();
    aofUpdateCurrentSize(); /* update server.aof_current_size with the AOF file size */
    server.aof_rewrite_base_size = server.aof_current_size;
    return REDIS_OK;

    …………
}
redis_fstat is the author's portability wrapper for the fstat/fstat64 system call; it simply retrieves the file's metadata (see the stat man page for details), and sb.st_size is the current size of the AOF file in bytes. Note that server.aof_fd, the AOF file descriptor used below, is initialized in initServer().
void aofUpdateCurrentSize(void) {
    struct redis_stat sb;

    if (redis_fstat(server.aof_fd,&sb) == -1) {
        redisLog(REDIS_WARNING,"Unable to obtain the AOF file length. stat: %s",
            strerror(errno));
    } else {
        server.aof_current_size = sb.st_size;
    }
}

The corresponding initialization of server.aof_fd in initServer() is:

    /* Open the AOF file if needed. */
    if (server.aof_state == REDIS_AOF_ON) {
        server.aof_fd = open(server.aof_filename,O_WRONLY|O_APPEND|O_CREAT,0644);
        if (server.aof_fd == -1) {
            redisLog(REDIS_WARNING, "Can't open the append-only file: %s",strerror(errno));
            exit(1);
        }
    }
At this point, the Redis server has successfully finished loading the AOF data from disk at startup.
How new data in the server's databases is persisted to disk
When a client executes a command such as SET that modifies the database, the modified data should be appended to the AOF file in real time and flushed to disk according to the configured fsync policy, so that no data is lost. The previous post introduced the three fsync policies: appendfsync always, appendfsync everysec, and appendfsync no; the chosen policy is stored in server.aof_fsync.
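As a reminder of how these policies are selected, a minimal redis.conf fragment enabling AOF with the per-second policy might look like the following sketch (the file name shown is just the default):

appendonly yes
appendfilename "appendonly.aof"
appendfsync everysec
no-appendfsync-on-rewrite no

appendfsync always trades throughput for durability, while appendfsync no leaves flushing entirely to the operating system.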
First, let's look at how Redis appends a data-modifying command to server.aof_buf when a client request changes the dataset.
The call chain is call() -> propagate() -> feedAppendOnlyFile(); call() determines whether executing the command actually modified the dataset.
feedAppendOnlyFile() first checks whether AOF is enabled. If it is, the command that modified the data is re-encoded as a request string according to the Redis protocol (note the special handling of expiration-related commands), and the string is simply appended to server.aof_buf. Pay attention to the last lines of the function, which are the key point: if server.aof_child_pid != -1, the server is currently rewriting the AOF file, so the modified data must also be appended to the server.aof_rewrite_buf_blocks list; once the rewrite finishes, this buffered diff is appended to the new AOF file. See the comments in the code below.
/* Propagate the specified command (in the context of the specified database id)
 * to AOF and Slaves.
 *
 * flags are an xor between:
 * + REDIS_PROPAGATE_NONE (no propagation of command at all)
 * + REDIS_PROPAGATE_AOF (propagate into the AOF file if is enabled)
 * + REDIS_PROPAGATE_REPL (propagate into the replication link)
 */
void propagate(struct redisCommand *cmd, int dbid, robj **argv, int argc,
               int flags)
{
    /* append the data modified by cmd to the AOF */
    if (server.aof_state != REDIS_AOF_OFF && flags & REDIS_PROPAGATE_AOF)
        feedAppendOnlyFile(cmd,dbid,argv,argc);
    if (flags & REDIS_PROPAGATE_REPL)
        replicationFeedSlaves(server.slaves,dbid,argv,argc);
}
/* The command modified the data: first write the update into server.aof_buf. */
void feedAppendOnlyFile(struct redisCommand *cmd, int dictid, robj **argv, int argc) {
    sds buf = sdsempty();
    robj *tmpargv[3];

    /* The DB this command was targeting is not the same as the last command
     * we appendend. To issue a SELECT command is needed. */
    /* The current db differs from the db last selected in the AOF, so a
     * SELECT command is generated to switch databases. */
    if (dictid != server.aof_selected_db) {
        char seldb[64];

        snprintf(seldb,sizeof(seldb),"%d",dictid);
        buf = sdscatprintf(buf,"*2\r\n$6\r\nSELECT\r\n$%lu\r\n%s\r\n",
            (unsigned long)strlen(seldb),seldb);
        server.aof_selected_db = dictid;
    }

    if (cmd->proc == expireCommand || cmd->proc == pexpireCommand ||
        cmd->proc == expireatCommand) {
        /* Translate EXPIRE/PEXPIRE/EXPIREAT into PEXPIREAT */
        buf = catAppendOnlyExpireAtCommand(buf,cmd,argv[1],argv[2]);
    } else if (cmd->proc == setexCommand || cmd->proc == psetexCommand) {
        /* Translate SETEX/PSETEX to SET and PEXPIREAT */
        tmpargv[0] = createStringObject("SET",3);
        tmpargv[1] = argv[1];
        tmpargv[2] = argv[3];
        buf = catAppendOnlyGenericCommand(buf,3,tmpargv);
        decrRefCount(tmpargv[0]);
        buf = catAppendOnlyExpireAtCommand(buf,cmd,argv[1],argv[2]);
    } else { /* all other commands are appended as-is */
        /* All the other commands don't need translation or need the
         * same translation already operated in the command vector
         * for the replication itself. */
        buf = catAppendOnlyGenericCommand(buf,argc,argv);
    }

    /* Append to the AOF buffer. This will be flushed on disk just before
     * of re-entering the event loop, so before the client will get a
     * positive reply about the operation performed. */
    /* buf is appended to server.aof_buf; it is written to the AOF file in
     * beforeSleep() and fsynced to disk according to the configured policy. */
    if (server.aof_state == REDIS_AOF_ON)
        server.aof_buf = sdscatlen(server.aof_buf,buf,sdslen(buf));

    /* If a background append only file rewriting is in progress we want to
     * accumulate the differences between the child DB and the current one
     * in a buffer, so that when the child process will do its work we
     * can append the differences to the new append only file. */
    /* If server.aof_child_pid != -1, a child process is rewriting the AOF
     * (writing a snapshot of the dataset to a temporary file). Commands
     * received during that time must be stored temporarily and appended to
     * the new AOF file once the child finishes, so no data is lost.
     * This is why aof_rewrite_buf_blocks is needed: while the rewrite walks
     * the whole dataset, keys that were already written to the new AOF may
     * be modified again by clients, creating a difference. */
    if (server.aof_child_pid != -1)
        aofRewriteBufferAppend((unsigned char*)buf,sdslen(buf));

    /* The difference between server.aof_buf and server.aof_rewrite_buf_blocks:
     * aof_buf is the normal buffer; while the AOF file is open its contents
     * are continuously written to the AOF file.
     * aof_rewrite_buf_blocks is used when an AOF rewrite has been triggered,
     * for example by CONFIG SET appendonly yes: Redis forks a background
     * process, effectively a snapshot of the dataset at that moment, which
     * writes the data to a temporary file. Commands received in the meantime
     * are recorded here; when the child finishes writing the temporary AOF,
     * the serverCron timer notices the exit and calls
     * backgroundRewriteDoneHandler(), which in turn calls
     * aofRewriteBufferWrite() to append the accumulated diff to the temporary
     * file before it replaces the regular AOF file.
     * Therefore aof_buf normally holds less data than aof_rewrite_buf_blocks,
     * although at the beginning aof_buf may contain some earlier data that
     * the latter does not. */
    sdsfree(buf);
}
Before each iteration of the event loop the server calls beforeSleep(); let's see what this function does.
As the code and comments below show, beforeSleep() does three things: (1) it runs a fast cycle to expire keys, (2) it processes requests received from clients while they were blocked, and (3) it appends the contents of server.aof_buf to the AOF file and fsyncs it to disk as needed. flushAppendOnlyFile() takes a force parameter indicating whether the write to the AOF file must be forced: 0 means the flush may be postponed (delayed write), 1 means write immediately.
/* This function gets called every time Redis is entering the
 * main loop of the event driven library, that is, before to sleep
 * for ready file descriptors. */
void beforeSleep(struct aeEventLoop *eventLoop) {
    REDIS_NOTUSED(eventLoop);
    listNode *ln;
    redisClient *c;

    /* Run a fast expire cycle (the called function will return
     * ASAP if a fast cycle is not needed). */
    if (server.active_expire_enabled && server.masterhost == NULL)
        activeExpireCycle(ACTIVE_EXPIRE_CYCLE_FAST);

    /* Try to process pending commands for clients that were just unblocked. */
    while (listLength(server.unblocked_clients)) {
        ln = listFirst(server.unblocked_clients);
        redisAssert(ln != NULL);
        c = ln->value;
        listDelNode(server.unblocked_clients,ln);
        c->flags &= ~REDIS_UNBLOCKED;

        /* Process remaining data in the input buffer. */
        /* handle the requests the client sent while it was blocked */
        if (c->querybuf && sdslen(c->querybuf) > 0) {
            server.current_client = c;
            processInputBuffer(c);
            server.current_client = NULL;
        }
    }

    /* Write the AOF buffer on disk */
    /* append server.aof_buf to the AOF file and fsync to disk as configured */
    flushAppendOnlyFile(0);
}
In the flushAppendOnlyFile() code below, pay attention to the server.aof_fsync parameter, which selects the policy for fsyncing the AOF file to disk: with AOF_FSYNC_ALWAYS the fsync is performed directly in the main process, while with AOF_FSYNC_EVERYSEC it is handed off to a background thread (the background thread code lives in bio.c).
void flushAppendOnlyFile(int force) {
    ssize_t nwritten;
    int sync_in_progress = 0;

    if (sdslen(server.aof_buf) == 0) return;

    /* check whether fsync jobs are still pending in the background thread */
    if (server.aof_fsync == AOF_FSYNC_EVERYSEC)
        sync_in_progress = bioPendingJobsOfType(REDIS_BIO_AOF_FSYNC) != 0;

    /* With the everysec policy and force == 0, postpone the flush if possible. */
    if (server.aof_fsync == AOF_FSYNC_EVERYSEC && !force) {
        /* With this append fsync policy we do background fsyncing.
         * If the fsync is still in progress we can try to delay
         * the write for a couple of seconds. */
        /* a background fsync is already queued or in progress */
        if (sync_in_progress) {
            if (server.aof_flush_postponed_start == 0) {
                /* No previous write postponinig, remember that we are
                 * postponing the flush and return. */
                server.aof_flush_postponed_start = server.unixtime;
                return;
            } else if (server.unixtime - server.aof_flush_postponed_start < 2) {
                /* We were already waiting for fsync to finish, but for less
                 * than two seconds this is still ok. Postpone again. */
                return;
            }
            /* Otherwise fall trough, and go write since we can't wait
             * over two seconds. */
            server.aof_delayed_fsync++;
            redisLog(REDIS_NOTICE,"Asynchronous AOF fsync is taking too long (disk is busy?). Writing the AOF buffer without waiting for fsync to complete, this may slow down Redis.");
        }
    }
    /* If you are following this code path, then we are going to write so
     * set reset the postponed flush sentinel to zero. */
    server.aof_flush_postponed_start = 0;

    /* We want to perform a single write. This should be guaranteed atomic
     * at least if the filesystem we are writing is a real physical one.
     * While this will save us against the server being killed I don't think
     * there is much to do about the whole server stopping for power problems
     * or alike */
    /* Write the AOF buffer to the file; with luck the write completes atomically. */
    nwritten = write(server.aof_fd,server.aof_buf,sdslen(server.aof_buf));
    if (nwritten != (signed)sdslen(server.aof_buf)) { /* write error or short write */
        /* Ooops, we are in troubles. The best thing to do for now is
         * aborting instead of giving the illusion that everything is
         * working as expected. */
        if (nwritten == -1) {
            redisLog(REDIS_WARNING,"Exiting on error writing to the append-only file: %s",strerror(errno));
        } else {
            redisLog(REDIS_WARNING,"Exiting on short write while writing to "
                                   "the append-only file: %s (nwritten=%ld, "
                                   "expected=%ld)",
                                   strerror(errno),
                                   (long)nwritten,
                                   (long)sdslen(server.aof_buf));

            if (ftruncate(server.aof_fd, server.aof_current_size) == -1) {
                redisLog(REDIS_WARNING, "Could not remove short write "
                         "from the append-only file. Redis may refuse "
                         "to load the AOF the next time it starts. "
                         "ftruncate: %s", strerror(errno));
            }
        }
        exit(1);
    }
    server.aof_current_size += nwritten;

    /* Re-use AOF buffer when it is small enough. The maximum comes from the
     * arena size of 4k minus some overhead (but is otherwise arbitrary). */
    if ((sdslen(server.aof_buf)+sdsavail(server.aof_buf)) < 4000) {
        sdsclear(server.aof_buf);
    } else {
        sdsfree(server.aof_buf);
        server.aof_buf = sdsempty();
    }

    /* Don't fsync if no-appendfsync-on-rewrite is set to yes and there are
     * children doing I/O in the background. */
    /* In that case return here: the data has already been written to the AOF
     * file, it just has not been flushed to disk yet. */
    if (server.aof_no_fsync_on_rewrite &&
        (server.aof_child_pid != -1 || server.rdb_child_pid != -1))
            return;

    /* Perform the fsync if needed. */
    if (server.aof_fsync == AOF_FSYNC_ALWAYS) { /* always: fsync directly in the main process */
        /* aof_fsync is defined as fdatasync() for Linux in order to avoid
         * flushing metadata. */
        aof_fsync(server.aof_fd); /* Let's try to get this data on the disk */
        server.aof_last_fsync = server.unixtime;
    } else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC &&
                server.unixtime > server.aof_last_fsync)) {
        if (!sync_in_progress) aof_background_fsync(server.aof_fd); /* hand the fsync off to the background thread */
        server.aof_last_fsync = server.unixtime;
    }
}
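For completeness, the aof_background_fsync() helper called above lives in aof.c and simply queues an fsync job for the background I/O thread implemented in bio.c; in the 2.8 source it is roughly:

/* Starts a background task that performs fsync() against the specified
 * file descriptor in another thread (see bio.c). */
void aof_background_fsync(int fd) {
    bioCreateBackgroundJob(REDIS_BIO_AOF_FSYNC,(void*)(long)fd,NULL,NULL);
}

bioPendingJobsOfType(REDIS_BIO_AOF_FSYNC), checked at the top of flushAppendOnlyFile(), counts how many of these queued jobs the background thread has not yet completed.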
Summary
At this point we have covered how the Redis server loads the AOF file at startup and how new data produced by client requests is appended to the AOF file, as well as how the data written to the AOF file is flushed to disk according to the configured fsync policy: either fsynced directly in the main process or handed off to a background thread.
What remains of AOF persistence is the AOF rewrite, i.e. handling the BGREWRITEAOF request sent by clients; that part will be analyzed in the next post.
Many thanks to this blog post, which was a huge help in understanding Redis AOF persistence: http://chenzhenianqing.cn/articles/786.html
My annotated Redis 2.8.2 source code has been pushed to GitHub; interested readers can download it, and I will keep updating it: https://github.com/xkeyideal/annotated-redis-2.8.2
I am not very familiar with Git, so I would appreciate it if someone could show me the ropes.