The ngx_event_core_module module is an event-type module, and it comes first in order among all event modules. It mainly accomplishes two tasks: deciding which event module will serve as the event-driven mechanism, and creating the connection pool (together with the read/write event pools) used by each worker process.
static ngx_command_t  ngx_event_core_commands[] = {

    /* Size of the connection pool, i.e. the maximum number of TCP
     * connections each worker process supports */
    { ngx_string("worker_connections"),
      NGX_EVENT_CONF|NGX_CONF_TAKE1,
      ngx_event_connections,
      0,
      0,
      NULL },

    /* Decides which event module is used as the event-driven mechanism */
    { ngx_string("use"),
      NGX_EVENT_CONF|NGX_CONF_TAKE1,
      ngx_event_use,
      0,
      0,
      NULL },

    /* Corresponds to the available member of the ngx_event_s structure.
     * For the epoll event-driven mode, it means that on receiving a
     * new-connection event, accept is called repeatedly to accept as
     * many connections as possible at once */
    { ngx_string("multi_accept"),
      NGX_EVENT_CONF|NGX_CONF_FLAG,
      ngx_conf_set_flag_slot,
      0,
      offsetof(ngx_event_conf_t, multi_accept),
      NULL },

    /* Decides whether the accept_mutex load-balancing lock is used
     * (ngx_event_core_init_conf below initializes it to off) */
    { ngx_string("accept_mutex"),
      NGX_EVENT_CONF|NGX_CONF_FLAG,
      ngx_conf_set_flag_slot,
      0,
      offsetof(ngx_event_conf_t, accept_mutex),
      NULL },

    /* With accept_mutex enabled, a worker that fails to take the lock
     * waits accept_mutex_delay milliseconds before it tries to handle
     * new-connection events again */
    { ngx_string("accept_mutex_delay"),
      NGX_EVENT_CONF|NGX_CONF_TAKE1,
      ngx_conf_set_msec_slot,
      0,
      offsetof(ngx_event_conf_t, accept_mutex_delay),
      NULL },

    /* Prints debug-level logs for TCP connections from the specified IP */
    { ngx_string("debug_connection"),
      NGX_EVENT_CONF|NGX_CONF_TAKE1,
      ngx_event_debug_connection,
      0,
      0,
      NULL },

      ngx_null_command
};
typedef struct {
    /* Size of the connection pool */
    ngx_uint_t    connections;

    /* Index of the selected event module among all event modules,
     * i.e. its ctx_index member */
    ngx_uint_t    use;

    /* Flag; when 1, on receiving a new-connection event the worker
     * establishes as many connections as possible in one go */
    ngx_flag_t    multi_accept;

    /* Flag; when 1, the load-balancing lock is enabled */
    ngx_flag_t    accept_mutex;

    /* The load-balancing lock makes workers that fail to acquire it delay
     * establishing new connections; accept_mutex_delay is the length of
     * that delay */
    ngx_msec_t    accept_mutex_delay;

    /* Name of the selected event module; it matches the use member */
    u_char       *name;

#if (NGX_DEBUG)
    /* In a --with-debug build, debug-level logs can be emitted only for
     * connections established by certain clients; the debug_connection
     * array holds those clients' address information */
    ngx_array_t   debug_connection;
#endif
} ngx_event_conf_t;
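To illustrate how these directives map onto ngx_event_conf_t, a typical events block in nginx.conf might look like the following; the concrete values are examples only, and debug_connection additionally requires a --with-debug build:

events {
    use                 epoll;          # ecf->use / ecf->name
    worker_connections  1024;           # ecf->connections
    multi_accept        on;             # ecf->multi_accept
    accept_mutex        on;             # ecf->accept_mutex
    accept_mutex_delay  500ms;          # ecf->accept_mutex_delay
    debug_connection    192.168.1.1;    # ecf->debug_connection (--with-debug)
}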
static ngx_event_module_t  ngx_event_core_module_ctx = {
    &event_core_name,
    ngx_event_core_create_conf,            /* create configuration */
    ngx_event_core_init_conf,              /* init configuration */

    { NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
};
As this shows, the ngx_event_core_module module implements only the create_conf and init_conf methods. This is because it is not the module that actually drives TCP network events, so it implements none of the methods in ngx_event_actions_t.
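For reference, the interface in question is defined in src/event/ngx_event.h essentially as follows; the ten NULL pointers in ngx_event_core_module_ctx above correspond one-to-one to the members of ngx_event_actions_t:

typedef struct {
    /* add/remove or enable/disable a single event */
    ngx_int_t  (*add)(ngx_event_t *ev, ngx_int_t event, ngx_uint_t flags);
    ngx_int_t  (*del)(ngx_event_t *ev, ngx_int_t event, ngx_uint_t flags);
    ngx_int_t  (*enable)(ngx_event_t *ev, ngx_int_t event, ngx_uint_t flags);
    ngx_int_t  (*disable)(ngx_event_t *ev, ngx_int_t event, ngx_uint_t flags);

    /* add/remove a whole connection (both its read and write events) */
    ngx_int_t  (*add_conn)(ngx_connection_t *c);
    ngx_int_t  (*del_conn)(ngx_connection_t *c, ngx_uint_t flags);

    ngx_int_t  (*notify)(ngx_event_handler_pt handler);

    /* the heart of the event loop: wait for and dispatch events */
    ngx_int_t  (*process_events)(ngx_cycle_t *cycle, ngx_msec_t timer,
                                 ngx_uint_t flags);

    /* initialize/tear down the event-driven mechanism */
    ngx_int_t  (*init)(ngx_cycle_t *cycle, ngx_msec_t timer);
    void       (*done)(ngx_cycle_t *cycle);
} ngx_event_actions_t;

typedef struct {
    ngx_str_t              *name;

    void                 *(*create_conf)(ngx_cycle_t *cycle);
    char                 *(*init_conf)(ngx_cycle_t *cycle, void *conf);

    ngx_event_actions_t     actions;
} ngx_event_module_t;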
The ngx_event_core_create_conf method merely allocates memory for an ngx_event_conf_t structure and initializes its member values.
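For completeness, its body is short; the sketch below follows the upstream implementation in src/event/ngx_event.c, setting every member to an NGX_CONF_UNSET-style sentinel that the init_conf pass later replaces:

static void *
ngx_event_core_create_conf(ngx_cycle_t *cycle)
{
    ngx_event_conf_t  *ecf;

    ecf = ngx_palloc(cycle->pool, sizeof(ngx_event_conf_t));
    if (ecf == NULL) {
        return NULL;
    }

    /* "unset" sentinels so ngx_conf_init_*_value can detect missing
     * directives later */
    ecf->connections = NGX_CONF_UNSET_UINT;
    ecf->use = NGX_CONF_UNSET_UINT;
    ecf->multi_accept = NGX_CONF_UNSET;
    ecf->accept_mutex = NGX_CONF_UNSET;
    ecf->accept_mutex_delay = NGX_CONF_UNSET_MSEC;
    ecf->name = (void *) NGX_CONF_UNSET;

#if (NGX_DEBUG)
    /* array of ngx_cidr_t filled in by the debug_connection directive */
    if (ngx_array_init(&ecf->debug_connection, cycle->pool, 4,
                       sizeof(ngx_cidr_t)) == NGX_ERROR)
    {
        return NULL;
    }
#endif

    return ecf;
}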
The ngx_event_core_init_conf method selects a suitable event-processing module according to the operating system platform; in addition, it initializes the important values that were not configured in nginx.conf.
static char *
ngx_event_core_init_conf(ngx_cycle_t *cycle, void *conf)
{
    TRACE_DEBUG(NGX_LOG_INFO, "ngx_event_core_module: init conf");

    ngx_event_conf_t  *ecf = conf;

#if (NGX_HAVE_EPOLL) && !(NGX_TEST_BUILD_EPOLL)
    int                  fd;
#endif
    ngx_int_t            i;
    ngx_module_t        *module;
    ngx_event_module_t  *event_module;

    module = NULL;

    /* nginx selects a suitable event-processing module for the current
     * system platform */
#if (NGX_HAVE_EPOLL) && !(NGX_TEST_BUILD_EPOLL)

    fd = epoll_create(100);

    if (fd != -1) {
        (void) close(fd);
        module = &ngx_epoll_module;

    } else if (ngx_errno != NGX_ENOSYS) {
        module = &ngx_epoll_module;
    }

#endif

#if (NGX_HAVE_DEVPOLL) && !(NGX_TEST_BUILD_DEVPOLL)

    module = &ngx_devpoll_module;

#endif

#if (NGX_HAVE_KQUEUE)

    module = &ngx_kqueue_module;

#endif

#if (NGX_HAVE_SELECT)

    if (module == NULL) {
        module = &ngx_select_module;
    }

#endif

    /* If nothing was chosen above, fall back to the first event module
     * other than ngx_event_core_module itself */
    if (module == NULL) {
        for (i = 0; cycle->modules[i]; i++) {

            if (cycle->modules[i]->type != NGX_EVENT_MODULE) {
                continue;
            }

            event_module = cycle->modules[i]->ctx;

            if (ngx_strcmp(event_module->name->data, event_core_name.data)
                == 0)
            {
                continue;
            }

            module = cycle->modules[i];
            break;
        }
    }

    if (module == NULL) {
        ngx_log_error(NGX_LOG_EMERG, cycle->log, 0, "no events module found");
        return NGX_CONF_ERROR;
    }

    ngx_conf_init_uint_value(ecf->connections, DEFAULT_CONNECTIONS);
    cycle->connection_n = ecf->connections;

    /* Record the index of the chosen event module in ecf->use */
    ngx_conf_init_uint_value(ecf->use, module->ctx_index);

    event_module = module->ctx;
    ngx_conf_init_ptr_value(ecf->name, event_module->name->data);

    ngx_conf_init_value(ecf->multi_accept, 0);
    ngx_conf_init_value(ecf->accept_mutex, 0);
    ngx_conf_init_msec_value(ecf->accept_mutex_delay, 500);

    return NGX_CONF_OK;
}
/* This module decides which event-driven mechanism is used and how
 * events are managed */
ngx_module_t  ngx_event_core_module = {
    NGX_MODULE_V1,
    &ngx_event_core_module_ctx,            /* module context */
    ngx_event_core_commands,               /* module directives */
    NGX_EVENT_MODULE,                      /* module type */
    NULL,                                  /* init master */

    /* Called while no worker child process has been forked yet */
    ngx_event_module_init,                 /* init module */

    /* After the fork, each worker process calls the ngx_event_process_init
     * method of ngx_event_core_module before entering its formal working
     * loop */
    ngx_event_process_init,                /* init process */
    NULL,                                  /* init thread */
    NULL,                                  /* exit thread */
    NULL,                                  /* exit process */
    NULL,                                  /* exit master */
    NGX_MODULE_V1_PADDING
};
static ngx_int_t
ngx_event_module_init(ngx_cycle_t *cycle)
{
    void              ***cf;
    u_char              *shared;
    size_t               size, cl;
    ngx_shm_t            shm;
    ngx_time_t          *tp;
    ngx_core_conf_t     *ccf;
    ngx_event_conf_t    *ecf;

    /* Get the array of configuration structure pointers for all event
     * modules, held by the ngx_events_module module */
    cf = ngx_get_conf(cycle->conf_ctx, ngx_events_module);

    /* From that array, get the configuration structure of the
     * ngx_event_core_module module */
    ecf = (*cf)[ngx_event_core_module.ctx_index];

    if (!ngx_test_config && ngx_process <= NGX_PROCESS_MASTER) {
        ngx_log_error(NGX_LOG_NOTICE, cycle->log, 0,
                      "using the \"%s\" event method", ecf->name);
    }

    ccf = (ngx_core_conf_t *) ngx_get_conf(cycle->conf_ctx, ngx_core_module);

    ngx_timer_resolution = ccf->timer_resolution;

#if !(NGX_WIN32)
    {
    ngx_int_t      limit;
    struct rlimit  rlmt;

    if (getrlimit(RLIMIT_NOFILE, &rlmt) == -1) {
        ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
                      "getrlimit(RLIMIT_NOFILE) failed, ignored");

    } else {
        /* Warn when worker_connections exceeds the open-file limit and no
         * higher rlimit_nofile was configured */
        if (ecf->connections > (ngx_uint_t) rlmt.rlim_cur
            && (ccf->rlimit_nofile == NGX_CONF_UNSET
                || ecf->connections > (ngx_uint_t) ccf->rlimit_nofile))
        {
            limit = (ccf->rlimit_nofile == NGX_CONF_UNSET) ?
                         (ngx_int_t) rlmt.rlim_cur : ccf->rlimit_nofile;

            ngx_log_error(NGX_LOG_WARN, cycle->log, 0,
                          "%ui worker_connections exceed "
                          "open file resource limit: %i",
                          ecf->connections, limit);
        }
    }
    }
#endif /* !(NGX_WIN32) */

    if (ccf->master == 0) {
        return NGX_OK;
    }

    if (ngx_accept_mutex_ptr) {
        return NGX_OK;
    }

    /* cl should be equal to or greater than cache line size */

    cl = 128;

    size = cl            /* ngx_accept_mutex */
           + cl          /* ngx_connection_counter */
           + cl;         /* ngx_temp_number */

#if (NGX_STAT_STUB)

    size += cl           /* ngx_stat_accepted */
           + cl          /* ngx_stat_handled */
           + cl          /* ngx_stat_requests */
           + cl          /* ngx_stat_active */
           + cl          /* ngx_stat_reading */
           + cl          /* ngx_stat_writing */
           + cl;         /* ngx_stat_waiting */

#endif

    shm.size = size;
    ngx_str_set(&shm.name, "nginx_shared_zone");
    shm.log = cycle->log;

    if (ngx_shm_alloc(&shm) != NGX_OK) {
        return NGX_ERROR;
    }

    shared = shm.addr;

    ngx_accept_mutex_ptr = (ngx_atomic_t *) shared;
    ngx_accept_mutex.spin = (ngx_uint_t) -1;

    if (ngx_shmtx_create(&ngx_accept_mutex, (ngx_shmtx_sh_t *) shared,
                         cycle->lock_file.data)
        != NGX_OK)
    {
        return NGX_ERROR;
    }

    ngx_connection_counter = (ngx_atomic_t *) (shared + 1 * cl);

    (void) ngx_atomic_cmp_set(ngx_connection_counter, 0, 1);

    ngx_log_debug2(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "counter: %p, %uA",
                   ngx_connection_counter, *ngx_connection_counter);

    ngx_temp_number = (ngx_atomic_t *) (shared + 2 * cl);

    tp = ngx_timeofday();

    ngx_random_number = (tp->msec << 16) + ngx_pid;

#if (NGX_STAT_STUB)

    ngx_stat_accepted = (ngx_atomic_t *) (shared + 3 * cl);
    ngx_stat_handled = (ngx_atomic_t *) (shared + 4 * cl);
    ngx_stat_requests = (ngx_atomic_t *) (shared + 5 * cl);
    ngx_stat_active = (ngx_atomic_t *) (shared + 6 * cl);
    ngx_stat_reading = (ngx_atomic_t *) (shared + 7 * cl);
    ngx_stat_writing = (ngx_atomic_t *) (shared + 8 * cl);
    ngx_stat_waiting = (ngx_atomic_t *) (shared + 9 * cl);

#endif

    return NGX_OK;
}
This method mainly initializes a number of variables, in particular the atomic statistics variables used by the ngx_http_stub_status_module statistics module.
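To see how these shared atomic counters are consumed, here is a minimal sketch of the kind of update nginx performs elsewhere (for instance, ngx_event_accept bumps ngx_stat_accepted when NGX_STAT_STUB is defined); it is illustrative, not a copy of any single function:

#if (NGX_STAT_STUB)
    /* Every worker maps the same "nginx_shared_zone" shared memory, so
     * these atomic increments are visible across processes; the
     * stub_status handler simply reads the counters back */
    (void) ngx_atomic_fetch_add(ngx_stat_accepted, 1);
    (void) ngx_atomic_fetch_add(ngx_stat_active, 1);
#endif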
static ngx_int_t
ngx_event_process_init(ngx_cycle_t *cycle)
{
    ngx_uint_t           m, i;
    ngx_event_t         *rev, *wev;
    ngx_listening_t     *ls;
    ngx_connection_t    *c, *next, *old;
    ngx_core_conf_t     *ccf;
    ngx_event_conf_t    *ecf;
    ngx_event_module_t  *module;

    ccf = (ngx_core_conf_t *) ngx_get_conf(cycle->conf_ctx, ngx_core_module);
    ecf = ngx_event_get_conf(cycle->conf_ctx, ngx_event_core_module);

    /* Only when the accept_mutex load-balancing lock is enabled, master mode
     * is used, and the number of worker processes is greater than 1 is it
     * finally decided that the processes will use the accept_mutex lock.
     * So even if accept_mutex is turned on in the configuration file, the
     * lock is not used at run time without master mode or with a single
     * worker process (since multiple processes never compete for connections
     * on one listening port in that case, there is no load to balance among
     * workers) */
    if (ccf->master && ccf->worker_processes > 1 && ecf->accept_mutex) {
        /* Only this assignment definitively enables the load-balancing lock */
        ngx_use_accept_mutex = 1;
        ngx_accept_mutex_held = 0;
        ngx_accept_mutex_delay = ecf->accept_mutex_delay;

    } else {
        /* Disable the load-balancing lock */
        ngx_use_accept_mutex = 0;
    }

#if (NGX_WIN32)

    /*
     * disable accept mutex on win32 as it may cause deadlock if
     * grabbed by a process which can't accept connections
     */

    ngx_use_accept_mutex = 0;

#endif

    /* Initialize the two posted-event queues, then the timer implemented
     * as a red-black tree */
    ngx_queue_init(&ngx_posted_accept_events);
    ngx_queue_init(&ngx_posted_events);

    if (ngx_event_timer_init(cycle->log) == NGX_ERROR) {
        return NGX_ERROR;
    }

    /* For the event module selected by the use directive, call the init
     * method of ngx_event_actions_t under its ngx_event_module_t interface
     * to initialize that event module */
    for (m = 0; cycle->modules[m]; m++) {
        if (cycle->modules[m]->type != NGX_EVENT_MODULE) {
            continue;
        }

        if (cycle->modules[m]->ctx_index != ecf->use) {
            continue;
        }

        module = cycle->modules[m]->ctx;

        if (module->actions.init(cycle, ngx_timer_resolution) != NGX_OK) {
            /* fatal */
            exit(2);
        }

        break;
    }

#if !(NGX_WIN32)

    /* If the timer_resolution directive is set in nginx.conf, i.e. the time
     * precision must be controlled, call setitimer so that
     * ngx_timer_signal_handler is invoked every timer_resolution
     * milliseconds */
    if (ngx_timer_resolution && !(ngx_event_flags & NGX_USE_TIMER_EVENT)) {
        struct sigaction  sa;
        struct itimerval  itv;

        ngx_memzero(&sa, sizeof(struct sigaction));

        /* ngx_timer_signal_handler merely sets the global variable
         * ngx_event_timer_alarm to 1, meaning the time must be updated.
         * In the process_events method of ngx_event_actions_t, every
         * event-driven module must call ngx_time_update to refresh the
         * system time when ngx_event_timer_alarm is 1, and set
         * ngx_event_timer_alarm back to 0 once the update is done */
        sa.sa_handler = ngx_timer_signal_handler;
        sigemptyset(&sa.sa_mask);

        if (sigaction(SIGALRM, &sa, NULL) == -1) {
            ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
                          "sigaction(SIGALRM) failed");
            return NGX_ERROR;
        }

        itv.it_interval.tv_sec = ngx_timer_resolution / 1000;
        itv.it_interval.tv_usec = (ngx_timer_resolution % 1000) * 1000;
        itv.it_value.tv_sec = ngx_timer_resolution / 1000;
        itv.it_value.tv_usec = (ngx_timer_resolution % 1000) * 1000;

        if (setitimer(ITIMER_REAL, &itv, NULL) == -1) {
            ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
                          "setitimer() failed");
        }
    }

    if (ngx_event_flags & NGX_USE_FD_EVENT) {
        struct rlimit  rlmt;

        if (getrlimit(RLIMIT_NOFILE, &rlmt) == -1) {
            ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
                          "getrlimit(RLIMIT_NOFILE) failed");
            return NGX_ERROR;
        }

        cycle->files_n = (ngx_uint_t) rlmt.rlim_cur;

        cycle->files = ngx_calloc(sizeof(ngx_connection_t *) * cycle->files_n,
                                  cycle->log);
        if (cycle->files == NULL) {
            return NGX_ERROR;
        }
    }

#else

    if (ngx_timer_resolution && !(ngx_event_flags & NGX_USE_TIMER_EVENT)) {
        ngx_log_error(NGX_LOG_WARN, cycle->log, 0,
                      "the \"timer_resolution\" directive is not supported "
                      "with the configured event method, ignored");
        ngx_timer_resolution = 0;
    }

#endif

    /* Preallocate the ngx_connection_t array as the connection pool and
     * point the connections member of ngx_cycle_t at it. The array size is
     * the number of connections configured by worker_connections in
     * nginx.conf */
    cycle->connections =
        ngx_alloc(sizeof(ngx_connection_t) * cycle->connection_n, cycle->log);
    if (cycle->connections == NULL) {
        return NGX_ERROR;
    }

    c = cycle->connections;

    /* Preallocate the ngx_event_t array as the read-event pool and point
     * the read_events member of ngx_cycle_t at it; its size is again the
     * worker_connections value from nginx.conf */
    cycle->read_events = ngx_alloc(sizeof(ngx_event_t) * cycle->connection_n,
                                   cycle->log);
    if (cycle->read_events == NULL) {
        return NGX_ERROR;
    }

    rev = cycle->read_events;
    for (i = 0; i < cycle->connection_n; i++) {
        rev[i].closed = 1;
        rev[i].instance = 1;
    }

    /* Preallocate the ngx_event_t array as the write-event pool and point
     * the write_events member of ngx_cycle_t at it; its size is again the
     * worker_connections value from nginx.conf */
    cycle->write_events = ngx_alloc(sizeof(ngx_event_t) * cycle->connection_n,
                                    cycle->log);
    if (cycle->write_events == NULL) {
        return NGX_ERROR;
    }

    wev = cycle->write_events;
    /* Mark every write event in the pool as closed, i.e. unused */
    for (i = 0; i < cycle->connection_n; i++) {
        wev[i].closed = 1;
    }

    i = cycle->connection_n;
    next = NULL;

    /* By index, attach the corresponding read/write events from the arrays
     * above to each ngx_connection_t object, and chain the connections into
     * a list through the data member used as a next pointer, preparing to
     * set up the free-connection list */
    do {
        i--;

        c[i].data = next;
        c[i].read = &cycle->read_events[i];
        c[i].write = &cycle->write_events[i];
        c[i].fd = (ngx_socket_t) -1;

        next = &c[i];
    } while (i);

    /* Point the free-connection list free_connections of ngx_cycle_t at the
     * first element of the connections array, i.e. the head of the singly
     * linked list formed above through the data members of all
     * ngx_connection_t objects */
    cycle->free_connections = next;
    cycle->free_connection_n = cycle->connection_n;

    /* for each listening socket */

    /* From the connection pool just built, assign a connection to the
     * connection member of every ngx_listening_t listening object, and set
     * the handler of the listening port's read event to ngx_event_accept;
     * that is, ngx_event_accept establishes new connections when
     * new-connection events arrive */
    ls = cycle->listening.elts;
    for (i = 0; i < cycle->listening.nelts; i++) {

#if (NGX_HAVE_REUSEPORT)
        if (ls[i].reuseport && ls[i].worker != ngx_worker) {
            continue;
        }
#endif

        c = ngx_get_connection(ls[i].fd, cycle->log);

        if (c == NULL) {
            return NGX_ERROR;
        }

        c->type = ls[i].type;
        c->log = &ls[i].log;

        c->listening = &ls[i];
        ls[i].connection = c;

        rev = c->read;

        rev->log = c->log;
        rev->accept = 1;

#if (NGX_HAVE_DEFERRED_ACCEPT)
        rev->deferred_accept = ls[i].deferred_accept;
#endif

        if (!(ngx_event_flags & NGX_USE_IOCP_EVENT)) {
            if (ls[i].previous) {

                /*
                 * delete the old accept events that were bound to
                 * the old cycle read events array
                 */

                old = ls[i].previous->connection;

                if (ngx_del_event(old->read, NGX_READ_EVENT, NGX_CLOSE_EVENT)
                    == NGX_ERROR)
                {
                    return NGX_ERROR;
                }

                old->fd = (ngx_socket_t) -1;
            }
        }

#if (NGX_WIN32)

        if (ngx_event_flags & NGX_USE_IOCP_EVENT) {
            ngx_iocp_conf_t  *iocpcf;

            rev->handler = ngx_event_acceptex;

            if (ngx_use_accept_mutex) {
                continue;
            }

            if (ngx_add_event(rev, 0, NGX_IOCP_ACCEPT) == NGX_ERROR) {
                return NGX_ERROR;
            }

            ls[i].log.handler = ngx_acceptex_log_error;

            iocpcf = ngx_event_get_conf(cycle->conf_ctx, ngx_iocp_module);
            if (ngx_event_post_acceptex(&ls[i], iocpcf->post_acceptex)
                == NGX_ERROR)
            {
                return NGX_ERROR;
            }

        } else {
            rev->handler = ngx_event_accept;

            if (ngx_use_accept_mutex) {
                continue;
            }

            if (ngx_add_event(rev, NGX_READ_EVENT, 0) == NGX_ERROR) {
                return NGX_ERROR;
            }
        }

#else

        /* Set the read event's handler: ngx_event_accept for TCP listening
         * sockets, ngx_event_recvmsg for UDP ones */
        rev->handler = (c->type == SOCK_STREAM) ? ngx_event_accept
                                                : ngx_event_recvmsg;

#if (NGX_HAVE_REUSEPORT)

        if (ls[i].reuseport) {
            if (ngx_add_event(rev, NGX_READ_EVENT, 0) == NGX_ERROR) {
                return NGX_ERROR;
            }

            continue;
        }

#endif

        if (ngx_use_accept_mutex) {
            continue;
        }

#if (NGX_HAVE_EPOLLEXCLUSIVE)

        if ((ngx_event_flags & NGX_USE_EPOLL_EVENT)
            && ccf->worker_processes > 1)
        {
            if (ngx_add_event(rev, NGX_READ_EVENT, NGX_EXCLUSIVE_EVENT)
                == NGX_ERROR)
            {
                return NGX_ERROR;
            }

            continue;
        }

#endif

        /* Add the listening connection's read event to the event-driven
         * module; from this point on, epoll or another event module starts
         * monitoring the listening socket and serving users */
        if (ngx_add_event(rev, NGX_READ_EVENT, 0) == NGX_ERROR) {
            return NGX_ERROR;
        }

#endif
    }

    return NGX_OK;
}
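The free list built above is exactly what ngx_get_connection pops from later. As a minimal sketch under the same data-as-next-pointer convention (the real function also maintains cycle->files and logs pool exhaustion; the name below is hypothetical):

/* Sketch only: the core of what ngx_get_connection does, popping the head
 * of the free-connection list that ngx_event_process_init prepared */
static ngx_connection_t *
get_free_connection_sketch(ngx_cycle_t *cycle, ngx_socket_t s)
{
    ngx_connection_t  *c;

    c = cycle->free_connections;
    if (c == NULL) {
        return NULL;    /* pool exhausted: "worker_connections are not enough" */
    }

    cycle->free_connections = c->data;    /* advance the free-list head */
    cycle->free_connection_n--;

    c->fd = s;          /* hand the socket over to this connection */

    return c;
}

Freeing a connection is the mirror image: the connection is pushed back onto the head of the list through its data member, which is why the pool never needs per-connection allocation after startup.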