Under high concurrency, a web server's network IO is usually built on IO multiplexing; Apache uses select/poll, while Nginx, on Linux 2.6 and later, uses epoll for its network IO, which significantly improves its ability to serve concurrent connections.
In this chapter, we will look at how NGINX uses epoll.
First, let's look at a diagram of the data structures:
1. Starting with the first part of the diagram: NGINX obtains socket descriptors, binds them to local addresses, and listens on them. Because NGINX supports multiple server blocks, each possibly using a different port or a different protocol family, it needs multiple socket handles. NGINX uses the ngx_listening_t structure to record each open listening socket and keeps these structures in an array. This is done while initializing the cycle.
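For reference, here is an abridged sketch of ngx_listening_t as declared in src/core/ngx_connection.h; the exact field set varies across NGINX versions, and most fields are omitted here:

struct ngx_listening_s {
    ngx_socket_t       fd;          /* the listening socket descriptor */
    struct sockaddr   *sockaddr;    /* local address to bind */
    socklen_t          socklen;     /* size of sockaddr */
    ngx_str_t          addr_text;   /* textual form of the address */
    int                type;        /* SOCK_STREAM, ... */
    int                backlog;     /* listen() backlog */

    ngx_connection_handler_pt  handler;  /* called for new connections */

    /* ... many more fields omitted ... */
};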
2. We distinguish two classes of socket handles. The first class is the listening sockets the server requested above; these deal with incoming client connections, and when a new connection arrives, an event is raised on one of them. The second class is the sockets the server obtains by accept()ing a client once a connection is established; on these the server performs its reads and writes. The sketch below illustrates the two classes.
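To make the two classes concrete, here is a minimal, self-contained epoll sketch in plain C (not NGINX code; error handling abbreviated, port 8080 chosen arbitrarily). The listening fd is registered for EPOLLIN, so a readable event on it means "new connection"; each accepted fd is registered separately, and its events mean "data to read or write":

#include <sys/epoll.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <unistd.h>

int main(void)
{
    int lfd = socket(AF_INET, SOCK_STREAM, 0);
    struct sockaddr_in addr = { 0 };
    addr.sin_family = AF_INET;
    addr.sin_addr.s_addr = htonl(INADDR_ANY);
    addr.sin_port = htons(8080);
    bind(lfd, (struct sockaddr *) &addr, sizeof(addr));
    listen(lfd, 511);

    int ep = epoll_create(1024);
    struct epoll_event ee, events[64];

    /* class 1: the listening socket; EPOLLIN here means "new connection" */
    ee.events = EPOLLIN;
    ee.data.fd = lfd;
    epoll_ctl(ep, EPOLL_CTL_ADD, lfd, &ee);

    for (;;) {
        int n = epoll_wait(ep, events, 64, -1);
        for (int i = 0; i < n; i++) {
            if (events[i].data.fd == lfd) {
                /* class 1 event: accept the client... */
                int cfd = accept(lfd, NULL, NULL);
                ee.events = EPOLLIN;
                ee.data.fd = cfd;
                /* ...and register the class-2 socket for read events */
                epoll_ctl(ep, EPOLL_CTL_ADD, cfd, &ee);
            } else {
                /* class 2 event: read/write on an accepted connection */
                char buf[4096];
                ssize_t len = read(events[i].data.fd, buf, sizeof(buf));
                if (len <= 0) {
                    close(events[i].data.fd);
                }
                /* a real server would parse the request and respond here */
            }
        }
    }
}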
Next, let's look at the processing flow drawn in the figure below.
Now let's walk through the whole process in the code.
1. Parse the configuration file and request the socket fds
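The open/bind/listen work is done by ngx_open_listening_sockets() in src/core/ngx_connection.c, iterating over the cycle->listening array built from the parsed configuration. A heavily abridged sketch of that loop (option handling, retries, nonblocking setup, and error paths all omitted; ngx_socket is NGINX's wrapper macro around socket()):

ngx_int_t
ngx_open_listening_sockets(ngx_cycle_t *cycle)
{
    int               reuseaddr = 1;
    ngx_uint_t        i;
    ngx_socket_t      s;
    ngx_listening_t  *ls;

    ls = cycle->listening.elts;
    for (i = 0; i < cycle->listening.nelts; i++) {

        /* one socket per ngx_listening_t entry */
        s = ngx_socket(ls[i].sockaddr->sa_family, ls[i].type, 0);

        /* allow fast restarts while old sockets linger in TIME_WAIT */
        setsockopt(s, SOL_SOCKET, SO_REUSEADDR,
                   (const void *) &reuseaddr, sizeof(int));

        bind(s, ls[i].sockaddr, ls[i].socklen);
        listen(s, ls[i].backlog);

        ls[i].fd = s;   /* remembered for the event module later */
    }

    return NGX_OK;
}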
2. Initialize the event modules
static ngx_int_t
ngx_event_process_init(ngx_cycle_t *cycle)
{
    ...

    /* run module->actions.init for the NGX_EVENT_MODULE in use */

    for (m = 0; ngx_modules[m]; m++) {
        if (ngx_modules[m]->type != NGX_EVENT_MODULE) {
            continue;
        }

        if (ngx_modules[m]->ctx_index != ecf->use) {
            continue;
        }

        module = ngx_modules[m]->ctx;

        if (module->actions.init(cycle, ngx_timer_resolution) != NGX_OK) {
            /* fatal */
            exit(2);
        }

        break;
    }

    ...

    cycle->connections =
        ngx_alloc(sizeof(ngx_connection_t) * cycle->connection_n, cycle->log);
    if (cycle->connections == NULL) {
        return NGX_ERROR;
    }

    c = cycle->connections;

    cycle->read_events =
        ngx_alloc(sizeof(ngx_event_t) * cycle->connection_n, cycle->log);
    if (cycle->read_events == NULL) {
        return NGX_ERROR;
    }

    rev = cycle->read_events;
    for (i = 0; i < cycle->connection_n; i++) {
        rev[i].closed = 1;
        rev[i].instance = 1;
#if (NGX_THREADS)
        rev[i].lock = &c[i].lock;
        rev[i].own_lock = &c[i].lock;
#endif
    }

    cycle->write_events =
        ngx_alloc(sizeof(ngx_event_t) * cycle->connection_n, cycle->log);
    if (cycle->write_events == NULL) {
        return NGX_ERROR;
    }

    wev = cycle->write_events;
    for (i = 0; i < cycle->connection_n; i++) {
        wev[i].closed = 1;
#if (NGX_THREADS)
        wev[i].lock = &c[i].lock;
        wev[i].own_lock = &c[i].lock;
#endif
    }

    /* chain every connection into the free_connections list via c->data */

    i = cycle->connection_n;
    next = NULL;

    do {
        i--;

        c[i].data = next;
        c[i].read = &cycle->read_events[i];
        c[i].write = &cycle->write_events[i];
        c[i].fd = (ngx_socket_t) -1;

        next = &c[i];

#if (NGX_THREADS)
        c[i].lock = 0;
#endif
    } while (i);

    cycle->free_connections = next;
    cycle->free_connection_n = cycle->connection_n;

    /* for each listening socket */

    ls = cycle->listening.elts;
    for (i = 0; i < cycle->listening.nelts; i++) {

        /* grab a free connection: each listening socket fd occupies one */
        c = ngx_get_connection(ls[i].fd, cycle->log);

        ...

        rev->handler = ngx_event_accept;

        if (ngx_use_accept_mutex) {
            continue;
        }

        if (ngx_event_flags & NGX_USE_RTSIG_EVENT) {
            if (ngx_add_conn(c) == NGX_ERROR) {
                return NGX_ERROR;
            }

        } else {
            /*
             * with the epoll module, this registers a read event with
             * operation EPOLL_CTL_ADD, i.e. it watches the server's
             * listening fd for new connection requests
             */
            if (ngx_add_event(rev, NGX_READ_EVENT, 0) == NGX_ERROR) {
                return NGX_ERROR;
            }
        }
    }

    return NGX_OK;
}
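The ngx_get_connection() call above pops a connection from the free list that was just threaded through the data pointers. A simplified sketch of the idea (the real function in src/core/ngx_connection.c also rebinds the read/write events, handles the files array, and logs when connections run out):

ngx_connection_t *
ngx_get_connection(ngx_socket_t s, ngx_log_t *log)
{
    ngx_connection_t  *c;

    c = ngx_cycle->free_connections;
    if (c == NULL) {
        /* the real code logs "worker_connections are not enough" here */
        return NULL;
    }

    /* unlink the head of the free list; c->data points at the next free one */
    ngx_cycle->free_connections = c->data;
    ngx_cycle->free_connection_n--;

    c->fd = s;      /* the connection now owns this descriptor */

    return c;
}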
3. When epoll reports a new event, NGINX must find the method that handles it. It actually starts by finding the corresponding connection: the ptr member of the epoll_event data union is used to store the user's connection pointer, as shown below. We will then see how it is unpacked afterwards.
static ngx_int_t
ngx_epoll_add_event(ngx_event_t *ev, ngx_int_t event, ngx_uint_t flags)
{
    ...

    /*
     * the ptr member of epoll_event's data union can hold a user pointer;
     * here it stores the connection, and the event's instance flag, which
     * marks whether the event is stale, is packed into it as well
     */
    ee.events = events | (uint32_t) flags;

    /* tuck instance into the low bit of ee.data.ptr (alignment keeps it 0) */
    ee.data.ptr = (void *) ((uintptr_t) c | ev->instance);

    ...

    if (epoll_ctl(ep, op, c->fd, &ee) == -1) {
        ngx_log_error(NGX_LOG_ALERT, ev->log, ngx_errno,
                      "epoll_ctl(%d, %d) failed", op, c->fd);
        return NGX_ERROR;
    }

    ...

    return NGX_OK;
}
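The trick relies on ngx_connection_t being at least 2-byte aligned, so the low bit of its address is always zero and can carry the one-bit instance flag. A tiny standalone demo of the tag/untag round trip (plain C, not NGINX code):

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

struct conn { int fd; };   /* stand-in for ngx_connection_t */

int main(void)
{
    struct conn *c = malloc(sizeof(struct conn));
    unsigned instance = 1;

    /* malloc'ed memory is suitably aligned, so the low bit is free */
    assert(((uintptr_t) c & 1) == 0);

    /* pack: what ngx_epoll_add_event stores in ee.data.ptr */
    void *ptr = (void *) ((uintptr_t) c | instance);

    /* unpack: what the event loop does when the event fires */
    unsigned got_instance = (uintptr_t) ptr & 1;
    struct conn *got_c = (struct conn *) ((uintptr_t) ptr & ~(uintptr_t) 1);

    assert(got_instance == instance && got_c == c);
    free(c);
    return 0;
}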
4. Each worker process then enters its event loop, repeatedly calling ngx_process_events_and_timers:
static void
ngx_worker_process_cycle(ngx_cycle_t *cycle, void *data)
{
    ...
    ngx_worker_process_init(cycle, worker);
    ...

    for ( ;; ) {
        ...
        ngx_process_events_and_timers(cycle);
        ...
    }
}
5. ngx_process_events_and_timers() dispatches to the event module through ngx_process_events, a macro for ngx_event_actions.process_events, which the epoll module sets to ngx_epoll_process_events:
void
ngx_process_events_and_timers(ngx_cycle_t *cycle)
{
    ...
    (void) ngx_process_events(cycle, timer, flags);
    ...
}
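For epoll, the call lands in ngx_epoll_process_events(), which is where the packed ee.data.ptr from step 3 is finally unpacked and checked against the event's instance flag. An abridged sketch of its core loop (accept-mutex posting, timer updates, and logging omitted; details vary by NGINX version):

static ngx_int_t
ngx_epoll_process_events(ngx_cycle_t *cycle, ngx_msec_t timer, ngx_uint_t flags)
{
    int                events, i;
    uint32_t           revents;
    ngx_uint_t         instance;
    ngx_event_t       *rev, *wev;
    ngx_connection_t  *c;

    /* wait for events on all registered fds */
    events = epoll_wait(ep, event_list, (int) nevents, timer);

    for (i = 0; i < events; i++) {

        /* recover the connection and the instance bit packed in step 3 */
        c = event_list[i].data.ptr;
        instance = (uintptr_t) c & 1;
        c = (ngx_connection_t *) ((uintptr_t) c & (uintptr_t) ~1);

        rev = c->read;

        /* stale event: the connection was closed and possibly reused */
        if (c->fd == -1 || rev->instance != instance) {
            continue;
        }

        revents = event_list[i].events;

        if ((revents & EPOLLIN) && rev->active) {
            /* on a listening socket this handler is ngx_event_accept */
            rev->handler(rev);
        }

        wev = c->write;

        if ((revents & EPOLLOUT) && wev->active) {
            wev->handler(wev);
        }
    }

    return NGX_OK;
}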