轉載:https://www.nowcoder.com/discuss/26226?type=0&order=0&pos=27&page=1node
/* * fs/eventpoll.c (Efficient event retrieval implementation) * Copyright (C) 2001,...,2009 Davide Libenzi * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Davide Libenzi <davidel@xmailserver.org> * */ /* * 在深刻了解epoll的實現以前, 先來了解內核的3個方面. * 1. 等待隊列 waitqueue * 咱們簡單解釋一下等待隊列: * 隊列頭(wait_queue_head_t)每每是資源生產者, * 隊列成員(wait_queue_t)每每是資源消費者, * 當頭的資源ready後, 會逐個執行每一個成員指定的回調函數, * 來通知它們資源已經ready了, 等待隊列大體就這個意思. * 2. 內核的poll機制 * 被Poll的fd, 必須在實現上支持內核的Poll技術, * 好比fd是某個字符設備,或者是個socket, 它必須實現 * file_operations中的poll操做, 給本身分配有一個等待隊列頭. * 主動poll fd的某個進程必須分配一個等待隊列成員, 添加到 * fd的對待隊列裏面去, 並指定資源ready時的回調函數. * 用socket作例子, 它必須有實現一個poll操做, 這個Poll是 * 發起輪詢的代碼必須主動調用的, 該函數中必須調用poll_wait(), * poll_wait會將發起者做爲等待隊列成員加入到socket的等待隊列中去. * 這樣socket發生狀態變化時能夠經過隊列頭逐個通知全部關心它的進程. * 這一點必須很清楚的理解, 不然會想不明白epoll是如何 * 得知fd的狀態發生變化的. * 3. epollfd自己也是個fd, 因此它自己也能夠被epoll, * 能夠猜想一下它是否是能夠無限嵌套epoll下去... * * epoll基本上就是使用了上面的1,2點來完成. * 可見epoll自己並無給內核引入什麼特別複雜或者高深的技術, * 只不過是已有功能的從新組合, 達到了超過select的效果. */ /* * 相關的其它內核知識: * 1. fd咱們知道是文件描述符, 在內核態, 與之對應的是struct file結構, * 能夠看做是內核態的文件描述符. * 2. spinlock, 自旋鎖, 必需要很是當心使用的鎖, * 尤爲是調用spin_lock_irqsave()的時候, 中斷關閉, 不會發生進程調度, * 被保護的資源其它CPU也沒法訪問. 這個鎖是很強力的, 因此只能鎖一些 * 很是輕量級的操做. * 3. 引用計數在內核中是很是重要的概念, * 內核代碼裏面常常有些release, free釋放資源的函數幾乎不加任何鎖, * 這是由於這些函數每每是在對象的引用計數變成0時被調用, * 既然沒有進程在使用在這些對象, 天然也不須要加鎖. * struct file 是持有引用計數的. */ /* --- epoll相關的數據結構 --- */ /* * This structure is stored inside the "private_data" member of the file * structure and rapresent the main data sructure for the eventpoll * interface. */ /* 每建立一個epollfd, 內核就會分配一個eventpoll與之對應, 能夠說是 * 內核態的epollfd. */ struct eventpoll { /* Protect the this structure access */ spinlock_t lock; /* * This mutex is used to ensure that files are not removed * while epoll is using them. This is held during the event * collection loop, the file cleanup path, the epoll file exit * code and the ctl operations. */ /* 添加, 修改或者刪除監聽fd的時候, 以及epoll_wait返回, 向用戶空間 * 傳遞數據時都會持有這個互斥鎖, 因此在用戶空間能夠放心的在多個線程 * 中同時執行epoll相關的操做, 內核級已經作了保護. */ struct mutex mtx; /* Wait queue used by sys_epoll_wait() */ /* 調用epoll_wait()時, 咱們就是"睡"在了這個等待隊列上... */ wait_queue_head_t wq; /* Wait queue used by file->poll() */ /* 這個用於epollfd本事被poll的時候... */ wait_queue_head_t poll_wait; /* List of ready file descriptors */ /* 全部已經ready的epitem都在這個鏈表裏面 */ struct list_head rdllist; /* RB tree root used to store monitored fd structs */ /* 全部要監聽的epitem都在這裏 */ struct rb_root rbr; /* 這是一個單鏈表連接着全部的struct epitem當event轉移到用戶空間時 */ * This is a single linked list that chains all the "struct epitem" that * happened while transfering ready events to userspace w/out * holding ->lock. */ struct epitem *ovflist; /* The user that created the eventpoll descriptor */ /* 這裏保存了一些用戶變量, 好比fd監聽數量的最大值等等 */ struct user_struct *user; }; /* * Each file descriptor added to the eventpoll interface will * have an entry of this type linked to the "rbr" RB tree. */ /* epitem 表示一個被監聽的fd */ struct epitem { /* RB tree node used to link this structure to the eventpoll RB tree */ /* rb_node, 當使用epoll_ctl()將一批fds加入到某個epollfd時, 內核會分配 * 一批的epitem與fds們對應, 並且它們以rb_tree的形式組織起來, tree的root * 保存在epollfd, 也就是struct eventpoll中. * 在這裏使用rb_tree的緣由我認爲是提升查找,插入以及刪除的速度. * rb_tree對以上3個操做都具備O(lgN)的時間複雜度 */ struct rb_node rbn; /* List header used to link this structure to the eventpoll ready list */ /* 鏈表節點, 全部已經ready的epitem都會被鏈到eventpoll的rdllist中 */ struct list_head rdllink; /* * Works together "struct eventpoll"->ovflist in keeping the * single linked chain of items. */ /* 這個在代碼中再解釋... */ struct epitem *next; /* The file descriptor information this item refers to */ /* epitem對應的fd和struct file */ struct epoll_filefd ffd; /* Number of active wait queue attached to poll operations */ int nwait; /* List containing poll wait queues */ struct list_head pwqlist; /* The "container" of this item */ /* 當前epitem屬於哪一個eventpoll */ struct eventpoll *ep; /* List header used to link this item to the "struct file" items list */ struct list_head fllink; /* The structure that describe the interested events and the source fd */ /* 當前的epitem關係哪些events, 這個數據是調用epoll_ctl時從用戶態傳遞過來 */ struct epoll_event event; }; struct epoll_filefd { struct file *file; int fd; }; /* poll所用到的鉤子Wait structure used by the poll hooks */ struct eppoll_entry { /* List header used to link this structure to the "struct epitem" */ struct list_head llink; /* The "base" pointer is set to the container "struct epitem" */ struct epitem *base; /* * Wait queue item that will be linked to the target file wait * queue head. */ wait_queue_t wait; /* The wait queue head that linked the "wait" wait queue item */ wait_queue_head_t *whead; }; /* Wrapper struct used by poll queueing */ struct ep_pqueue { poll_table pt; struct epitem *epi; }; /* Used by the ep_send_events() function as callback private data */ struct ep_send_events_data { int maxevents; struct epoll_event __user *events; }; /* --- 代碼註釋 --- */ /* 你沒看錯, 這就是epoll_create()的真身, 基本啥也不幹直接調用epoll_create1了, * 另外你也能夠發現, size這個參數實際上是沒有任何用處的... */ SYSCALL_DEFINE1(epoll_create, int, size) { if (size <= 0) return -EINVAL; return sys_epoll_create1(0); } /* 這纔是真正的epoll_create啊~~ */ SYSCALL_DEFINE1(epoll_create1, int, flags) { int error; struct eventpoll *ep = NULL;//主描述符 /* Check the EPOLL_* constant for consistency. */ /* 這句沒啥用處... */ BUILD_BUG_ON(EPOLL_CLOEXEC != O_CLOEXEC); /* 對於epoll來說, 目前惟一有效的FLAG就是CLOEXEC */ if (flags & ~EPOLL_CLOEXEC) return -EINVAL; /* * Create the internal data structure ("struct eventpoll"). */ /* 分配一個struct eventpoll, 分配和初始化細節咱們隨後深聊~ */ error = ep_alloc(&ep); if (error < 0) return error; /* * Creates all the items needed to setup an eventpoll file. That is, * a file structure and a free file descriptor. */ /* 這裏是建立一個匿名fd, 提及來就話長了...長話短說: * epollfd自己並不存在一個真正的文件與之對應, 因此內核須要建立一個 * "虛擬"的文件, 併爲之分配真正的struct file結構, 並且有真正的fd. * 這裏2個參數比較關鍵: * eventpoll_fops, fops就是file operations, 就是當你對這個文件(這裏是虛擬的)進行操做(好比讀)時, * fops裏面的函數指針指向真正的操做實現, 相似C++裏面虛函數和子類的概念. * epoll只實現了poll和release(就是close)操做, 其它文件系統操做都有VFS全權處理了. * ep, ep就是struct epollevent, 它會做爲一個私有數據保存在struct file的private指針裏面. * 其實說白了, 就是爲了能經過fd找到struct file, 經過struct file能找到eventpoll結構. * 若是懂一點Linux下字符設備驅動開發, 這裏應該是很好理解的, * 推薦閱讀 <Linux device driver 3rd> */ error = anon_inode_getfd("[eventpoll]", &eventpoll_fops, ep, O_RDWR | (flags & O_CLOEXEC)); if (error < 0) ep_free(ep); return error; } /* * 建立好epollfd後, 接下來咱們要往裏面添加fd咯 * 來看epoll_ctl * epfd 就是epollfd * op ADD,MOD,DEL * fd 須要監聽的描述符 * event 咱們關心的events */ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, struct epoll_event __user *, event) { int error; struct file *file, *tfile; struct eventpoll *ep; struct epitem *epi; struct epoll_event epds; error = -EFAULT; /* * 錯誤處理以及從用戶空間將epoll_event結構copy到內核空間. */ if (ep_op_has_event(op) && copy_from_user(&epds, event, sizeof(struct epoll_event))) goto error_return; /* Get the "struct file *" for the eventpoll file */ /* 取得struct file結構, epfd既然是真正的fd, 那麼內核空間 * 就會有與之對於的一個struct file結構 * 這個結構在epoll_create1()中, 由函數anon_inode_getfd()分配 */ error = -EBADF; file = fget(epfd); if (!file) goto error_return; /* Get the "struct file *" for the target file */ /* 咱們須要監聽的fd, 它固然也有個struct file結構, 上下2個不要搞混了哦 */ tfile = fget(fd); if (!tfile) goto error_fput; /* The target file descriptor must support poll */ error = -EPERM; /* 若是監聽的文件不支持poll, 那就沒轍了. * 你知道什麼狀況下, 文件會不支持poll嗎? */ if (!tfile->f_op || !tfile->f_op->poll) goto error_tgt_fput; /* * We have to check that the file structure underneath the file descriptor * the user passed to us _is_ an eventpoll file. And also we do not permit * adding an epoll file descriptor inside itself. */ error = -EINVAL; /* epoll不能本身監聽本身... */ if (file == tfile || !is_file_epoll(file)) goto error_tgt_fput; /* * At this point it is safe to assume that the "private_data" contains * our own data structure. */ /* 取到咱們的eventpoll結構, 來自與epoll_create1()中的分配 */ ep = file->private_data; /* 接下來的操做有可能修改數據結構內容, 鎖之~ */ mutex_lock(&ep->mtx); /* * Try to lookup the file inside our RB tree, Since we grabbed "mtx" * above, we can be sure to be able to use the item looked up by * ep_find() till we release the mutex. */ /* 對於每個監聽的fd, 內核都有分配一個epitem結構, * 並且咱們也知道, epoll是不容許重複添加fd的, * 因此咱們首先查找該fd是否是已經存在了. * ep_find()其實就是RBTREE查找, 跟C++STL的map差很少一回事, O(lgn)的時間複雜度. */ epi = ep_find(ep, tfile, fd); error = -EINVAL; switch (op) { /* 首先咱們關心添加 */ case EPOLL_CTL_ADD: if (!epi) { /* 以前的find沒有找到有效的epitem, 證實是第一次插入, 接受! * 這裏咱們能夠知道, POLLERR和POLLHUP事件內核老是會關心的 * */ epds.events |= POLLERR | POLLHUP; /* rbtree插入, 詳情見ep_insert()的分析 * 其實我以爲這裏有insert的話, 以前的find應該 * 是能夠省掉的... */ error = ep_insert(ep, &epds, tfile, fd); } else /* 找到了!? 重複添加! */ error = -EEXIST; break; /* 刪除和修改操做都比較簡單 */ case EPOLL_CTL_DEL: if (epi) error = ep_remove(ep, epi); else error = -ENOENT; break; case EPOLL_CTL_MOD: if (epi) { epds.events |= POLLERR | POLLHUP; error = ep_modify(ep, epi, &epds); } else error = -ENOENT; break; } mutex_unlock(&ep->mtx); error_tgt_fput: fput(tfile); error_fput: fput(file); error_return: return error; } /* 分配一個eventpoll結構 */ static int ep_alloc(struct eventpoll **pep) { int error; struct user_struct *user; struct eventpoll *ep; /* 獲取當前用戶的一些信息, 好比是否是root啦, 最大監聽fd數目啦 */ user = get_current_user(); error = -ENOMEM; ep = kzalloc(sizeof(*ep), GFP_KERNEL); if (unlikely(!ep)) goto free_uid; /* 這些都是初始化啦 */ spin_lock_init(&ep->lock); mutex_init(&ep->mtx); init_waitqueue_head(&ep->wq);//初始化本身睡在的等待隊列 init_waitqueue_head(&ep->poll_wait);//初始化 INIT_LIST_HEAD(&ep->rdllist);//初始化就緒鏈表 ep->rbr = RB_ROOT; ep->ovflist = EP_UNACTIVE_PTR; ep->user = user; *pep = ep; return 0; free_uid: free_uid(user); return error; } /* * Must be called with "mtx" held. */ /* * ep_insert()在epoll_ctl()中被調用, 完成往epollfd裏面添加一個監聽fd的工做 * tfile是fd在內核態的struct file結構 */ static int ep_insert(struct eventpoll *ep, struct epoll_event *event, struct file *tfile, int fd) { int error, revents, pwake = 0; unsigned long flags; struct epitem *epi; struct ep_pqueue epq; /* 查看是否達到當前用戶的最大監聽數 */ if (unlikely(atomic_read(&ep->user->epoll_watches) >= max_user_watches)) return -ENOSPC; /* 從著名的slab中分配一個epitem */ if (!(epi = kmem_cache_alloc(epi_cache, GFP_KERNEL))) return -ENOMEM; /* Item initialization follow here ... */ /* 這些都是相關成員的初始化... */ INIT_LIST_HEAD(&epi->rdllink); INIT_LIST_HEAD(&epi->fllink); INIT_LIST_HEAD(&epi->pwqlist); epi->ep = ep; /* 這裏保存了咱們須要監聽的文件fd和它的file結構 */ ep_set_ffd(&epi->ffd, tfile, fd); epi->event = *event; epi->nwait = 0; /* 這個指針的初值不是NULL哦... */ epi->next = EP_UNACTIVE_PTR; /* Initialize the poll table using the queue callback */ /* 好, 咱們終於要進入到poll的正題了 */ epq.epi = epi; /* 初始化一個poll_table * 其實就是指定調用poll_wait(注意不是epoll_wait!!!)時的回調函數,和咱們關心哪些events, * ep_ptable_queue_proc()就是咱們的回調啦, 初值是全部event都關心 */ init_poll_funcptr(&epq.pt, ep_ptable_queue_proc); /* * Attach the item to the poll hooks and get current event bits. * We can safely use the file* here because its usage count has * been increased by the caller of this function. Note that after * this operation completes, the poll callback can start hitting * the new item. */ /* 這一部很關鍵, 也比較難懂, 徹底是內核的poll機制致使的... * 首先, f_op->poll()通常來講只是個wrapper, 它會調用真正的poll實現, * 拿UDP的socket來舉例, 這裏就是這樣的調用流程: f_op->poll(), sock_poll(), * udp_poll(), datagram_poll(), sock_poll_wait(), 最後調用到咱們上面指定的 * ep_ptable_queue_proc()這個回調函數...(好深的調用路徑...). * 完成這一步, 咱們的epitem就跟這個socket關聯起來了, 當它有狀態變化時, * 會經過ep_poll_callback()來通知. * 最後, 這個函數還會查詢當前的fd是否是已經有啥event已經ready了, 有的話 * 會將event返回. */ revents = tfile->f_op->poll(tfile, &epq.pt); /* * We have to check if something went wrong during the poll wait queue * install process. Namely an allocation for a wait queue failed due * high memory pressure. */ error = -ENOMEM; if (epi->nwait < 0) goto error_unregister; /* Add the current item to the list of active epoll hook for this file */ /* 這個就是每一個文件會將全部監聽本身的epitem鏈起來 */ spin_lock(&tfile->f_lock); list_add_tail(&epi->fllink, &tfile->f_ep_links); spin_unlock(&tfile->f_lock); /* * Add the current item to the RB tree. All RB tree operations are * protected by "mtx", and ep_insert() is called with "mtx" held. */ /* 都搞定後, 將epitem插入到對應的eventpoll中去 */ ep_rbtree_insert(ep, epi); /* We have to drop the new item inside our item list to keep track of it */ spin_lock_irqsave(&ep->lock, flags); /* If the file is already "ready" we drop it inside the ready list */ /* 到達這裏後, 若是咱們監聽的fd已經有事件發生, 那就要處理一下 */ if ((revents & event->events) && !ep_is_linked(&epi->rdllink)) { /* 將當前的epitem加入到ready list中去 */ list_add_tail(&epi->rdllink, &ep->rdllist); /* Notify waiting tasks that events are available */ /* 誰在epoll_wait, 就喚醒它... */ if (waitqueue_active(&ep->wq)) wake_up_locked(&ep->wq); /* 誰在epoll當前的epollfd, 也喚醒它... */ if (waitqueue_active(&ep->poll_wait)) pwake++; } spin_unlock_irqrestore(&ep->lock, flags); atomic_inc(&ep->user->epoll_watches); /* We have to call this outside the lock */ if (pwake) ep_poll_safewake(&ep->poll_wait); return 0; error_unregister: ep_unregister_pollwait(ep, epi); /* * We need to do this because an event could have been arrived on some * allocated wait queue. Note that we don't care about the ep->ovflist * list, since that is used/cleaned only inside a section bound by "mtx". * And ep_insert() is called with "mtx" held. */ spin_lock_irqsave(&ep->lock, flags); if (ep_is_linked(&epi->rdllink)) list_del_init(&epi->rdllink); spin_unlock_irqrestore(&ep->lock, flags); kmem_cache_free(epi_cache, epi); return error; } /* * This is the callback that is used to add our wait queue to the * target file wakeup lists. */ /* * 該函數在調用f_op->poll()時會被調用. * 也就是epoll主動poll某個fd時, 用來將epitem與指定的fd關聯起來的. * 關聯的辦法就是使用等待隊列(waitqueue) */ static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead, poll_table *pt) { struct epitem *epi = ep_item_from_epqueue(pt); struct eppoll_entry *pwq; if (epi->nwait >= 0 && (pwq = kmem_cache_alloc(pwq_cache, GFP_KERNEL))) { /* 初始化等待隊列, 指定ep_poll_callback爲喚醒時的回調函數, * 當咱們監聽的fd發生狀態改變時, 也就是隊列頭被喚醒時, * 指定的回調函數將會被調用. */ init_waitqueue_func_entry(&pwq->wait, ep_poll_callback); pwq->whead = whead; pwq->base = epi; /* 將剛分配的等待隊列成員加入到頭中, 頭是由fd持有的 */ add_wait_queue(whead, &pwq->wait); list_add_tail(&pwq->llink, &epi->pwqlist); /* nwait記錄了當前epitem加入到了多少個等待隊列中, * 我認爲這個值最大也只會是1... */ epi->nwait++; } else { /* We have to signal that an error occurred */ epi->nwait = -1; } } /* * This is the callback that is passed to the wait queue wakeup * machanism. It is called by the stored file descriptors when they * have events to report. */ /* * 這個是關鍵性的回調函數, 當咱們監聽的fd發生狀態改變時, 它會被調用. * 參數key被看成一個unsigned long整數使用, 攜帶的是events. */ static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *key) { int pwake = 0; unsigned long flags; struct epitem *epi = ep_item_from_wait(wait);//從等待隊列獲取epitem.須要知道哪一個進程掛載到這個設備 struct eventpoll *ep = epi->ep;//獲取 spin_lock_irqsave(&ep->lock, flags); /* * If the event mask does not contain any poll(2) event, we consider the * descriptor to be disabled. This condition is likely the effect of the * EPOLLONESHOT bit that disables the descriptor when an event is received, * until the next EPOLL_CTL_MOD will be issued. */ if (!(epi->event.events & ~EP_PRIVATE_BITS)) goto out_unlock; /* * Check the events coming with the callback. At this stage, not * every device reports the events in the "key" parameter of the * callback. We need to be able to handle both cases here, hence the * test for "key" != NULL before the event match test. */ /* 沒有咱們關心的event... */ if (key && !((unsigned long) key & epi->event.events)) goto out_unlock; /* * If we are trasfering events to userspace, we can hold no locks * (because we're accessing user memory, and because of linux f_op->poll() * semantics). All the events that happens during that period of time are * chained in ep->ovflist and requeued later on. */ /* * 這裏看起來可能有點費解, 其實幹的事情比較簡單: * 若是該callback被調用的同時, epoll_wait()已經返回了, * 也就是說, 此刻應用程序有可能已經在循環獲取events, * 這種狀況下, 內核將此刻發生event的epitem用一個單獨的鏈表 * 鏈起來, 不發給應用程序, 也不丟棄, 而是在下一次epoll_wait * 時返回給用戶. */ if (unlikely(ep->ovflist != EP_UNACTIVE_PTR)) { if (epi->next == EP_UNACTIVE_PTR) { epi->next = ep->ovflist; ep->ovflist = epi; } goto out_unlock; } /* If this file is already in the ready list we exit soon */ /* 將當前的epitem放入ready list */ if (!ep_is_linked(&epi->rdllink)) list_add_tail(&epi->rdllink, &ep->rdllist); /* * Wake up ( if active ) both the eventpoll wait list and the ->poll() * wait list. */ /* 喚醒epoll_wait... */ if (waitqueue_active(&ep->wq)) wake_up_locked(&ep->wq); /* 若是epollfd也在被poll, 那就喚醒隊列裏面的全部成員. */ if (waitqueue_active(&ep->poll_wait)) pwake++; out_unlock: spin_unlock_irqrestore(&ep->lock, flags); /* We have to call this outside the lock */ if (pwake) ep_poll_safewake(&ep->poll_wait); return 1; } /* * Implement the event wait interface for the eventpoll file. It is the kernel * part of the user space epoll_wait(2). */ SYSCALL_DEFINE4(epoll_wait, int, epfd, struct epoll_event __user *, events, int, maxevents, int, timeout) { int error; struct file *file; struct eventpoll *ep; /* The maximum number of event must be greater than zero */ if (maxevents <= 0 || maxevents > EP_MAX_EVENTS) return -EINVAL; /* Verify that the area passed by the user is writeable */ /* 這個地方有必要說明一下: * 內核對應用程序採起的策略是"絕對不信任", * 因此內核跟應用程序之間的數據交互大都是copy, 不容許(也時候也是不能...)指針引用. * epoll_wait()須要內核返回數據給用戶空間, 內存由用戶程序提供, * 因此內核會用一些手段來驗證這一段內存空間是否是有效的. */ if (!access_ok(VERIFY_WRITE, events, maxevents * sizeof(struct epoll_event))) { error = -EFAULT; goto error_return; } /* Get the "struct file *" for the eventpoll file */ error = -EBADF; /* 獲取epollfd的struct file, epollfd也是文件嘛 */ file = fget(epfd); if (!file) goto error_return; /* * We have to check that the file structure underneath the fd * the user passed to us _is_ an eventpoll file. */ error = -EINVAL; /* 檢查一下它是否是一個真正的epollfd... */ if (!is_file_epoll(file)) goto error_fput; /* * At this point it is safe to assume that the "private_data" contains * our own data structure. */ /* 獲取eventpoll結構 */ ep = file->private_data; /* Time to fish for events ... */ /* OK, 睡覺, 等待事件到來~~ */ error = ep_poll(ep, events, maxevents, timeout); error_fput: fput(file); error_return: return error; } /* 這個函數真正將執行epoll_wait的進程帶入睡眠狀態... */ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events, int maxevents, long timeout) { int res, eavail; unsigned long flags; long jtimeout; wait_queue_t wait;//等待隊列 /* * Calculate the timeout by checking for the "infinite" value (-1) * and the overflow condition. The passed timeout is in milliseconds, * that why (t * HZ) / 1000. */ /* 計算睡覺時間, 毫秒要轉換爲HZ */ jtimeout = (timeout < 0 || timeout >= EP_MAX_MSTIMEO) ? MAX_SCHEDULE_TIMEOUT : (timeout * HZ + 999) / 1000; retry: spin_lock_irqsave(&ep->lock, flags); res = 0; /* 若是ready list不爲空, 就不睡了, 直接幹活... */ if (list_empty(&ep->rdllist)) { /* * We don't have any available event to return to the caller. * We need to sleep here, and we will be wake up by * ep_poll_callback() when events will become available. */ /* OK, 初始化一個等待隊列, 準備直接把本身掛起, * 注意current是一個宏, 表明當前進程 */ init_waitqueue_entry(&wait, current);//初始化等待隊列,wait表示當前進程 __add_wait_queue_exclusive(&ep->wq, &wait);//掛載到ep結構的等待隊列 for (;;) { /* * We don't want to sleep if the ep_poll_callback() sends us * a wakeup in between. That's why we set the task state * to TASK_INTERRUPTIBLE before doing the checks. */ /* 將當前進程設置位睡眠, 可是能夠被信號喚醒的狀態, * 注意這個設置是"未來時", 咱們此刻還沒睡! */ set_current_state(TASK_INTERRUPTIBLE); /* 若是這個時候, ready list裏面有成員了, * 或者睡眠時間已通過了, 就直接不睡了... */ if (!list_empty(&ep->rdllist) || !jtimeout) break; /* 若是有信號產生, 也起牀... */ if (signal_pending(current)) { res = -EINTR; break; } /* 啥事都沒有,解鎖, 睡覺... */ spin_unlock_irqrestore(&ep->lock, flags); /* jtimeout這個時間後, 會被喚醒, * ep_poll_callback()若是此時被調用, * 那麼咱們就會直接被喚醒, 不用等時間了... * 再次強調一下ep_poll_callback()的調用時機是由被監聽的fd * 的具體實現, 好比socket或者某個設備驅動來決定的, * 由於等待隊列頭是他們持有的, epoll和當前進程 * 只是單純的等待... **/ jtimeout = schedule_timeout(jtimeout);//睡覺 spin_lock_irqsave(&ep->lock, flags); } __remove_wait_queue(&ep->wq, &wait); /* OK 咱們醒來了... */ set_current_state(TASK_RUNNING); } /* Is it worth to try to dig for events ? */ eavail = !list_empty(&ep->rdllist) || ep->ovflist != EP_UNACTIVE_PTR; spin_unlock_irqrestore(&ep->lock, flags); /* * Try to transfer events to user space. In case we get 0 events and * there's still timeout left over, we go trying again in search of * more luck. */ /* 若是一切正常, 有event發生, 就開始準備數據copy給用戶空間了... */ if (!res && eavail && !(res = ep_send_events(ep, events, maxevents)) && jtimeout) goto retry; return res; } /* 這個簡單, 咱們直奔下一個... */ static int ep_send_events(struct eventpoll *ep, struct epoll_event __user *events, int maxevents) { struct ep_send_events_data esed; esed.maxevents = maxevents; esed.events = events; return ep_scan_ready_list(ep, ep_send_events_proc, &esed); } /** * ep_scan_ready_list - Scans the ready list in a way that makes possible for * the scan code, to call f_op->poll(). Also allows for * O(NumReady) performance. * * @ep: Pointer to the epoll private data structure. * @sproc: Pointer to the scan callback. * @priv: Private opaque data passed to the @sproc callback. * * Returns: The same integer error code returned by the @sproc callback. */ static int ep_scan_ready_list(struct eventpoll *ep, int (*sproc)(struct eventpoll *, struct list_head *, void *), void *priv) { int error, pwake = 0; unsigned long flags; struct epitem *epi, *nepi; LIST_HEAD(txlist); /* * We need to lock this because we could be hit by * eventpoll_release_file() and epoll_ctl(). */ mutex_lock(&ep->mtx); /* * Steal the ready list, and re-init the original one to the * empty list. Also, set ep->ovflist to NULL so that events * happening while looping w/out locks, are not lost. We cannot * have the poll callback to queue directly on ep->rdllist, * because we want the "sproc" callback to be able to do it * in a lockless way. */ spin_lock_irqsave(&ep->lock, flags); /* 這一步要注意, 首先, 全部監聽到events的epitem都鏈到rdllist上了, * 可是這一步以後, 全部的epitem都轉移到了txlist上, 而rdllist被清空了, * 要注意哦, rdllist已經被清空了! */ list_splice_init(&ep->rdllist, &txlist); /* ovflist, 在ep_poll_callback()裏面我解釋過, 此時此刻咱們不但願 * 有新的event加入到ready list中了, 保存後下次再處理... */ ep->ovflist = NULL; spin_unlock_irqrestore(&ep->lock, flags); /* * Now call the callback function. */ /* 在這個回調函數裏面處理每一個epitem * sproc 就是 ep_send_events_proc, 下面會註釋到. */ error = (*sproc)(ep, &txlist, priv); spin_lock_irqsave(&ep->lock, flags); /* * During the time we spent inside the "sproc" callback, some * other events might have been queued by the poll callback. * We re-insert them inside the main ready-list here. */ /* 如今咱們來處理ovflist, 這些epitem都是咱們在傳遞數據給用戶空間時 * 監聽到了事件. */ for (nepi = ep->ovflist; (epi = nepi) != NULL; nepi = epi->next, epi->next = EP_UNACTIVE_PTR) { /* * We need to check if the item is already in the list. * During the "sproc" callback execution time, items are * queued into ->ovflist but the "txlist" might already * contain them, and the list_splice() below takes care of them. */ /* 將這些直接放入readylist */ if (!ep_is_linked(&epi->rdllink)) list_add_tail(&epi->rdllink, &ep->rdllist); } /* * We need to set back ep->ovflist to EP_UNACTIVE_PTR, so that after * releasing the lock, events will be queued in the normal way inside * ep->rdllist. */ ep->ovflist = EP_UNACTIVE_PTR; /* * Quickly re-inject items left on "txlist". */ /* 上一次沒有處理完的epitem, 從新插入到ready list */ list_splice(&txlist, &ep->rdllist); /* ready list不爲空, 直接喚醒... */ if (!list_empty(&ep->rdllist)) { /* * Wake up (if active) both the eventpoll wait list and * the ->poll() wait list (delayed after we release the lock). */ if (waitqueue_active(&ep->wq)) wake_up_locked(&ep->wq); if (waitqueue_active(&ep->poll_wait)) pwake++; } spin_unlock_irqrestore(&ep->lock, flags); mutex_unlock(&ep->mtx); /* We have to call this outside the lock */ if (pwake) ep_poll_safewake(&ep->poll_wait); return error; } /* 該函數做爲callbakc在ep_scan_ready_list()中被調用 * head是一個鏈表, 包含了已經ready的epitem, * 這個不是eventpoll裏面的ready list, 而是上面函數中的txlist. */ static int ep_send_events_proc(struct eventpoll *ep, struct list_head *head, void *priv) { struct ep_send_events_data *esed = priv; int eventcnt; unsigned int revents; struct epitem *epi; struct epoll_event __user *uevent; /* * We can loop without lock because we are passed a task private list. * Items cannot vanish during the loop because ep_scan_ready_list() is * holding "mtx" during this call. */ /* 掃描整個鏈表... */ for (eventcnt = 0, uevent = esed->events; !list_empty(head) && eventcnt < esed->maxevents;) { /* 取出第一個成員 */ epi = list_first_entry(head, struct epitem, rdllink); /* 而後從鏈表裏面移除 */ list_del_init(&epi->rdllink); /* 讀取events, * 注意events咱們ep_poll_callback()裏面已經取過一次了, 爲啥還要再取? * 1. 咱們固然但願能拿到此刻的最新數據, events是會變的~ * 2. 不是全部的poll實現, 都經過等待隊列傳遞了events, 有可能某些驅動壓根沒傳 * 必須主動去讀取. */ revents = epi->ffd.file->f_op->poll(epi->ffd.file, NULL) & epi->event.events; if (revents) { /* 將當前的事件和用戶傳入的數據都copy給用戶空間, * 就是epoll_wait()後應用程序能讀到的那一堆數據. */ if (__put_user(revents, &uevent->events) || __put_user(epi->event.data, &uevent->data)) { list_add(&epi->rdllink, head); return eventcnt ? eventcnt : -EFAULT; } eventcnt++; uevent++; if (epi->event.events & EPOLLONESHOT) epi->event.events &= EP_PRIVATE_BITS; else if (!(epi->event.events & EPOLLET)) { /* 嘿嘿, EPOLLET和非ET的區別就在這一步之差呀~ * 若是是ET, epitem是不會再進入到readly list, * 除非fd再次發生了狀態改變, ep_poll_callback被調用. * 若是是非ET, 無論你還有沒有有效的事件或者數據, * 都會被從新插入到ready list, 再下一次epoll_wait * 時, 會當即返回, 並通知給用戶空間. 固然若是這個 * 被監聽的fds確實沒事件也沒數據了, epoll_wait會返回一個0, * 空轉一次. */ list_add_tail(&epi->rdllink, &ep->rdllist); } } } return eventcnt; } /* ep_free在epollfd被close時調用, * 釋放一些資源而已, 比較簡單 */ static void ep_free(struct eventpoll *ep) { struct rb_node *rbp; struct epitem *epi; /* We need to release all tasks waiting for these file */ if (waitqueue_active(&ep->poll_wait)) ep_poll_safewake(&ep->poll_wait); /* * We need to lock this because we could be hit by * eventpoll_release_file() while we're freeing the "struct eventpoll". * We do not need to hold "ep->mtx" here because the epoll file * is on the way to be removed and no one has references to it * anymore. The only hit might come from eventpoll_release_file() but * holding "epmutex" is sufficent here. */ mutex_lock(&epmutex); /* * Walks through the whole tree by unregistering poll callbacks. */ for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) { epi = rb_entry(rbp, struct epitem, rbn); ep_unregister_pollwait(ep, epi); } /* * Walks through the whole tree by freeing each "struct epitem". At this * point we are sure no poll callbacks will be lingering around, and also by * holding "epmutex" we can be sure that no file cleanup code will hit * us during this operation. So we can avoid the lock on "ep->lock". */ /* 之因此在關閉epollfd以前不須要調用epoll_ctl移除已經添加的fd, * 是由於這裏已經作了... */ while ((rbp = rb_first(&ep->rbr)) != NULL) { epi = rb_entry(rbp, struct epitem, rbn); ep_remove(ep, epi); } mutex_unlock(&epmutex); mutex_destroy(&ep->mtx); free_uid(ep->user); kfree(ep); } /* File callbacks that implement the eventpoll file behaviour */ static const struct file_operations eventpoll_fops = { .release = ep_eventpoll_release, .poll = ep_eventpoll_poll }; /* Fast test to see if the file is an evenpoll file */ static inline int is_file_epoll(struct file *f) { return f->f_op == &eventpoll_fops; } /* OK, eventpoll我認爲比較重要的函數都註釋完了... */
epoll_createlinux