linux內核中提供了流量控制的相關處理功能,相關代碼在net/sched目錄下;而應用層上的控制是經過iproute2軟件包中的tc來實現, tc和sched的關係就好象iptables和netfilter的關係同樣,一個是用戶層接口,一個是具體實現. 流控包括幾個部分: 流控算法, 一般在net/sched/sch_*.c中實現, 缺省的是FIFO, 是比較典型的黑盒模式, 對外只看到入隊和出對兩個操做; 流控結構的操做處理; 和用戶空間的控制接口, 是經過rtnetlink實現的。 如下內核代碼版本爲2.6.24. [數據結構] 流控處理對外表現是一個黑盒,外部只能看到數據入隊和出隊,但內部隊列是如何操做和管理外面是不知道的; 另外處理隊列處理外,流控還有一個調度器,該調度器將數據進行分類,而後對不一樣類型的數據採起不一樣的流控處理, 所分的類型多是多級的,造成一個樹型的分類樹。 流控的基本數據結構 struct Qdisc { int (*enqueue)(struct sk_buff *skb, struct Qdisc *dev);//入隊操做 struct sk_buff * (*dequeue)(struct Qdisc *dev);//出隊操做 unsigned flags;//標誌 #define TCQ_F_BUILTIN 1 #define TCQ_F_THROTTLED 2 #define TCQ_F_INGRESS 4 int padded; struct Qdisc_ops *ops;//Qdisc的基本操做結構 u32 handle;//句柄,用於查找 u32 parent; atomic_t refcnt; struct sk_buff_head q;//數據包鏈表頭 struct net_device *dev;//網卡設備 struct list_head list; //統計信息 struct gnet_stats_basic bstats; struct gnet_stats_queue qstats; //速率估計 struct gnet_stats_rate_est rate_est; //流控鎖 spinlock_t *stats_lock; struct rcu_head q_rcu; int (*reshape_fail)(struct sk_buff *skb, struct Qdisc *q); /* This field is deprecated, but it is still used by CBQ * and it will live until better solution will be invented. 
*/ struct Qdisc *__parent;//父節點, 但基本已經被淘汰了 }; 流控隊列的基本操做結構 struct Qdisc_ops { struct Qdisc_ops *next;//鏈表中的下一個 struct Qdisc_class_ops *cl_ops;//類別操做結構 char id[IFNAMSIZ];//Qdisc的名稱, 從數組大小看應該就是網卡名稱 int priv_size;//私有數據大小 int (*enqueue)(struct sk_buff *, struct Qdisc *);//入隊 struct sk_buff * (*dequeue)(struct Qdisc *);//出隊 int (*requeue)(struct sk_buff *, struct Qdisc *);//將數據包從新排隊 unsigned int (*drop)(struct Qdisc *);//丟棄 int (*init)(struct Qdisc *, struct rtattr *arg);//初始化 void (*reset)(struct Qdisc *);//復位爲初始狀態,釋放緩衝,刪除定時器,清空計數器 void (*destroy)(struct Qdisc *);//釋放 int (*change)(struct Qdisc *, struct rtattr *arg);//更改Qdisc參數 //輸出 int (*dump)(struct Qdisc *, struct sk_buff *); int (*dump_stats)(struct Qdisc *, struct gnet_dump *); struct module *owner; }; 流控隊列類別操做結構 struct Qdisc_class_ops { /* Child qdisc manipulation */ int (*graft)(struct Qdisc *, unsigned long cl, struct Qdisc *, struct Qdisc **);//嫁接: 將新的子Qdisc節點接入指定類別, 並經過最後一個參數返回舊的子節點 struct Qdisc * (*leaf)(struct Qdisc *, unsigned long cl);//獲取指定類別的葉子Qdisc節點 void (*qlen_notify)(struct Qdisc *, unsigned long); /* Class manipulation routines */ unsigned long (*get)(struct Qdisc *, u32 classid);//獲取, 增長使用計數 void (*put)(struct Qdisc *, unsigned long);//釋放, 減小使用計數 int (*change)(struct Qdisc *, u32, u32, struct rtattr **, unsigned long *);//改變 int (*delete)(struct Qdisc *, unsigned long);//刪除 void (*walk)(struct Qdisc *, struct qdisc_walker * arg);//遍歷 /* Filter manipulation */ struct tcf_proto ** (*tcf_chain)(struct Qdisc *, unsigned long); unsigned long (*bind_tcf)(struct Qdisc *, unsigned long, u32 classid);//tc捆綁 void (*unbind_tcf)(struct Qdisc *, unsigned long);//tc解除 /* rtnetlink specific */ //輸出 int (*dump)(struct Qdisc *, unsigned long, struct sk_buff *skb, struct tcmsg*); int (*dump_stats)(struct Qdisc *, unsigned long, struct gnet_dump *); }; 流控速率控制表結構 struct qdisc_rate_table { struct tc_ratespec rate; u32 data[256]; struct qdisc_rate_table *next; int refcnt; }; 在這結構中有一些相關的流控制數據結構 struct net_device { ......
/* * Cache line mostly used on queue transmit path (qdisc) */ /* device queue lock */ spinlock_t queue_lock ____cacheline_aligned_in_smp; struct Qdisc *qdisc;//發送數據時的隊列處理 struct Qdisc *qdisc_sleeping;//網卡中止時保存網卡活動時的隊列處理方法 struct list_head qdisc_list;//網卡處理的數據隊列鏈表 unsigned long tx_queue_len; //最大隊列長度 /* Max frames per queue allowed */ /* Partially transmitted GSO packet. */ struct sk_buff *gso_skb; /* ingress path synchronizer */ spinlock_t ingress_lock;//輸入流控鎖 struct Qdisc *qdisc_ingress;//這是對於接收數據時的隊列處理 ...... }; [/數據結構] [初始化] 在網卡設備的初始化函數register_netdevice()函數中調用dev_init_scheduler()函數對網卡設備的流控隊列處理進行了初始化, 也就是說每一個網絡網卡設備的qdisc指針都不會是空的: int register_netdevice(struct net_device *dev) { ...... dev_init_scheduler(dev); ...... } void dev_init_scheduler(struct net_device *dev) { qdisc_lock_tree(dev); //處理髮出數據的qdisc是必須的,而處理輸入數據的qdisc_ingress則不是必須的 //缺省狀況下的qdisc dev->qdisc = &noop_qdisc; dev->qdisc_sleeping = &noop_qdisc; INIT_LIST_HEAD(&dev->qdisc_list); qdisc_unlock_tree(dev); dev_watchdog_init(dev); } watchdog初始化 static void dev_watchdog_init(struct net_device *dev) { init_timer(&dev->watchdog_timer); dev->watchdog_timer.data = (unsigned long)dev; dev->watchdog_timer.function = dev_watchdog; } 當咱們用命令ifconfig ethX up時進入到系統的ioctl調用路徑是 sock_ioctl->dev_ioctl->dev_ifsioc->dev_change_flags->dev_open 順便說一下dev_open會調用具體設備的open函數dev->open(dev); ->dev_activate(dev); 固然noop_qdisc中的調度是不可用的, 只進行丟包處理;網卡在激活(dev_open)時會調用 dev_activate()函數從新對qdisc指針賦值,但未對qdisc_ingress賦值. void dev_activate(struct net_device *dev) { /* No queueing discipline is attached to device; create default one i.e. 
pfifo_fast for devices, which need queueing and noqueue_qdisc for virtual interfaces */ if (dev->qdisc_sleeping == &noop_qdisc) {//若是當前的qdisc_sleeping是noop_qdisc,從新找一個流控操做指針 struct Qdisc *qdisc; if (dev->tx_queue_len) {//前提條件是發送隊列長度非0, 這正常狀況確定知足的 //對dev設備創建fifo處理, 只是缺省的網卡發送策略: FIFO, 先入先出 qdisc = qdisc_create_dflt(dev, &pfifo_fast_ops, TC_H_ROOT); if (qdisc == NULL) { printk(KERN_INFO "%s: activation failed\n", dev->name); return; } list_add_tail(&qdisc->list, &dev->qdisc_list); } else { qdisc = &noqueue_qdisc;//爲0,不要入隊出隊處理 } dev->qdisc_sleeping = qdisc;//對qdisc_sleeping賦值 } if (!netif_carrier_ok(dev))//若是如今網線沒插, 返回 /* Delay activation until next carrier-on event */ return; spin_lock_bh(&dev->queue_lock); rcu_assign_pointer(dev->qdisc, dev->qdisc_sleeping);//將網卡當前的qdisc賦值爲qdisc_sleeping所指的qdisc if (dev->qdisc != &noqueue_qdisc) {//啓動watchdog dev->trans_start = jiffies; dev_watchdog_up(dev); } spin_unlock_bh(&dev->queue_lock); } struct Qdisc * qdisc_create_dflt(struct net_device *dev, struct Qdisc_ops *ops, unsigned int parentid) { struct Qdisc *sch; sch = qdisc_alloc(dev, ops);//分配Qdisc結構 if (IS_ERR(sch)) goto errout; sch->stats_lock = &dev->queue_lock; sch->parent = parentid; if (!ops->init || ops->init(sch, NULL) == 0)//參考下面默認操做結構實現 return sch; qdisc_destroy(sch);//初始化失敗, 釋放Qdisc errout: return NULL; } 分配新的Qdisc結構, Qdisc的操做結構由函數參數指定 struct Qdisc *qdisc_alloc(struct net_device *dev, struct Qdisc_ops *ops) { void *p; struct Qdisc *sch; unsigned int size; int err = -ENOBUFS; /* ensure that the Qdisc and the private data are 32-byte aligned */ //#define QDISC_ALIGNTO 32 //#define QDISC_ALIGN(len) (((len) + QDISC_ALIGNTO-1) & ~(QDISC_ALIGNTO-1)) //32字節對齊 size = QDISC_ALIGN(sizeof(*sch)); size += ops->priv_size + (QDISC_ALIGNTO - 1);//增長私有數據空間 p = kzalloc(size, GFP_KERNEL); if (!p) goto errout; sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);//確保sch地址是32字節對齊的 sch->padded = (char *) sch - (char *) p;//地址移動後的偏移量 INIT_LIST_HEAD(&sch->list); 
skb_queue_head_init(&sch->q); //Qdisc結構參數 sch->ops = ops; sch->enqueue = ops->enqueue; sch->dequeue = ops->dequeue; sch->dev = dev; dev_hold(dev); atomic_set(&sch->refcnt, 1); return sch; errout: return ERR_PTR(-err); } [/初始化] [輸入流程] 輸入流控好象不是必須的,目前內核須要配置CONFIG_NET_CLS_ACT選項才起做用. int netif_receive_skb(struct sk_buff *skb) { ...... #ifdef CONFIG_NET_CLS_ACT if (skb->tc_verd & TC_NCLS) { skb->tc_verd = CLR_TC_NCLS(skb->tc_verd); goto ncls; } #endif ...... #ifdef CONFIG_NET_CLS_ACT skb = handle_ing(skb, &pt_prev, &ret, orig_dev); if (!skb) goto out; ncls: #endif ...... } static inline struct sk_buff *handle_ing(struct sk_buff *skb, struct packet_type **pt_prev, int *ret, struct net_device *orig_dev) { if (!skb->dev->qdisc_ingress)//若是網卡設備沒有輸入流控處理,參考下面輸入流量控制實現 goto out; if (*pt_prev) { *ret = deliver_skb(skb, *pt_prev, orig_dev); *pt_prev = NULL; } else { /* Huh? Why does turning on AF_PACKET affect this? */ skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd); } switch (ing_filter(skb)) { //輸入處理 case TC_ACT_SHOT: case TC_ACT_STOLEN: kfree_skb(skb); return NULL; } out: skb->tc_verd = 0; return skb; } static int ing_filter(struct sk_buff *skb) { struct Qdisc *q; struct net_device *dev = skb->dev; int result = TC_ACT_OK; u32 ttl = G_TC_RTTL(skb->tc_verd); if (MAX_RED_LOOP < ttl++) { printk(KERN_WARNING "Redir loop detected Dropping packet (%d->%d)\n", skb->iif, dev->ifindex); return TC_ACT_SHOT; } //設置數據包的TC參數 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl); skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS); spin_lock(&dev->ingress_lock); if ((q = dev->qdisc_ingress) != NULL) result = q->enqueue(skb, q);//數據入隊,參考下面輸入流控制實現 spin_unlock(&dev->ingress_lock); return result; } [/輸入流程] [輸出流程] 數據發出流控處理時,上層的全部處理已經完成,數據包已經交到網卡設備進行發送, 在數據發送時進行相關的流控處理網絡數據的出口函數爲dev_queue_xmit(); int dev_queue_xmit(struct sk_buff *skb) { ...... 
q = rcu_dereference(dev->qdisc);//獲取網卡的qdisc指針 #ifdef CONFIG_NET_CLS_ACT skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_EGRESS); #endif //若是隊列輸入函數非空, 將數據包入隊 //對於物理網卡設備, 缺省使用的是FIFO qdisc, 該成員函數非空, 只有虛擬網卡纔可能爲空 if (q->enqueue) { /* Grab device queue */ spin_lock(&dev->queue_lock); q = dev->qdisc;//能夠直接訪問dev->qdisc了 if (q->enqueue) { /* reset queue_mapping to zero */ skb_set_queue_mapping(skb, 0); //實現 skb->queue_mapping = 0 rc = q->enqueue(skb, q);//入隊處理 qdisc_run(dev);//運行流控, 出隊操做,發送數據 spin_unlock(&dev->queue_lock); rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc; goto out; } spin_unlock(&dev->queue_lock); } ...... } 出隊操做 static inline void qdisc_run(struct net_device *dev) { if (!netif_queue_stopped(dev) && !test_and_set_bit(__LINK_STATE_QDISC_RUNNING, &dev->state)) __qdisc_run(dev); } void __qdisc_run(struct net_device *dev) { do { if (!qdisc_restart(dev)) //發送數據 break; } while (!netif_queue_stopped(dev));//設備沒有中止發送 clear_bit(__LINK_STATE_QDISC_RUNNING, &dev->state); } static inline int qdisc_restart(struct net_device *dev) { struct Qdisc *q = dev->qdisc; struct sk_buff *skb; int ret = NETDEV_TX_BUSY; /* Dequeue packet */ if (unlikely((skb = dev_dequeue_skb(dev, q)) == NULL))//數據包出隊,主要是skb = q->dequeue(q) return 0; /* And release queue */ spin_unlock(&dev->queue_lock); HARD_TX_LOCK(dev, smp_processor_id()); //根據設備的特性 dev->features & NETIF_F_LLTX,能夠在發送時不上鎖 if (!netif_subqueue_stopped(dev, skb)) //設備子隊列沒有中止 ret = dev_hard_start_xmit(skb, dev);//發送數據 HARD_TX_UNLOCK(dev); spin_lock(&dev->queue_lock); q = dev->qdisc; switch (ret) { case NETDEV_TX_OK: /* Driver sent out skb successfully */ ret = qdisc_qlen(q); //返回隊列中還存在的skb的個數 break; case NETDEV_TX_LOCKED: /* Driver try lock failed */ ret = handle_dev_cpu_collision(skb, dev, q); //發送鎖已經被cpu獲取致使一個衝突,處理這個衝突 break; default: /* Driver returned NETDEV_TX_BUSY - requeue skb */ if (unlikely (ret != NETDEV_TX_BUSY && net_ratelimit())) printk(KERN_WARNING "BUG %s code %d qlen %d\n", dev->name, ret, q->q.qlen); //設備忙,從新入隊 
//dev_requeue_skb()內部調用q->ops->requeue(skb, q)將數據包從新入隊, 而後從新調度設備, 並返回0 ret = dev_requeue_skb(skb, dev, q); break; } return ret; } [/輸出流程] [默認操做結構實現] PFIFO_FAST是缺省的流控算法,網卡初始化時就是設置該算法爲網卡的流控算法, 算法比較簡單,就在net/sched/sch_generic.c中定義了,沒在單獨文件中定義. static struct Qdisc_ops pfifo_fast_ops = { .id = "pfifo_fast", .priv_size = PFIFO_FAST_BANDS * sizeof(struct sk_buff_head), .enqueue = pfifo_fast_enqueue, .dequeue = pfifo_fast_dequeue, .requeue = pfifo_fast_requeue, .init = pfifo_fast_init, .reset = pfifo_fast_reset, .dump = pfifo_fast_dump, .owner = THIS_MODULE, }; #define PFIFO_FAST_BANDS 3 初始化 static int pfifo_fast_init(struct Qdisc *qdisc, struct rtattr *opt) { int prio; struct sk_buff_head *list = qdisc_priv(qdisc);//qdisc私有數據指針, 數據包鏈表頭 //初始化3個鏈表頭 for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) skb_queue_head_init(list + prio); return 0; } 入隊 static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc) { //根據數據包的優先級參數挑一個隊列頭準備將數據包插入該隊列 struct sk_buff_head *list = prio2list(skb, qdisc); //若是當前隊列中的數據包數量小於網卡設備容許的輸出隊列的數量則將該數據包插入該隊列 if (skb_queue_len(list) < qdisc->dev->tx_queue_len) { qdisc->q.qlen++; return __qdisc_enqueue_tail(skb, qdisc, list); } return qdisc_drop(skb, qdisc);//不然的話丟棄該數據包 } //優先權值到隊列號的變換數組, 該數組體現算法內容, 經過修改該數組能夠調整算法效果 //該數組定義中, 優先值(低4位)爲1,2,3,5時使用2號隊列, 優先值(低4位)爲6,7時使用0號隊列, 其餘值爲1號隊列 //在普通狀況下skb->priority都是0, 因此應該只使用了1號隊列 //這個數組實際是根據RFC1349中定義的TOS類型值定義的, 在該RFC中TOS就是隻有4位有效 static const u8 prio2band[TC_PRIO_MAX+1] = { 1, 2, 2, 2, 1, 2, 0, 0 , 1, 1, 1, 1, 1, 1, 1, 1 }; 選隊列處理 static inline struct sk_buff_head *prio2list(struct sk_buff *skb, struct Qdisc *qdisc) { struct sk_buff_head *list = qdisc_priv(qdisc);//qdisc私有數據指針, 數據包鏈表頭 //根據數據包的優先權值肯定隊列頭偏移值 //skb->priority是個32位整數, 只使用最後4位 return list + prio2band[skb->priority & TC_PRIO_MAX]; } 出隊 static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc) { int prio; struct sk_buff_head *list = qdisc_priv(qdisc); //循環3個隊列 for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) { if (!skb_queue_empty(list + prio)) {//若是隊列非空, 返回隊列頭的那個數據包
qdisc->q.qlen--; return __qdisc_dequeue_head(qdisc, list + prio); } } return NULL; } 重入隊 static int pfifo_fast_requeue(struct sk_buff *skb, struct Qdisc* qdisc) { qdisc->q.qlen++;//隊列長度遞增 return __qdisc_requeue(skb, qdisc, prio2list(skb, qdisc));//使用標準重入隊函數將數據插回隊列鏈表 } 復位 static void pfifo_fast_reset(struct Qdisc* qdisc) { int prio; struct sk_buff_head *list = qdisc_priv(qdisc); //釋放三個隊列鏈表中的全部數據包 for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) __qdisc_reset_queue(qdisc, list + prio); //skb_queue_purge(list); //計數清零 qdisc->qstats.backlog = 0; qdisc->q.qlen = 0; } 輸出當前算法的內容信息, 因爲PFIFO_FAST算法核心就是prio2band數組, 所以就是將該數組內容輸出到數據包供用戶空間獲取 static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb) { struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };//TC優先權數組結構 memcpy(&opt.priomap, prio2band, TC_PRIO_MAX+1);//將當前prio2band數組內容拷貝到選項數據中 RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);//將結構做爲路由屬性複製到數據包中供返回 return skb->len; rtattr_failure: return -1; } [/默認操做結構實現] [用戶內核交互實現] 在net/sched/sch_api.c中有初始化函數 subsys_initcall(pktsched_init);啓動時自動初始化 static int __init pktsched_init(void) { //登記FIFO流控處理, 這是網卡設備基本流控方法, 缺省必有的 register_qdisc(&pfifo_qdisc_ops); register_qdisc(&bfifo_qdisc_ops); proc_net_fops_create(&init_net, "psched", 0, &psched_fops); //在註冊了一些流量控制操做後若是要使用,就要在用戶空間使用命令行工具配置 //而後和內核交互,告訴內核使用新的或改變一些流量控制操做(也就是改變了流量控制算法) //下面就是爲經過rtnetlink和用戶交互而註冊的函數和交互類型 rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL); //Qdisc操做, 也就是對應tc qdisc add/modify等操做 rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL); //刪除/獲取Qdisc操做 rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc);//獲取Qdisc信息, 也就是對應tc qdisc show //類別操做是經過tc class命令來完成的, 當網卡使用的流控算法是可分類的(如HTB, CBQ等)時候使用, 功能是對Qdisc根節點進行劃分, //定義出分類樹, 同時可定義每一個類別的流量限制參數,但具體那些數據屬於哪一類則是經過tc filter命令來實現。 //class操做, 也就是對應tc class add/delete/modify/get等操做 rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL); rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL); rtnl_register(PF_UNSPEC, RTM_GETTCLASS, 
tc_ctl_tclass, tc_dump_tclass); return 0; } 建立/修改qdisc static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) { struct tcmsg *tcm; struct rtattr **tca; struct net_device *dev; u32 clid; struct Qdisc *q, *p; int err; replay: /* Reinit, just in case something touches this. */ tcm = NLMSG_DATA(n); //從netlink中取出數據,tc消息指針 tca = arg; clid = tcm->tcm_parent;//父類別id q = p = NULL; if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL) //經過索引找設備 return -ENODEV; if (clid) {//指定了父類別ID的狀況 if (clid != TC_H_ROOT) {//若是不是根節點 if (clid != TC_H_INGRESS) { //非ingress節點時, 根據類別ID的高16位查找Qdisc節點 if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL) return -ENOENT; q = qdisc_leaf(p, clid);//獲取p節點的葉子節點, 將調用p->ops->cl_ops->leaf()函數 } else { /*ingress */ q = dev->qdisc_ingress;//使用設備ingress流控 } } else { q = dev->qdisc_sleeping;//根節點狀況下流控用的是設備的qdisc_sleeping } /* It may be default qdisc, ignore it */ //若是找到的Qdisc的句柄爲0, 放棄q if (q && q->handle == 0) q = NULL; //沒找到Qdisc節點, 或沒在tc消息中指定句柄值, 或者找到的Qdisc句柄和tc消息中的句柄不一樣 if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) { if (tcm->tcm_handle) {//TC指定了句柄 //若是Qdisc存在但不是更新命令, 返回對象存在錯誤 if (q && !(n->nlmsg_flags & NLM_F_REPLACE)) return -EEXIST; if (TC_H_MIN(tcm->tcm_handle))//TC句柄低16位檢測 return -EINVAL; //根據TC句柄查找該設備上的Qdisc, 找不到的話跳轉到建立新節點操做 if ((q = qdisc_lookup(dev, tcm->tcm_handle)) == NULL) goto create_n_graft; if (n->nlmsg_flags & NLM_F_EXCL)//找到但設置了NLM_F_EXCL排斥標誌, 返回對象存在錯誤 return -EEXIST; //比較TC命令中的算法名稱和Qdisc名稱算法相同 if (tca[TCA_KIND-1] && rtattr_strcmp(tca[TCA_KIND-1], q->ops->id)) return -EINVAL; //檢查算法出現迴環狀況, p是用clid找到的Qdisc if (q == p || (p && check_loop(q, p, 0))) return -ELOOP; //新找到的Qdisc有效, 轉到嫁接操做 atomic_inc(&q->refcnt); goto graft; } else { if (q == NULL)//沒指定TC句柄, 若是沒找到Qdisc, 跳轉到建立新節點 goto create_n_graft; //檢查各類標誌是否衝突, Qdisc名稱是否正確 if ((n->nlmsg_flags&NLM_F_CREATE) && (n->nlmsg_flags&NLM_F_REPLACE) && ((n->nlmsg_flags&NLM_F_EXCL) || (tca[TCA_KIND-1] && rtattr_strcmp(tca[TCA_KIND-1], q->ops->id)))) 
goto create_n_graft; } } } else {//若是沒指定父類別ID, 從tc消息的句柄來查找Qdisc if (!tcm->tcm_handle) return -EINVAL; q = qdisc_lookup(dev, tcm->tcm_handle); } /* Change qdisc parameters */ //到這裏是屬於Qdisc修改操做 if (q == NULL)//沒找到Qdisc節點, 返回錯誤 return -ENOENT; if (n->nlmsg_flags&NLM_F_EXCL)//找到Qdisc節點, 但設置了NLM_F_EXCL(排斥)標誌, 返回對象存在錯誤 return -EEXIST; //檢查找到的Qdisc節點的名稱和tc中指定的是否匹配 if (tca[TCA_KIND-1] && rtattr_strcmp(tca[TCA_KIND-1], q->ops->id)) return -EINVAL; err = qdisc_change(q, tca);//修改Qdisc參數 if (err == 0) qdisc_notify(skb, n, clid, NULL, q); return err; create_n_graft: if (!(n->nlmsg_flags&NLM_F_CREATE))//若是TC命令中沒有建立標誌, 返回錯誤 return -ENOENT; if (clid == TC_H_INGRESS) //建立輸入流量控制 q = qdisc_create(dev, tcm->tcm_parent, tcm->tcm_parent, tca, &err); else //建立輸出流量控制 q = qdisc_create(dev, tcm->tcm_parent, tcm->tcm_handle, tca, &err); if (q == NULL) {//建立失敗, 若是不是EAGAIN(重來一次), 返回失敗 if (err == -EAGAIN) goto replay; return err; } graft://嫁接操做 if (1) { struct Qdisc *old_q = NULL; err = qdisc_graft(dev, p, clid, q, &old_q);//新的Qdisc節點添加到父節點做爲葉節點 if (err) { if (q) { qdisc_lock_tree(dev); qdisc_destroy(q); qdisc_unlock_tree(dev); } return err; } qdisc_notify(skb, n, clid, old_q, q);// Qdisc通告 if (old_q) { qdisc_lock_tree(dev); qdisc_destroy(old_q); qdisc_unlock_tree(dev); } } return 0; } 在指定的網卡設備上建立新的Qdisc結構 static struct Qdisc * qdisc_create(struct net_device *dev, u32 parent, u32 handle, struct rtattr **tca, int *errp) { int err; struct rtattr *kind = tca[TCA_KIND-1]; struct Qdisc *sch; struct Qdisc_ops *ops; ops = qdisc_lookup_ops(kind);//查找相關的Qdisc操做結構,根據id #ifdef CONFIG_KMOD if (ops == NULL && kind != NULL) {//若是沒在當前內核中找到,加載相關名稱的Qdisc操做內核模塊 char name[IFNAMSIZ]; if (rtattr_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) { /* We dropped the RTNL semaphore in order to perform the module load. So, even if we * succeeded in loading the module we have to tell the caller to replay the request. We * indicate this using -EAGAIN. We replay the request because the device may * go away in the mean time. 
*/ rtnl_unlock(); request_module("sch_%s", name);//加載模塊 rtnl_lock(); ops = qdisc_lookup_ops(kind);//從新查找 if (ops != NULL) { /* We will try again qdisc_lookup_ops, so don't keep a reference. */ module_put(ops->owner); err = -EAGAIN; goto err_out; } } } #endif err = -ENOENT; if (ops == NULL) goto err_out; //咱們假定找到的操做是輸入流量控制操做,實現看下面 sch = qdisc_alloc(dev, ops);//分配新的Qdisc結構,咱們已經看到過 if (IS_ERR(sch)) { err = PTR_ERR(sch); goto err_out2; } sch->parent = parent; if (handle == TC_H_INGRESS) {//是針對輸入進行流控 sch->flags |= TCQ_F_INGRESS; sch->stats_lock = &dev->ingress_lock; handle = TC_H_MAKE(TC_H_INGRESS, 0); } else { sch->stats_lock = &dev->queue_lock; if (handle == 0) { handle = qdisc_alloc_handle(dev); //分配一個惟一的句柄 err = -ENOMEM; if (handle == 0) goto err_out3; } } sch->handle = handle; //調用Qdisc操做結構的初始化函數進行初始化,參考下面輸入流量控制實現 if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS-1])) == 0) { if (tca[TCA_RATE-1]) { //用戶傳來了速率屬性 //建立預估器 err = gen_new_estimator(&sch->bstats, &sch->rate_est, sch->stats_lock, tca[TCA_RATE-1]); if (err) { /* Any broken qdiscs that would require a ops->reset() here? The qdisc was never * in action so it shouldn't be necessary.
*/ if (ops->destroy) ops->destroy(sch); goto err_out3; } } qdisc_lock_tree(dev); list_add_tail(&sch->list, &dev->qdisc_list);//將Qdisc結構添加到網卡設備的流控鏈表 qdisc_unlock_tree(dev); return sch; } err_out3: dev_put(dev); kfree((char *) sch - sch->padded); err_out2: module_put(ops->owner); err_out: *errp = err; return NULL; } "嫁接"Qdisc, 將新的Qdisc節點添加到父節點做爲葉節點 static int qdisc_graft(struct net_device *dev, struct Qdisc *parent, u32 classid, struct Qdisc *new, struct Qdisc **old) { int err = 0; struct Qdisc *q = *old; if (parent == NULL) {//父qdisc節點爲空, 將新節點做爲dev的基本qdisc, 返回dev原來的老的qdisc if (q && q->flags & TCQ_F_INGRESS) { *old = dev_graft_qdisc(dev, q); } else { *old = dev_graft_qdisc(dev, new); } } else {//父qdisc非空狀況 //將使用Qdisc類操做結構中的相關成員函數來完成操做 struct Qdisc_class_ops *cops = parent->ops->cl_ops; err = -EINVAL; if (cops) { unsigned long cl = cops->get(parent, classid);//獲取類別句柄值 if (cl) { //類別有效, 調用graft成員函數將新節點插入qdisc樹中 err = cops->graft(parent, cl, new, old); cops->put(parent, cl); } } } return err; } 將qdisc做爲頂層Qdisc節點附着於dev設備 static struct Qdisc * dev_graft_qdisc(struct net_device *dev, struct Qdisc *qdisc) { struct Qdisc *oqdisc; if (dev->flags & IFF_UP)//若是網卡設備是啓動的, 先停掉 dev_deactivate(dev); qdisc_lock_tree(dev);//加樹鎖 if (qdisc && qdisc->flags&TCQ_F_INGRESS) {//是處理輸入的流控節點 oqdisc = dev->qdisc_ingress; /* Prune old scheduler */ if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1) { /* delete */ qdisc_reset(oqdisc); dev->qdisc_ingress = NULL; } else { /* new */ dev->qdisc_ingress = qdisc; //第一次安裝 } } else {//處理輸出 oqdisc = dev->qdisc_sleeping; /* Prune old scheduler */ if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1) qdisc_reset(oqdisc); /* ... 
and graft new one */ if (qdisc == NULL) qdisc = &noop_qdisc; dev->qdisc_sleeping = qdisc;//將睡眠qdisc(dev啓動時將使用的qdisc)賦值爲新的qdisc dev->qdisc = &noop_qdisc; } qdisc_unlock_tree(dev); if (dev->flags & IFF_UP) dev_activate(dev);//激活網卡 return oqdisc;//返回dev設備的老的qdisc } 獲取/刪除qdisc static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) { struct tcmsg *tcm = NLMSG_DATA(n); struct rtattr **tca = arg; struct net_device *dev; u32 clid = tcm->tcm_parent; struct Qdisc *q = NULL; struct Qdisc *p = NULL; int err; //根據TC參數中的網卡索引號查找網卡設備 if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL) return -ENODEV; if (clid) {//根據類別ID或TC句柄查找Qdisc, 和上面函數相似 if (clid != TC_H_ROOT) { if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) { if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL) return -ENOENT; q = qdisc_leaf(p, clid); } else { q = dev->qdisc_ingress; } } else { q = dev->qdisc_sleeping; } if (!q) return -ENOENT; if (tcm->tcm_handle && q->handle != tcm->tcm_handle) return -EINVAL; } else { if ((q = qdisc_lookup(dev, tcm->tcm_handle)) == NULL) return -ENOENT; } //檢查找到的Qdisc名稱和TC命令中指定的是否一致 if (tca[TCA_KIND-1] && rtattr_strcmp(tca[TCA_KIND-1], q->ops->id)) return -EINVAL; if (n->nlmsg_type == RTM_DELQDISC) {//刪除Qdisc操做 if (!clid)//必須指定類別ID return -EINVAL; if (q->handle == 0)//若是找到的Qdisc句柄爲0, 返回錯誤 return -ENOENT; //進行Qdisc嫁接操做, 新節點是NULL, 即將葉子節點替換爲NULL, 即刪除了原葉子節點 //原葉子節點返回到q if ((err = qdisc_graft(dev, p, clid, NULL, &q)) != 0) return err; if (q) {//釋放原葉子節點 qdisc_notify(skb, n, clid, q, NULL); qdisc_lock_tree(dev); qdisc_destroy(q); qdisc_unlock_tree(dev); } } else {//非刪除操做, 通告一下, q做爲得到的Qdisc參數返回 qdisc_notify(skb, n, clid, NULL, q); } return 0; } 輸出網卡qdisc參數 static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb) { int idx, q_idx; int s_idx, s_q_idx; struct net_device *dev; struct Qdisc *q; s_idx = cb->args[0];//起始網卡索引 s_q_idx = q_idx = cb->args[1];//起始Qdisc索引 read_lock(&dev_base_lock); idx = 0; //遍歷全部網卡 for_each_netdev(&init_net, dev) { 
//索引值小於所提供的起始索引值, 跳過這個索引和網卡的索引號應該沒啥關係 if (idx < s_idx) goto cont; if (idx > s_idx)//索引值大於所提供的起始索引值, 將起始Qdisc索引清零 s_q_idx = 0; q_idx = 0; list_for_each_entry(q, &dev->qdisc_list, list) {//遍歷該網卡設備的全部Qdisc //當前Qdisc索引小於起始Qdisc索引, 跳過 if (q_idx < s_q_idx) { q_idx++; continue; } //填充Qdisc信息到數據包 if (tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0) goto done; q_idx++; } cont: idx++; } done: read_unlock(&dev_base_lock); //返回處理的全部網卡數和Qdisc數 cb->args[0] = idx; cb->args[1] = q_idx; return skb->len; } 類別控制操做 static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg) { struct tcmsg *tcm = NLMSG_DATA(n); struct rtattr **tca = arg; struct net_device *dev; struct Qdisc *q = NULL; struct Qdisc_class_ops *cops; unsigned long cl = 0; unsigned long new_cl; u32 pid = tcm->tcm_parent; u32 clid = tcm->tcm_handle; u32 qid = TC_H_MAJ(clid);//qdisc id: 初始化位類別id的高16位 int err; //根據TC信息中的網卡索引值查找網卡 if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL) return -ENODEV; /* 如下是tc class的parent參數取值的說明 parent == TC_H_UNSPEC - unspecified parent. parent == TC_H_ROOT - class is root, which has no parent. parent == X:0 - parent is root class. parent == X:Y - parent is a node in hierarchy. parent == 0:Y - parent is X:Y, where X:0 is qdisc. 如下是tc class的classid參數取值的說明 handle == 0:0 - generate handle from kernel pool. handle == 0:Y - class is X:Y, where X:0 is qdisc. handle == X:Y - clear. handle == X:0 - root class. */ /* Step 1. Determine qdisc handle X:0 */ if (pid != TC_H_ROOT) {//parent id非根節點的狀況 u32 qid1 = TC_H_MAJ(pid); if (qid && qid1) { /* If both majors are known, they must be identical. */ if (qid != qid1) return -EINVAL; } else if (qid1) { qid = qid1; } else if (qid == 0) qid = dev->qdisc_sleeping->handle; /* Now qid is genuine qdisc handle consistent both with parent and child. TC_H_MAJ(pid) still may be unspecified, complete it now. 
*/ if (pid) pid = TC_H_MAKE(qid, pid); } else {//爲根節點, 若是當前qid爲0, 更新爲設備的qdisc_sleeping的handle if (qid == 0) qid = dev->qdisc_sleeping->handle; } /* OK. Locate qdisc */ //根據qid查找該dev上的Qdisc指針, 找不到的話返回失敗 if ((q = qdisc_lookup(dev, qid)) == NULL) return -ENOENT; /* An check that it supports classes */ //獲取Qdisc的類別操做指針 cops = q->ops->cl_ops; if (cops == NULL)//若是Qdisc是非分類的, 類別操做結構指針位空, 返回失敗 return -EINVAL; /* Now try to get class */ //生成合法的類別ID if (clid == 0) { if (pid == TC_H_ROOT) clid = qid; } else clid = TC_H_MAKE(qid, clid); //若是clid非0, 調用get函數獲取該類別, 增長類別的引用計數 //cl雖然定義是unsigned long, 但實際是個指針的數值 if (clid) cl = cops->get(q, clid); if (cl == 0) {//類別爲空 err = -ENOENT; //若是netlink命令不是新建類別的話, 返回錯誤 if (n->nlmsg_type != RTM_NEWTCLASS || !(n->nlmsg_flags&NLM_F_CREATE)) goto out; } else { switch (n->nlmsg_type) {//獲取類別成功, 根據netlink命令類型進行相關操做 case RTM_NEWTCLASS://新建class err = -EEXIST; if (n->nlmsg_flags&NLM_F_EXCL)//若是設置了互斥標誌, 返回錯誤, 由於如今該class已經存在 goto out; break; case RTM_DELTCLASS://刪除class err = cops->delete(q, cl); if (err == 0) tclass_notify(skb, n, q, cl, RTM_DELTCLASS); goto out; case RTM_GETTCLASS://獲取class信息, 進行class通知操做 err = tclass_notify(skb, n, q, cl, RTM_NEWTCLASS); goto out; default: err = -EINVAL; goto out; } } new_cl = cl; //不管是新建仍是修改class參數, 都是調用類別操做結構的change函數 err = cops->change(q, clid, pid, tca, &new_cl); if (err == 0) tclass_notify(skb, n, q, new_cl, RTM_NEWTCLASS); out: if (cl) cops->put(q, cl); return err; } 類別輸出 static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb) { int t; int s_t; struct net_device *dev; struct Qdisc *q; struct tcmsg *tcm = (struct tcmsg*)NLMSG_DATA(cb->nlh); struct qdisc_dump_args arg; //輸入數據長度檢查 if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm))) return 0; if ((dev = dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)//查找網卡設備 return 0; s_t = cb->args[0];//起始class索引 t = 0; list_for_each_entry(q, &dev->qdisc_list, list) { //當前索引號小於起始索引號, 或者當前Qdisc是非分類的, 或者句柄handle不匹配, 跳過 if (t < s_t || !q->ops->cl_ops || 
(tcm->tcm_parent && TC_H_MAJ(tcm->tcm_parent) != q->handle)) { t++; continue; } if (t > s_t)//索引號超過了起始索引號, 將從數組1號開始的數據緩衝區清零 memset(&cb->args[1], 0, sizeof(cb->args) - sizeof(cb->args[0])); //填寫arg結構參數,輸出單個class函數 arg.w.fn = qdisc_class_dump; arg.skb = skb;//數據包指針 arg.cb = cb;//控制塊指針 //遍歷結構walker參數 arg.w.stop = 0; arg.w.skip = cb->args[1]; arg.w.count = 0; q->ops->cl_ops->walk(q, &arg.w);//調用Qdisc類別操做結構的walk函數遍歷該Qdisc全部類別 cb->args[1] = arg.w.count;//記錄處理的類別數 if (arg.w.stop)//若是設置了中止標誌, 退出循環 break; t++; } cb->args[0] = t;//跳過的Qdisc數, 有的Qdisc多是跳過沒處理的 dev_put(dev); return skb->len; } [/用戶內核交互實現] [輸入流量控制實現] 如今咱們看一下加入咱們使用輸入流量控制的狀況. 在net/sched/sch_ingress.c中定義了輸入流量控制實現. 模塊初始化 static int __init ingress_module_init(void) { int ret = 0; if ((ret = register_qdisc(&ingress_qdisc_ops)) < 0) { //註冊這個操做 printk("Unable to register Ingress qdisc\n"); return ret; } return ret; } static struct Qdisc_ops *qdisc_base; int register_qdisc(struct Qdisc_ops *qops) { struct Qdisc_ops *q, **qp; int rc = -EEXIST; write_lock(&qdisc_mod_lock); //查找看是否有重複 for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next) if (!strcmp(qops->id, q->id)) goto out; //三個主要函數若是有空就設置爲僞操做(函數什麼也不作) if (qops->enqueue == NULL) qops->enqueue = noop_qdisc_ops.enqueue; if (qops->requeue == NULL) qops->requeue = noop_qdisc_ops.requeue; if (qops->dequeue == NULL) qops->dequeue = noop_qdisc_ops.dequeue; qops->next = NULL; *qp = qops; //添加到最後一項 rc = 0; out: write_unlock(&qdisc_mod_lock); return rc; } ingress私有數據結構 struct ingress_qdisc_data { struct Qdisc *q;//內部流控 struct tcf_proto *filter_list;//過濾規則 }; ingress流控操做結構 static struct Qdisc_ops ingress_qdisc_ops = { .next = NULL, .cl_ops = &ingress_class_ops, //類別操做 .id = "ingress", //查找使用 .priv_size = sizeof(struct ingress_qdisc_data), .enqueue = ingress_enqueue, .dequeue = ingress_dequeue, .requeue = ingress_requeue, .drop = ingress_drop, .init = ingress_init, .reset = ingress_reset, .destroy = ingress_destroy, .change = NULL, .dump = ingress_dump, .owner = THIS_MODULE, }; 
初始化,在建立Qdisc時被調用 static int ingress_init(struct Qdisc *sch, struct rtattr *opt) { struct ingress_qdisc_data *p = PRIV(sch); /* Make sure either netfilter or preferably CLS_ACT is compiled in */ //輸入接收流控必須在內核選項中定義分類動做NET_CLS_ACT或NETFILTER #ifndef CONFIG_NET_CLS_ACT #ifndef CONFIG_NETFILTER printk("You MUST compile classifier actions into the kernel\n"); return -EINVAL; #else//使用NET_CLS_ACT優於使用NETFILTER printk("Ingress scheduler: Classifier actions prefered over netfilter\n"); #endif #endif //沒定義NET_CLS_ACT, 而定義了NETFILTER的狀況,登記輸入流控的netfilter鉤子節點函數 //定義了NET_CLS_ACT就不實用netfilter的hook函數了,在netif_receive_skb中就會處理了,看上面輸入流程. #ifndef CONFIG_NET_CLS_ACT #ifdef CONFIG_NETFILTER if (!nf_registered) { if (nf_register_hook(&ing_ops) < 0) { printk("ingress qdisc registration error \n"); return -EINVAL; } nf_registered++;//非0表示已經登記了 if (nf_register_hook(&ing6_ops) < 0) { printk("IPv6 ingress qdisc registration error, disabling IPv6 support.\n"); } else nf_registered++; } #endif #endif DPRINTK("ingress_init(sch %p,[qdisc %p],opt %p)\n",sch,p,opt); p->q = &noop_qdisc;//初始內部流控初始化爲noop, 丟包使用的 return 0; } hook函數 static unsigned int ing_hook(unsigned int hook, struct sk_buff *skb, const struct net_device *indev, const struct net_device *outdev, int (*okfn)(struct sk_buff *)) { struct Qdisc *q; struct net_device *dev = skb->dev; int fwres = NF_ACCEPT; DPRINTK("ing_hook: skb %s dev=%s len=%u\n", skb->sk ? "(owned)" : "(unowned)", skb->dev ? 
skb->dev->name : "(no dev)", skb->len); if (dev->qdisc_ingress) { //有輸入控制 spin_lock(&dev->ingress_lock); if ((q = dev->qdisc_ingress) != NULL) fwres = q->enqueue(skb, q); //入隊 spin_unlock(&dev->ingress_lock); } return fwres; } static int ingress_enqueue(struct sk_buff *skb,struct Qdisc *sch) { struct ingress_qdisc_data *p = PRIV(sch);//接收流控私有數據 struct tcf_result res;//分類結果 int result; result = tc_classify(skb, p->filter_list, &res);//對數據進行分類,參考過濾操做實現 #ifdef CONFIG_NET_CLS_ACT //在定義了NET_CLS_ACT的狀況, 這時不會返回NF_*, 而是返回TC_ACT_* sch->bstats.packets++; sch->bstats.bytes += skb->len; switch (result) { case TC_ACT_SHOT: result = TC_ACT_SHOT; sch->qstats.drops++; break; case TC_ACT_STOLEN: case TC_ACT_QUEUED: result = TC_ACT_STOLEN; break; case TC_ACT_RECLASSIFY: case TC_ACT_OK: skb->tc_index = TC_H_MIN(res.classid);//接受, 計算tc_index default: result = TC_ACT_OK; break; } #else result = NF_ACCEPT; sch->bstats.packets++; sch->bstats.bytes += skb->len; #endif return result; } int tc_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_result *res) { int err = 0; __be16 protocol; #ifdef CONFIG_NET_CLS_ACT struct tcf_proto *otp = tp; reclassify: #endif protocol = skb->protocol; err = tc_classify_compat(skb, tp, res); //分類比較 #ifdef CONFIG_NET_CLS_ACT if (err == TC_ACT_RECLASSIFY) { u32 verd = G_TC_VERD(skb->tc_verd); tp = otp; if (verd++ >= MAX_REC_LOOP) { printk("rule prio %u protocol %02x reclassify loop, packet dropped\n", tp->prio&0xffff, ntohs(tp->protocol)); return TC_ACT_SHOT; } skb->tc_verd = SET_TC_VERD(skb->tc_verd, verd); goto reclassify; } #endif return err; } int tc_classify_compat(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_result *res) { __be16 protocol = skb->protocol; int err = 0; for (; tp; tp = tp->next) { //循環全部過濾操做,調用相關分類操做 if ((tp->protocol == protocol || tp->protocol == htons(ETH_P_ALL)) && (err = tp->classify(skb, tp, res)) >= 0) { #ifdef CONFIG_NET_CLS_ACT if (err != TC_ACT_RECLASSIFY && skb->tc_verd) skb->tc_verd = 
SET_TC_VERD(skb->tc_verd, 0); #endif return err; } } return -1; } 重入隊,什麼也不作 static int ingress_requeue(struct sk_buff *skb,struct Qdisc *sch) { return 0; } 出隊,空函數,由於不會有真正的dequeue操做 static struct sk_buff *ingress_dequeue(struct Qdisc *sch) { return 0; } 丟包 static unsigned int ingress_drop(struct Qdisc *sch) { return 0; } 復位 static void ingress_reset(struct Qdisc *sch) { struct ingress_qdisc_data *p = PRIV(sch); qdisc_reset(p->q);//復位內部流控節點 } 釋放 static void ingress_destroy(struct Qdisc *sch) { struct ingress_qdisc_data *p = PRIV(sch); tcf_destroy_chain(p->filter_list);//釋放TC過濾規則 } 輸出參數 static int ingress_dump(struct Qdisc *sch, struct sk_buff *skb) { unsigned char *b = skb_tail_pointer(skb); struct rtattr *rta; rta = (struct rtattr *) b; RTA_PUT(skb, TCA_OPTIONS, 0, NULL);//起始什麼數據也沒有 rta->rta_len = skb_tail_pointer(skb) - b; return skb->len; rtattr_failure: nlmsg_trim(skb, b); return -1; } ingress類別操做結構 static struct Qdisc_class_ops ingress_class_ops = { .graft = ingress_graft, //嫁接, 增長葉子節點,恆返回1 .leaf = ingress_leaf, //直接返回 NULL .get = ingress_get, //計數增長, 使用類別ID計算 return TC_H_MIN(classid) + 1; .put = ingress_put, //什麼也沒有 .change = ingress_change, //直接返回0 .delete = NULL, .walk = ingress_walk, //遍歷,什麼也沒有 .tcf_chain = ingress_find_tcf, .bind_tcf = ingress_bind_filter, .unbind_tcf = ingress_put, .dump = NULL, }; 獲取TC過濾規則表 static struct tcf_proto **ingress_find_tcf(struct Qdisc *sch,unsigned long cl) { struct ingress_qdisc_data *p = PRIV(sch); return &p->filter_list; } 綁定TC過濾規則表 static unsigned long ingress_bind_filter(struct Qdisc *sch, unsigned long parent, u32 classid) { return ingress_get(sch, classid); } [/輸入流量控制實現] [filter操做實現] tc filter命令是用來定義數據包進行分類的命令, 中間就要用到各類匹配條件, 其功能就象netfilter的match同樣, filter的處理和class的處理是緊密聯繫在一塊兒的,用於完成對數據包的分類。 tc過濾協議結構 struct tcf_proto { /* Fast access part */ struct tcf_proto *next;//鏈表中的下一項 void *root;//根節點 //分類操做函數, 一般是tcf_proto_ops的classify函數, 就象Qdisc結構中的enqueue就是 //Qdisc_class_ops中的enqueue同樣, 目的是向上層隱藏tcf_proto_ops結構 int 
(*classify)(struct sk_buff*, struct tcf_proto*, struct tcf_result *); __be16 protocol;//協議 /* All the rest */ u32 prio;//優先權 u32 classid;//類別ID struct Qdisc *q;//流控節點 void *data;//私有數據 struct tcf_proto_ops *ops;//filter操做結構 }; filter操做結構, 實際就是定義匹配操做, 一般每一個匹配操做都由一個靜態tcf_proto_ops結構定義, 做爲一個內核模塊, 初始化事登記系統的鏈表 struct tcf_proto_ops { struct tcf_proto_ops *next;//鏈表中的下一項 char kind[IFNAMSIZ];//名稱 int (*classify)(struct sk_buff*, struct tcf_proto*, struct tcf_result *);//分類操做 int (*init)(struct tcf_proto*);//初始化 void (*destroy)(struct tcf_proto*);//釋放 unsigned long (*get)(struct tcf_proto*, u32 handle);//獲取, 增長引用 void (*put)(struct tcf_proto*, unsigned long);//減小引用 int (*change)(struct tcf_proto*, unsigned long, u32 handle, struct rtattr **, unsigned long *);//參數修改 int (*delete)(struct tcf_proto*, unsigned long);//刪除 void (*walk)(struct tcf_proto*, struct tcf_walker *arg);//遍歷 /* rtnetlink specific */ // 輸出 int (*dump)(struct tcf_proto*, unsigned long, struct sk_buff *skb, struct tcmsg*); struct module *owner; }; filter操做結果, 返回分類結果: 類別和類別ID struct tcf_result { unsigned long class; u32 classid; }; 初始化 static int __init tc_filter_init(void) //net/sched/cls_api.c { //定義filter操做處理函數,增長/刪除/獲取等操做 rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_ctl_tfilter, NULL); rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_ctl_tfilter, NULL); rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_ctl_tfilter, tc_dump_tfilter); return 0; } 用於增長, 修改, 刪除, 獲取過濾結構 static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg) { struct rtattr **tca; struct tcmsg *t; u32 protocol; u32 prio; u32 nprio; u32 parent; struct net_device *dev; struct Qdisc *q; struct tcf_proto **back, **chain; struct tcf_proto *tp; struct tcf_proto_ops *tp_ops; struct Qdisc_class_ops *cops; unsigned long cl; unsigned long fh; int err; replay: tca = arg; t = NLMSG_DATA(n); //TC信息的低16位是協議, 高16位是優先權 protocol = TC_H_MIN(t->tcm_info); prio = TC_H_MAJ(t->tcm_info); nprio = prio;//備份優先權參數 parent = t->tcm_parent; cl = 0; if (prio == 
0) {//若是沒指定優先權值, 在新建filter狀況下構造一個缺省值, 其餘狀況則是錯誤 /* If no priority is given, user wants we allocated it. */ if (n->nlmsg_type != RTM_NEWTFILTER || !(n->nlmsg_flags&NLM_F_CREATE)) return -ENOENT; prio = TC_H_MAKE(0x80000000U,0U); } /* Find head of filter chain. */ /* Find link */ if ((dev = __dev_get_by_index(&init_net, t->tcm_ifindex)) == NULL)//查找網卡設備 return -ENODEV; /* Find qdisc */ //查找網卡所用的Qdisc if (!parent) { q = dev->qdisc_sleeping;//根節點的狀況, 使用qdisc_sleeping parent = q->handle; } else if ((q = qdisc_lookup(dev, TC_H_MAJ(t->tcm_parent))) == NULL)//非根節點的話根據handle查找 return -EINVAL; /* Is it classful? */ if ((cops = q->ops->cl_ops) == NULL)//若是該流控不支持分類操做, 返回失敗 return -EINVAL; /* Do we search for filter, attached to class? */ if (TC_H_MIN(parent)) {//低16位是子類別值 cl = cops->get(q, parent);//獲取類別結構, cl實際就是結構指針轉的unsigned long值 if (cl == 0) return -ENOENT; } /* And the last stroke */ chain = cops->tcf_chain(q, cl);//獲取過濾規則鏈表頭地址, 由於是地址的地址, 因此這個值基本不該該是空的 err = -EINVAL; if (chain == NULL) goto errout; /* Check the chain for existence of proto-tcf with this priority */ //遍歷規則鏈表, 這個鏈表是有序表, 由小到大 for (back = chain; (tp=*back) != NULL; back = &tp->next) { if (tp->prio >= prio) {//若是某過濾規則的優先權值大於指定的prio if (tp->prio == prio) {//若是優先權相同 if (!nprio || (tp->protocol != protocol && protocol)) goto errout; } else//不然優先權不一樣, 沒有相同的優先權的節點, tp置爲空 tp = NULL; break; } } //退出循環時, *back指向要鏈表中插入的位置後面那個的節點 if (tp == NULL) {//tp爲空, 當前規則中不存在指定優先權的節點 /* Proto-tcf does not exist, create new one */ if (tca[TCA_KIND-1] == NULL || !protocol)//若是參數不全, 返回失敗 goto errout; err = -ENOENT; if (n->nlmsg_type != RTM_NEWTFILTER || !(n->nlmsg_flags&NLM_F_CREATE))//若是不是新建命令, 返回失敗 goto errout; /* Create new proto tcf */ err = -ENOBUFS; if ((tp = kzalloc(sizeof(*tp), GFP_KERNEL)) == NULL)//分配新的tcf_proto結構節點 goto errout; err = -EINVAL; tp_ops = tcf_proto_lookup_ops(tca[TCA_KIND-1]);//根據名稱查找tp操做結構, 好比rsvp, u32, fw(參看下面)等 if (tp_ops == NULL) { #ifdef CONFIG_KMOD //若是當前內核中沒找到的話, 使用模塊方式加載後從新查找 struct rtattr *kind = 
tca[TCA_KIND-1]; char name[IFNAMSIZ]; //檢查一下名稱是否合法 if (kind != NULL && rtattr_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) { rtnl_unlock(); request_module("cls_%s", name); rtnl_lock(); tp_ops = tcf_proto_lookup_ops(kind); if (tp_ops != NULL) { module_put(tp_ops->owner); err = -EAGAIN; } } #endif //釋放tcf_proto空間, 返回失敗值 kfree(tp); goto errout; } //設置結構各參數 tp->ops = tp_ops; tp->protocol = protocol; tp->prio = nprio ? : tcf_auto_prio(*back); tp->q = q; //classify函數賦值 tp->classify = tp_ops->classify; tp->classid = parent; if ((err = tp_ops->init(tp)) != 0) {//調用tp_ops的初始化函數初始化 module_put(tp_ops->owner); kfree(tp); goto errout; } qdisc_lock_tree(dev); //將tp插入*back節點前面 tp->next = *back; *back = tp; qdisc_unlock_tree(dev); //找到了節點, 比較一下名稱, 不一樣的話返回錯誤 } else if (tca[TCA_KIND-1] && rtattr_strcmp(tca[TCA_KIND-1], tp->ops->kind)) goto errout; //獲取與t->tcm_handle對應的filter fh = tp->ops->get(tp, t->tcm_handle); if (fh == 0) {//獲取filter失敗 if (n->nlmsg_type == RTM_DELTFILTER && t->tcm_handle == 0) {//若是是刪除命令, 並且TC信息的句柄爲0, 則可認爲刪除操做是成功的 qdisc_lock_tree(dev); *back = tp->next;//將找到的tp從鏈表中斷開 qdisc_unlock_tree(dev); tfilter_notify(skb, n, tp, fh, RTM_DELTFILTER);//刪除通告, 釋放tp tcf_destroy(tp); err = 0;//命令成功 goto errout; } //若是不是新建filter的話, 沒找到filter就表示失敗 err = -ENOENT; if (n->nlmsg_type != RTM_NEWTFILTER || !(n->nlmsg_flags&NLM_F_CREATE)) goto errout; } else {//找到filter, 根據命令類型進行操作 switch (n->nlmsg_type) { case RTM_NEWTFILTER://新建filter, 若是定義了互斥標誌, 返回錯誤, 由於filter已經存在了 err = -EEXIST; if (n->nlmsg_flags&NLM_F_EXCL) goto errout; break; case RTM_DELTFILTER://刪除filter命令, 運行tcf_proto_ops的delete函數 err = tp->ops->delete(tp, fh); if (err == 0)//若是操作成功, 發送通告消息 tfilter_notify(skb, n, tp, fh, RTM_DELTFILTER); goto errout; case RTM_GETTFILTER://獲取filter命令, 發送通告信息, 其中包含了filter的參數 err = tfilter_notify(skb, n, tp, fh, RTM_NEWTFILTER); goto errout; default: err = -EINVAL; goto errout; } } //新建,修改操作都經過tcf_proto_ops的change函數完成 err = tp->ops->change(tp, cl, t->tcm_handle, tca, &fh); if (err == 0)//若是操作成功, 發送通告消息
tfilter_notify(skb, n, tp, fh, RTM_NEWTFILTER); errout: if (cl) cops->put(q, cl); if (err == -EAGAIN) /* Replay the request. */ goto replay; return err; } fw分類方法主要是根據skb中mark參數來進行數據分類, 而該參數是由netfilter定義的, 若是內核裏沒有定義netfilter, 該分類方法意義不大,該分類方法在 net/sched/cls_fw.c 中定義 static struct tcf_proto_ops cls_fw_ops = { .next = NULL, .kind = "fw", .classify = fw_classify, .init = fw_init, //空函數,直接返回0 .destroy = fw_destroy, .get = fw_get, .put = fw_put, //空函數 .change = fw_change, .delete = fw_delete, .walk = fw_walk, .dump = fw_dump, .owner = THIS_MODULE, }; 初始化,可做爲模塊 static int __init init_fw(void) { return register_tcf_proto_ops(&cls_fw_ops);//註冊這個操做 } 分類方法, 返回負數表示分類失敗, 返回0表示分類成功,分類結果在res中返回 static int fw_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_result *res) { struct fw_head *head = (struct fw_head*)tp->root;//HASH鏈表頭 struct fw_filter *f; int r; u32 id = skb->mark; if (head != NULL) { id &= head->mask; //根據id進行hash, 遍歷合適的鏈表 for (f=head->ht[fw_hash(id)]; f; f=f->next) { if (f->id == id) {//若是ID相同, 合適的話能夠返回 *res = f->res; #ifdef CONFIG_NET_CLS_IND if (!tcf_match_indev(skb, f->indev))//網卡設備匹配 continue; #endif /* CONFIG_NET_CLS_IND */ r = tcf_exts_exec(skb, &f->exts, res);//若是沒有定義CONFIG_NET_CLS_ACT話就是個空函數, 返回0 if (r < 0) continue; return r; } } } else { /* old method */ //老分類方法, id非0, id高16爲0或和Qdisc的handle的高16位相同時, 分類成功 if (id && (TC_H_MAJ(id) == 0 || !(TC_H_MAJ(id ^ tp->q->handle)))) { res->classid = id; res->class = 0; return 0; } } } static inline int tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts, struct tcf_result *res) { #ifdef CONFIG_NET_CLS_ACT if (exts->action) return tcf_action_exec(skb, exts->action, res); //執行具體動做 #endif return 0; } 執行動做 int tcf_action_exec(struct sk_buff *skb, struct tc_action *act, struct tcf_result *res) { struct tc_action *a; int ret = -1; if (skb->tc_verd & TC_NCLS) { skb->tc_verd = CLR_TC_NCLS(skb->tc_verd); ret = TC_ACT_OK; goto exec_done; } while ((a = act) != NULL) { //循環調用具體動做 repeat: if (a->ops && a->ops->act) 
{ ret = a->ops->act(skb, a, res); //具體動做函數,參看下面動做操做實現 if (TC_MUNGED & skb->tc_verd) { /* copied already, allow trampling */ skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd); skb->tc_verd = CLR_TC_MUNGED(skb->tc_verd); } if (ret == TC_ACT_REPEAT) goto repeat; /* we need a ttl - JHS */ if (ret != TC_ACT_PIPE) goto exec_done; } act = a->next; } exec_done: return ret; } 獲取過濾器 static unsigned long fw_get(struct tcf_proto *tp, u32 handle) { struct fw_head *head = (struct fw_head*)tp->root; struct fw_filter *f; if (head == NULL) return 0; // 用handle進行哈希, 遍歷指定的hash表 for (f=head->ht[fw_hash(handle)]; f; f=f->next) { if (f->id == handle)//若是有filter的id和handle相同, 返回 return (unsigned long)f; } return 0; } 新建, 修改都經過該函數完成 static int fw_change(struct tcf_proto *tp, unsigned long base, u32 handle, struct rtattr **tca, unsigned long *arg) { struct fw_head *head = (struct fw_head*)tp->root;//根哈希節點 struct fw_filter *f = (struct fw_filter *) *arg;//fw過濾器指針 struct rtattr *opt = tca[TCA_OPTIONS-1];//選項參數 struct rtattr *tb[TCA_FW_MAX]; int err; if (!opt)//若是沒提供選項, 在提供了handle的狀況下錯誤, 不然返回成功 return handle ? 
-EINVAL : 0; if (rtattr_parse_nested(tb, TCA_FW_MAX, opt) < 0)//解析選項參數是否合法 return -EINVAL; if (f != NULL) {//fw過濾器非空, 修改操做 if (f->id != handle && handle)//修改的狀況下, 若是handle值非0, 並且和fw過濾器的id不一樣的話, 返回參數錯誤 return -EINVAL; return fw_change_attrs(tp, f, tb, tca, base);//進行參數修改操做 } if (!handle)//新建fw過濾器的狀況, 若是handle爲0, 返回參數錯誤 return -EINVAL; if (head == NULL) {//鏈表頭爲空, 第一次操做 u32 mask = 0xFFFFFFFF;//缺省掩碼 if (tb[TCA_FW_MASK-1]) {//若是在命令參數中定義了掩碼, 獲取之 if (RTA_PAYLOAD(tb[TCA_FW_MASK-1]) != sizeof(u32)) return -EINVAL; mask = *(u32*)RTA_DATA(tb[TCA_FW_MASK-1]); } //分配鏈表頭空間 head = kzalloc(sizeof(struct fw_head), GFP_KERNEL); if (head == NULL) return -ENOBUFS; head->mask = mask;//掩碼 tcf_tree_lock(tp); tp->root = head;//做爲系統的根哈希鏈表頭 tcf_tree_unlock(tp); } //分配新的fw過濾器結構指針 f = kzalloc(sizeof(struct fw_filter), GFP_KERNEL); if (f == NULL) return -ENOBUFS; //使用handle值做爲fw過濾器的ID f->id = handle; //調用修改函數進行賦值操做 err = fw_change_attrs(tp, f, tb, tca, base); if (err < 0) goto errout; //添加到合適的hash鏈表的頭 f->next = head->ht[fw_hash(handle)]; tcf_tree_lock(tp); head->ht[fw_hash(handle)] = f; tcf_tree_unlock(tp); *arg = (unsigned long)f;//將fw過濾器做爲參數返回 return 0; errout: kfree(f); return err; } 參數修改處理 static int fw_change_attrs(struct tcf_proto *tp, struct fw_filter *f, struct rtattr **tb, struct rtattr **tca, unsigned long base) { struct fw_head *head = (struct fw_head *)tp->root; struct tcf_exts e; u32 mask; int err; //tcf擴展驗證操做 err = tcf_exts_validate(tp, tb, tca[TCA_RATE-1], &e, &fw_ext_map); if (err < 0) return err; err = -EINVAL; if (tb[TCA_FW_CLASSID-1]) {//命令參數中提供了類別ID if (RTA_PAYLOAD(tb[TCA_FW_CLASSID-1]) != sizeof(u32)) goto errout; //類別ID賦值 f->res.classid = *(u32*)RTA_DATA(tb[TCA_FW_CLASSID-1]); tcf_bind_filter(tp, &f->res, base); } #ifdef CONFIG_NET_CLS_IND if (tb[TCA_FW_INDEV-1]) {//網卡設備 err = tcf_change_indev(tp, f->indev, tb[TCA_FW_INDEV-1]); if (err < 0) goto errout; } #endif /* CONFIG_NET_CLS_IND */ if (tb[TCA_FW_MASK-1]) {//FW掩碼 if (RTA_PAYLOAD(tb[TCA_FW_MASK-1]) != sizeof(u32)) goto 
errout; mask = *(u32*)RTA_DATA(tb[TCA_FW_MASK-1]); if (mask != head->mask) goto errout; } else if (head->mask != 0xFFFFFFFF) goto errout; tcf_exts_change(tp, &f->exts, &e);//將e中的數據賦值到f->exts中 return 0; errout: tcf_exts_destroy(tp, &e); return err; } [/filter操做實現] [動做操做實現] tc action命令是用來定義數據包進行最終處理方法的命令, 其功能就象netfilter的target同樣, 須要內核定義NET_CLS_ACT選項。 動做處理的基本api在net/sched/act_api.c中定義, 而各類動做方法在net/sched/act_*.c中定義。 struct tc_action { void *priv;//私有數據 struct tc_action_ops *ops;//操做結構 __u32 type; /* for backward compat(TCA_OLD_COMPAT) */ __u32 order;//階數 struct tc_action *next; }; action操做結構, 實際就是定義目標操做, 一般每一個匹配操做都由一個靜態tcf_action_ops結構定義, 做爲一個內核模塊, 初始化登記到系統的鏈表. struct tc_action_ops { struct tc_action_ops *next; struct tcf_hashinfo *hinfo; char kind[IFNAMSIZ];//名稱 __u32 type; /* TBD to match kind */ __u32 capab; /* capabilities includes 4 bit version */ struct module *owner; int (*act)(struct sk_buff *, struct tc_action *, struct tcf_result *);//動做 int (*get_stats)(struct sk_buff *, struct tc_action *);//獲取統計參數 int (*dump)(struct sk_buff *, struct tc_action *, int, int);//輸出 int (*cleanup)(struct tc_action *, int bind);//清除 int (*lookup)(struct tc_action *, u32);//查找 int (*init)(struct rtattr *, struct rtattr *, struct tc_action *, int , int);//初始化 int (*walk)(struct sk_buff *, struct netlink_callback *, int, struct tc_action *);//遍歷 }; 初始化 static int __init tc_action_init(void) //net/sched/act_api.c { //定義action操做處理函數,增長/刪除/獲取等操做 rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL); rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL); rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action); return 0; } subsys_initcall(tc_action_init); 這些內容和上面咱們看到的都大同小議,因此這就不在看了,咱們仍是看一個具體的活動實現。 packet mirroring and redirect actions mirred動做是對數據進行鏡像和重定向操做, 將數據包從指定網卡發出, 在net/sched/act_mirred.c中定義. 
static struct tc_action_ops act_mirred_ops = { .kind = "mirred",//名稱 .hinfo = &mirred_hash_info, .type = TCA_ACT_MIRRED,//類型 .capab = TCA_CAP_NONE, .owner = THIS_MODULE, .act = tcf_mirred, .dump = tcf_mirred_dump, .cleanup = tcf_mirred_cleanup, .lookup = tcf_hash_search,//查找, 通用函數 .init = tcf_mirred_init, .walk = tcf_generic_walker//遍歷, 通用函數 }; 初始化 static int tcf_mirred_init(struct rtattr *rta, struct rtattr *est, struct tc_action *a, int ovr, int bind) { struct rtattr *tb[TCA_MIRRED_MAX]; struct tc_mirred *parm; struct tcf_mirred *m; struct tcf_common *pc; struct net_device *dev = NULL; int ret = 0; int ok_push = 0; if (rta == NULL || rtattr_parse_nested(tb, TCA_MIRRED_MAX, rta) < 0)//解析參數, 保存於tb數組, 失敗返回 return -EINVAL; //必需要有MIRRED參數 if (tb[TCA_MIRRED_PARMS-1] == NULL || RTA_PAYLOAD(tb[TCA_MIRRED_PARMS-1]) < sizeof(*parm)) return -EINVAL; parm = RTA_DATA(tb[TCA_MIRRED_PARMS-1]); if (parm->ifindex) {//若是定義了網卡索引號 dev = __dev_get_by_index(&init_net, parm->ifindex);//查找相應的網卡設備結構 if (dev == NULL) return -ENODEV; switch (dev->type) {//如下類型的網卡不須要擴展硬件頭, 這些一般是虛擬網卡 case ARPHRD_TUNNEL: case ARPHRD_TUNNEL6: case ARPHRD_SIT: case ARPHRD_IPGRE: case ARPHRD_VOID: case ARPHRD_NONE: ok_push = 0; break; default://其他類型網卡須要擴展硬件頭 ok_push = 1; break; } } //根據索引號查找common節點, 綁定到a節點(priv) pc = tcf_hash_check(parm->index, a, bind, &mirred_hash_info); if (!pc) {//若是節點爲空 //必需要有網卡參數 if (!parm->ifindex) return -EINVAL; //建立新的common節點 pc = tcf_hash_create(parm->index, est, a, sizeof(*m), bind, &mirred_idx_gen, &mirred_hash_info); if (unlikely(!pc)) return -ENOMEM; ret = ACT_P_CREATED;//新建標誌 } else { if (!ovr) {//ovr是替代標誌, 若是不是替代操做, 對象已經存在, 操做失敗 tcf_mirred_release(to_mirred(pc), bind); return -EEXIST; } } m = to_mirred(pc);//轉換爲mirred動做結構 spin_lock_bh(&m->tcf_lock); m->tcf_action = parm->action;//動做 m->tcfm_eaction = parm->eaction;//實際動做 if (parm->ifindex) {//填充網卡參數 m->tcfm_ifindex = parm->ifindex; if (ret != ACT_P_CREATED)//若是不是新建操做, 減小網卡計數, 由於已經引用過了 dev_put(m->tcfm_dev); m->tcfm_dev = dev;//網卡
dev_hold(dev); m->tcfm_ok_push = ok_push;//硬件頭擴展標誌 } spin_unlock_bh(&m->tcf_lock); if (ret == ACT_P_CREATED)//若是是新建節點, 插入哈希表 tcf_hash_insert(pc, &mirred_hash_info); return ret; } 動做,將數據包從指定網卡發出 static int tcf_mirred(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res) { struct tcf_mirred *m = a->priv;//mirred動做結構 struct net_device *dev; struct sk_buff *skb2 = NULL; u32 at = G_TC_AT(skb->tc_verd);//數據包自身的動做信息 spin_lock(&m->tcf_lock); dev = m->tcfm_dev;//網卡 m->tcf_tm.lastuse = jiffies;//最後使用時間 if (!(dev->flags&IFF_UP) ) {//若是該網卡沒運行, 丟包 if (net_ratelimit()) printk("mirred to Houston: device %s is gone!\n", dev->name); bad_mirred: if (skb2 != NULL) kfree_skb(skb2); m->tcf_qstats.overlimits++; m->tcf_bstats.bytes += skb->len; m->tcf_bstats.packets++; spin_unlock(&m->tcf_lock); /* should we be asking for packet to be dropped? * may make sense for redirect case only */ return TC_ACT_SHOT; } skb2 = skb_act_clone(skb, GFP_ATOMIC);//克隆數據包用於鏡像或重定向 if (skb2 == NULL) goto bad_mirred; //若是實際動做既不是鏡像也不是重定向, 出錯返回 if (m->tcfm_eaction != TCA_EGRESS_MIRROR && m->tcfm_eaction != TCA_EGRESS_REDIR) { if (net_ratelimit()) printk("tcf_mirred unknown action %d\n", m->tcfm_eaction); goto bad_mirred; } //統計數更新 m->tcf_bstats.bytes += skb2->len; m->tcf_bstats.packets++; if (!(at & AT_EGRESS))//若是不是發出的, 根據須要擴展硬件頭 if (m->tcfm_ok_push) skb_push(skb2, skb2->dev->hard_header_len); /* mirror is always swallowed */ if (m->tcfm_eaction != TCA_EGRESS_MIRROR)//實際動做不是鏡像, 從新設置TC斷定值 skb2->tc_verd = SET_TC_FROM(skb2->tc_verd, at); skb2->dev = dev;//將克隆的數據包從指定網卡發出 skb2->iif = skb->dev->ifindex; //記錄原始數據包設備索引 dev_queue_xmit(skb2); spin_unlock(&m->tcf_lock); return m->tcf_action; } [/動做操做實現]