承接上文,本節以ICMP和TCP爲例介紹與網絡相關的部份內容。
首先看下促使我學習bcc的這篇文章中的程序traceicmpsoftirq.py,使用該程序的本意是找出對ping響應的進程位於哪一個CPU core上,而後使用perf
掃描該core,找出形成網絡延遲的緣由。源碼以下:
#!/usr/bin/python bpf_text = """ #include <linux/ptrace.h> #include <linux/sched.h> /* For TASK_COMM_LEN */ #include <linux/icmp.h> #include <linux/netdevice.h> struct probe_icmp_data_t { u64 timestamp_ns; u32 tgid; u32 pid; char comm[TASK_COMM_LEN]; int v0; }; BPF_PERF_OUTPUT(probe_icmp_events); static inline unsigned char *my_skb_transport_header(const struct sk_buff *skb) { return skb->head + skb->transport_header; } static inline struct icmphdr *my_icmp_hdr(const struct sk_buff *skb) { return (struct icmphdr *)my_skb_transport_header(skb); } int probe_icmp(struct pt_regs *ctx, struct sk_buff *skb) { u64 __pid_tgid = bpf_get_current_pid_tgid(); u32 __tgid = __pid_tgid >> 32; u32 __pid = __pid_tgid; // implicit cast to u32 for bottom half struct probe_icmp_data_t __data = {0}; __data.timestamp_ns = bpf_ktime_get_ns(); __data.tgid = __tgid; __data.pid = __pid; bpf_get_current_comm(&__data.comm, sizeof(__data.comm)); __be16 seq; bpf_probe_read_kernel(&seq, sizeof(seq), &my_icmp_hdr(skb)->un.echo.sequence); __data.v0 = (int)seq; probe_icmp_events.perf_submit(ctx, &__data, sizeof(__data)); return 0; } """ from bcc import BPF import ctypes as ct class Data_icmp(ct.Structure): _fields_ = [ ("timestamp_ns", ct.c_ulonglong), ("tgid", ct.c_uint), ("pid", ct.c_uint), ("comm", ct.c_char * 16), # TASK_COMM_LEN ('v0', ct.c_uint), ] b = BPF(text=bpf_text) def print_icmp_event(cpu, data, size): #event = b["probe_icmp_events"].event(data) event = ct.cast(data, ct.POINTER(Data_icmp)).contents print("%-7d %-7d %-15s %s" % (event.tgid, event.pid, event.comm.decode('utf-8', 'replace'), event.v0)) b.attach_kprobe(event="icmp_echo", fn_name="probe_icmp") b["probe_icmp_events"].open_perf_buffer(print_icmp_event) while 1: try: b.kprobe_poll() except KeyboardInterrupt: exit()
上面程序對icmp_echo
內核函數進行打點探測,當內核運行該函數時會執行自定義的函數probe_icmp
,並獲取當前的tgid,pid以及icmp報文的序列號。
內容以下:
my_skb_transport_header
:該函數經過偏移sk_buff指針獲取傳輸層首部地址,用於後續獲取icmp首部的序列號。此處的操做能夠直接參考static bool icmp_echo(struct sk_buff *skb)
的內核源碼,其獲取icmp首部的方式依次爲:
/* Kernel helpers quoted from include/linux/icmp.h and include/linux/skbuff.h:
 * the ICMP header lives at skb->head + skb->transport_header, which is exactly
 * what the BPF program's my_skb_transport_header() re-implements. */
static inline struct icmphdr *icmp_hdr(const struct sk_buff *skb)
{
	return (struct icmphdr *)skb_transport_header(skb);
}

static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->head + skb->transport_header;
}
能夠看到skb_transport_header
的處理與本程序的方式是同樣的,將該函數的實現直接移植過去便可。須要注意的是,不能直接調用內核函數skb_transport_header
獲取transport_header
的地址。
bpf_get_current_pid_tgid()
:獲取當前的PID。須要注意的是該函數獲取的是當前CPU上運行的進程ID,而不是某一個特定的進程ID。其內核源碼以下:
BPF_CALL_1(bpf_get_current_ancestor_cgroup_id, int, ancestor_level) { struct cgroup *cgrp = task_dfl_cgroup(current); struct cgroup *ancestor; ancestor = cgroup_ancestor(cgrp, ancestor_level); if (!ancestor) return 0; return cgroup_id(ancestor); }
而current定義以下,用於得到當前執行進程的task_struct指針。更多參見這篇文章。
#define current get_current() /* arch helper returning the task_struct of the task running on this CPU */
所以以本程序爲例,若是對icmp_echo的打點採集中若是發生了上下文切換,可能bpf_get_current_pid_tgid
獲取到的多是切換後的程序。本文也是藉助這種機制,發現在切換到cadvisor
致使了網絡延時。
bpf_probe_read_kernel
:讀取內核結構體的成員,原文中使用的是bpf_probe_read
,更多參見issue。
其他部分與檢測可觀測性相同。
下面看一下TCP的探測,用於跟蹤內核代碼tcp_v4_connect
或tcp_v6_connect
,代碼源自官方庫tools/tcpconnect
#!/usr/bin/python
# tcpconnect: trace active TCP connections via kprobes on tcp_v4_connect /
# tcp_v6_connect (entry) and kretprobes on the same functions (return).
# The entry probe stashes the struct sock* keyed by tid; the return probe
# looks it up and either emits a per-event trace or bumps a per-flow counter.
from __future__ import print_function
from bcc import BPF
from bcc.containers import filter_by_containers
from bcc.utils import printb
import argparse
from socket import inet_ntop, ntohs, AF_INET, AF_INET6
from struct import pack
from time import sleep

# arguments
examples = """examples:
    ./tcpconnect           # trace all TCP connect()s
    ./tcpconnect -t        # include timestamps
    ./tcpconnect -p 181    # only trace PID 181
    ./tcpconnect -P 80     # only trace port 80
    ./tcpconnect -P 80,81  # only trace port 80 and 81
    ./tcpconnect -U        # include UID
    ./tcpconnect -u 1000   # only trace UID 1000
    ./tcpconnect -c        # count connects per src ip and dest ip/port
    ./tcpconnect --cgroupmap mappath  # only trace cgroups in this BPF map
    ./tcpconnect --mntnsmap mappath   # only trace mount namespaces in the map
"""
parser = argparse.ArgumentParser(
    description="Trace TCP connects",
    formatter_class=argparse.RawDescriptionHelpFormatter,
    epilog=examples)
parser.add_argument("-t", "--timestamp", action="store_true",
    help="include timestamp on output")
parser.add_argument("-p", "--pid",
    help="trace this PID only")
parser.add_argument("-P", "--port",
    help="comma-separated list of destination ports to trace.")
parser.add_argument("-U", "--print-uid", action="store_true",
    help="include UID on output")
parser.add_argument("-u", "--uid",
    help="trace this UID only")
parser.add_argument("-c", "--count", action="store_true",
    help="count connects per src ip and dest ip/port")
parser.add_argument("--cgroupmap",
    help="trace cgroups in this BPF map only")
parser.add_argument("--mntnsmap",
    help="trace mount namespaces in this BPF map only")
parser.add_argument("--ebpf", action="store_true",
    help=argparse.SUPPRESS)
args = parser.parse_args()  # parse command-line options
debug = 0

# define BPF program
# NOTE: the BPF program is just a string to Python; FILTER_PID / FILTER_UID /
# FILTER_PORT / IPV4_CODE / IPV6_CODE are placeholder markers substituted
# below with str.replace() depending on the requested options.
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <net/sock.h>
#include <bcc/proto.h>

BPF_HASH(currsock, u32, struct sock *); //建立保存socket指針的哈希

// separate data structs for ipv4 and ipv6
struct ipv4_data_t {
    u64 ts_us;
    u32 pid;
    u32 uid;
    u32 saddr;
    u32 daddr;
    u64 ip;
    u16 dport;
    char task[TASK_COMM_LEN];
};
BPF_PERF_OUTPUT(ipv4_events); //建立ipv4的輸出

struct ipv6_data_t {
    u64 ts_us;
    u32 pid;
    u32 uid;
    unsigned __int128 saddr;
    unsigned __int128 daddr;
    u64 ip;
    u16 dport;
    char task[TASK_COMM_LEN];
};
BPF_PERF_OUTPUT(ipv6_events); //建立ipv6的輸出

// separate flow keys per address family
struct ipv4_flow_key_t { //用於根據地址統計執行tcp_v4_connect的次數,即指定了"-c"或"--count"選項
    u32 saddr;
    u32 daddr;
    u16 dport;
};
BPF_HASH(ipv4_count, struct ipv4_flow_key_t); //統計執行tcp_v4_connect的次數

struct ipv6_flow_key_t { //用於根據地址統計執行tcp_v6_connect的次數,即指定了"-c"或"--count"選項
    unsigned __int128 saddr;
    unsigned __int128 daddr;
    u16 dport;
};
BPF_HASH(ipv6_count, struct ipv6_flow_key_t); //統計執行tcp_v6_connect的次數

int trace_connect_entry(struct pt_regs *ctx, struct sock *sk) //在進入tcp_v4_connect時調用
{
    if (container_should_be_filtered()) {
        return 0;
    }

    u64 pid_tgid = bpf_get_current_pid_tgid(); //獲取64位的pid_tgid
    u32 pid = pid_tgid >> 32; //tgid位於高32位,右移32位獲取
    u32 tid = pid_tgid; //tid線程惟一
    FILTER_PID //bpf程序對python來說就是一段字符串,此處能夠看做是一個標記符,後續使用python的string.replace進行替換。此處表示過濾特定的PID

    u32 uid = bpf_get_current_uid_gid();
    FILTER_UID //過濾特定的UID

    // stash the sock ptr for lookup on return
    currsock.update(&tid, &sk); //使用tid做爲key,保存sk指針指向的地址

    return 0;
};

static int trace_connect_return(struct pt_regs *ctx, short ipver) //在從tcp_v4_connect返回時調用
{
    int ret = PT_REGS_RC(ctx); //獲取tcp_v4_connect函數的返回值
    u64 pid_tgid = bpf_get_current_pid_tgid();
    u32 pid = pid_tgid >> 32;
    u32 tid = pid_tgid;

    struct sock **skpp;
    skpp = currsock.lookup(&tid); //判斷當前線程在進入tcp_v4_connect時是否打點採集,便是否執行了上面的trace_connect_entry
    if (skpp == 0) {
        return 0;   // missed entry
    }

    if (ret != 0) { //若是tcp_v4_connect的返回值非0,表示沒法發送SYNC報文
        // failed to send SYNC packet, may not have populated
        // socket __sk_common.{skc_rcv_saddr, ...}
        currsock.delete(&tid); //本次採集失敗,刪除哈希
        return 0;
    }

    // pull in details
    struct sock *skp = *skpp;
    u16 dport = skp->__sk_common.skc_dport;

    FILTER_PORT //過濾特定的端口

    if (ipver == 4) {
        IPV4_CODE //根據入參替換爲IPV4的處理
    } else /* 6 */ {
        IPV6_CODE //根據入參替換爲位IPV6的處理
    }

    currsock.delete(&tid);

    return 0;
}

int trace_connect_v4_return(struct pt_regs *ctx)
{
    return trace_connect_return(ctx, 4);
}

int trace_connect_v6_return(struct pt_regs *ctx)
{
    return trace_connect_return(ctx, 6);
}
"""

# C snippets substituted for IPV4_CODE / IPV6_CODE above:
# 'count' aggregates per-flow connect counts ("-c"/"--count"),
# 'trace' emits one perf event per connect (the default).
struct_init = {
    'ipv4': {
        'count':
        """
        struct ipv4_flow_key_t flow_key = {};
        flow_key.saddr = skp->__sk_common.skc_rcv_saddr;
        flow_key.daddr = skp->__sk_common.skc_daddr;
        flow_key.dport = ntohs(dport);
        ipv4_count.increment(flow_key);""",
        'trace':
        """
        struct ipv4_data_t data4 = {.pid = pid, .ip = ipver};
        data4.uid = bpf_get_current_uid_gid();
        data4.ts_us = bpf_ktime_get_ns() / 1000;
        data4.saddr = skp->__sk_common.skc_rcv_saddr;
        data4.daddr = skp->__sk_common.skc_daddr;
        data4.dport = ntohs(dport);
        bpf_get_current_comm(&data4.task, sizeof(data4.task));
        ipv4_events.perf_submit(ctx, &data4, sizeof(data4));"""
    },
    'ipv6': {
        'count':
        """
        struct ipv6_flow_key_t flow_key = {};
        bpf_probe_read_kernel(&flow_key.saddr, sizeof(flow_key.saddr),
            skp->__sk_common.skc_v6_rcv_saddr.in6_u.u6_addr32);
        bpf_probe_read_kernel(&flow_key.daddr, sizeof(flow_key.daddr),
            skp->__sk_common.skc_v6_daddr.in6_u.u6_addr32);
        flow_key.dport = ntohs(dport);
        ipv6_count.increment(flow_key);""",
        'trace':
        """
        struct ipv6_data_t data6 = {.pid = pid, .ip = ipver};
        data6.uid = bpf_get_current_uid_gid();
        data6.ts_us = bpf_ktime_get_ns() / 1000;
        bpf_probe_read_kernel(&data6.saddr, sizeof(data6.saddr),
            skp->__sk_common.skc_v6_rcv_saddr.in6_u.u6_addr32);
        bpf_probe_read_kernel(&data6.daddr, sizeof(data6.daddr),
            skp->__sk_common.skc_v6_daddr.in6_u.u6_addr32);
        data6.dport = ntohs(dport);
        bpf_get_current_comm(&data6.task, sizeof(data6.task));
        ipv6_events.perf_submit(ctx, &data6, sizeof(data6));"""
    }
}

# code substitutions
if args.count:
    # "-c"/"--count" requested: aggregate counters instead of per-event traces
    bpf_text = bpf_text.replace("IPV4_CODE", struct_init['ipv4']['count'])
    bpf_text = bpf_text.replace("IPV6_CODE", struct_init['ipv6']['count'])
else:
    bpf_text = bpf_text.replace("IPV4_CODE", struct_init['ipv4']['trace'])
    bpf_text = bpf_text.replace("IPV6_CODE", struct_init['ipv6']['trace'])

if args.pid:
    # "-p"/"--pid": only trace the given PID
    bpf_text = bpf_text.replace('FILTER_PID',
        'if (pid != %s) { return 0; }' % args.pid)
if args.port:
    # "-P"/"--port": only trace the given destination ports.
    # BUG FIX: currsock is keyed by tid (see trace_connect_entry), so a
    # filtered-out entry must be deleted with &tid; the original used &pid,
    # leaking map entries whenever tid != pid.
    dports = [int(dport) for dport in args.port.split(',')]
    dports_if = ' && '.join(['dport != %d' % ntohs(dport) for dport in dports])
    bpf_text = bpf_text.replace('FILTER_PORT',
        'if (%s) { currsock.delete(&tid); return 0; }' % dports_if)
if args.uid:
    # "-u"/"--uid": only trace the given UID
    bpf_text = bpf_text.replace('FILTER_UID',
        'if (uid != %s) { return 0; }' % args.uid)
bpf_text = filter_by_containers(args) + bpf_text
# strip any placeholder markers left when no filter was requested
bpf_text = bpf_text.replace('FILTER_PID', '')
bpf_text = bpf_text.replace('FILTER_PORT', '')
bpf_text = bpf_text.replace('FILTER_UID', '')
if debug or args.ebpf:
    print(bpf_text)
    if args.ebpf:
        exit()


# process event
def print_ipv4_event(cpu, data, size):
    # perf-buffer callback: print one IPv4 connect event
    event = b["ipv4_events"].event(data)
    global start_ts
    if args.timestamp:
        if start_ts == 0:
            start_ts = event.ts_us
        printb(b"%-9.3f" % ((float(event.ts_us) - start_ts) / 1000000), nl="")
    if args.print_uid:
        printb(b"%-6d" % event.uid, nl="")
    printb(b"%-6d %-12.12s %-2d %-16s %-16s %-4d" % (event.pid,
        event.task, event.ip,
        # addresses arrive in network order; inet_ntop renders them readable
        inet_ntop(AF_INET, pack("I", event.saddr)).encode(),
        inet_ntop(AF_INET, pack("I", event.daddr)).encode(), event.dport))


def print_ipv6_event(cpu, data, size):
    # perf-buffer callback: print one IPv6 connect event
    event = b["ipv6_events"].event(data)
    global start_ts
    if args.timestamp:
        if start_ts == 0:
            start_ts = event.ts_us
        printb(b"%-9.3f" % ((float(event.ts_us) - start_ts) / 1000000), nl="")
    if args.print_uid:
        printb(b"%-6d" % event.uid, nl="")
    printb(b"%-6d %-12.12s %-2d %-16s %-16s %-4d" % (event.pid,
        event.task, event.ip,
        inet_ntop(AF_INET6, event.saddr).encode(),
        inet_ntop(AF_INET6, event.daddr).encode(), event.dport))


def depict_cnt(counts_tab, l3prot='ipv4'):
    # dump a BPF count map sorted by connect count, highest first
    for k, v in sorted(counts_tab.items(),
                       key=lambda counts: counts[1].value, reverse=True):
        depict_key = ""
        if l3prot == 'ipv4':
            depict_key = "%-25s %-25s %-20s" % (
                (inet_ntop(AF_INET, pack('I', k.saddr))),
                inet_ntop(AF_INET, pack('I', k.daddr)), k.dport)
        else:
            depict_key = "%-25s %-25s %-20s" % (
                (inet_ntop(AF_INET6, k.saddr)),
                inet_ntop(AF_INET6, k.daddr), k.dport)
        print("%s %-10d" % (depict_key, v.value))


# initialize BPF
b = BPF(text=bpf_text)
b.attach_kprobe(event="tcp_v4_connect", fn_name="trace_connect_entry")
b.attach_kprobe(event="tcp_v6_connect", fn_name="trace_connect_entry")
b.attach_kretprobe(event="tcp_v4_connect", fn_name="trace_connect_v4_return")
b.attach_kretprobe(event="tcp_v6_connect", fn_name="trace_connect_v6_return")

print("Tracing connect ... Hit Ctrl-C to end")
if args.count:
    try:
        while 1:
            sleep(99999999)
    except KeyboardInterrupt:
        pass

    # header
    print("\n%-25s %-25s %-20s %-10s" % (
        "LADDR", "RADDR", "RPORT", "CONNECTS"))
    depict_cnt(b["ipv4_count"])
    depict_cnt(b["ipv6_count"], l3prot='ipv6')
# read events
else:
    # header
    if args.timestamp:
        print("%-9s" % ("TIME(s)"), end="")
    if args.print_uid:
        print("%-6s" % ("UID"), end="")
    print("%-6s %-12s %-2s %-16s %-16s %-4s" % ("PID", "COMM", "IP", "SADDR",
        "DADDR", "DPORT"))

    start_ts = 0

    # read events
    b["ipv4_events"].open_perf_buffer(print_ipv4_event)
    b["ipv6_events"].open_perf_buffer(print_ipv6_event)
    while 1:
        try:
            b.perf_buffer_poll()
        except KeyboardInterrupt:
            exit()
上面C程序採集了內核數據skp->__sk_common.skc_dport,skp->__sk_common.skc_rcv_saddr和skp->__sk_common.skc_daddr。與第一個例子相似,這類數據能夠直接參考tcp_v4_connect內核源碼的實現,源碼中經過struct inet_sock *inet = inet_sk(sk);
來獲取源目的地址和端口,inet_sock的結構體定義以下,能夠明顯看到inet_daddr,inet_rcv_saddr和inet_dport與上述代碼獲取的內容相同,進而能夠了解到獲取這些成員的方式。
struct inet_sock { /* sk and pinet6 has to be the first two members of inet_sock */ struct sock sk; #if IS_ENABLED(CONFIG_IPV6) struct ipv6_pinfo *pinet6; #endif /* Socket demultiplex comparisons on incoming packets. */ #define inet_daddr sk.__sk_common.skc_daddr #define inet_rcv_saddr sk.__sk_common.skc_rcv_saddr #define inet_dport sk.__sk_common.skc_dport #define inet_num sk.__sk_common.skc_num ...
此外在inet_sock
結構體的註釋中給出詳細的說明,很是明瞭:
* @inet_daddr - Foreign IPv4 addr * @inet_rcv_saddr - Bound local IPv4 addr * @inet_dport - Destination port * @inet_num - Local port
所以能夠直接參考tcp_v4_connect
的源碼修改ipv4中獲取地址和端口的實現,效果是同樣的:
struct_init = { 'ipv4': { 'count' : """ struct ipv4_flow_key_t flow_key = {}; struct inet_sock *inet = inet_sk(skp); flow_key.saddr = inet->inet_rcv_saddr; flow_key.daddr = inet->inet_daddr; u16 dport = inet->inet_dport; flow_key.dport = ntohs(dport); ipv4_count.increment(flow_key);""", 'trace' : """ struct ipv4_data_t data4 = {.pid = pid, .ip = ipver}; data4.uid = bpf_get_current_uid_gid(); data4.ts_us = bpf_ktime_get_ns() / 1000; struct inet_sock *inet = inet_sk(skp); data4.saddr = inet->inet_rcv_saddr; data4.daddr = inet->inet_daddr; u16 dport = inet->inet_dport; data4.dport = ntohs(dport); bpf_get_current_comm(&data4.task, sizeof(data4.task)); ipv4_events.perf_submit(ctx, &data4, sizeof(data4));""" }, 'ipv6': { 'count' : """ struct ipv6_flow_key_t flow_key = {}; bpf_probe_read_kernel(&flow_key.saddr, sizeof(flow_key.saddr), skp->__sk_common.skc_v6_rcv_saddr.in6_u.u6_addr32); bpf_probe_read_kernel(&flow_key.daddr, sizeof(flow_key.daddr), skp->__sk_common.skc_v6_daddr.in6_u.u6_addr32); flow_key.dport = ntohs(dport); ipv6_count.increment(flow_key);""", 'trace' : """ struct ipv6_data_t data6 = {.pid = pid, .ip = ipver}; data6.uid = bpf_get_current_uid_gid(); data6.ts_us = bpf_ktime_get_ns() / 1000; bpf_probe_read_kernel(&data6.saddr, sizeof(data6.saddr), skp->__sk_common.skc_v6_rcv_saddr.in6_u.u6_addr32); bpf_probe_read_kernel(&data6.daddr, sizeof(data6.daddr), skp->__sk_common.skc_v6_daddr.in6_u.u6_addr32); data6.dport = ntohs(dport); bpf_get_current_comm(&data6.task, sizeof(data6.task)); ipv6_events.perf_submit(ctx, &data6, sizeof(data6));""" } }
此外注意到讀取TCP4的數據時沒有用到bpf_probe_read_kernel,但讀取TCP6的數據時用到了bpf_probe_read_kernel
,這是由於TCP4的地址是一個u32
類型的數據,直接賦值便可;而TCP6的地址結構以下,沒法經過直接賦值獲取,所以須要調用bpf_probe_read_kernel
拷貝內存。
/* include/uapi/linux/in6.h: an IPv6 address is a 16-byte union, so it cannot
 * be copied by plain assignment in BPF — hence the bpf_probe_read_kernel()
 * calls above. */
struct in6_addr {
	union {
		__u8 u6_addr8[16];
#if __UAPI_DEF_IN6_ADDR_ALT
		__be16 u6_addr16[8];
		__be32 u6_addr32[4];
#endif
	} in6_u;
#define s6_addr in6_u.u6_addr8
#if __UAPI_DEF_IN6_ADDR_ALT
#define s6_addr16 in6_u.u6_addr16
#define s6_addr32 in6_u.u6_addr32
#endif
};
總體看,上面代碼使用了python處理了一些C程序的替換和拼接,大部分跟可觀測性並無什麼不一樣,固然,最主要的仍是須要了解內核處理流程,選擇正確的內核函數進行打點。
上述僅給出的網絡並沒有法修改報文內容以及對報文進行重定向等操做。ebpf提供了XDP和tc兩種管理網絡的方式,更多能夠參見下一篇博客。