/* xdp_redirect_cpu_kern.c */

/* XDP redirect to CPUs via cpumap (BPF_MAP_TYPE_CPUMAP)
 *
 * GPLv2, Copyright(c) 2017 Jesper Dangaard Brouer, Red Hat, Inc.
 */
#include <uapi/linux/if_ether.h>
#include <uapi/linux/if_packet.h>
#include <uapi/linux/if_vlan.h>
#include <uapi/linux/ip.h>
#include <uapi/linux/ipv6.h>
#include <uapi/linux/in.h>
#include <uapi/linux/tcp.h>
#include <uapi/linux/udp.h>
#include <uapi/linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "hash_func01.h"

#define MAX_CPUS NR_CPUS

/* Special map type that can XDP_REDIRECT frames to another CPU */
struct {
	__uint(type, BPF_MAP_TYPE_CPUMAP);
	__uint(key_size, sizeof(u32));
	__uint(value_size, sizeof(struct bpf_cpumap_val));
	__uint(max_entries, MAX_CPUS);
} cpu_map SEC(".maps");
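/* Userspace must populate cpu_map before frames can be redirected.
 * A minimal sketch, assuming libbpf and a map fd already obtained
 * (e.g. via bpf_object__find_map_fd_by_name()); cpu_map_fd and the
 * qsize of 192 frames are only illustrative:
 *
 *	struct bpf_cpumap_val value = { .qsize = 192 };
 *	__u32 cpu = 2;	// hypothetical target CPU
 *
 *	if (bpf_map_update_elem(cpu_map_fd, &cpu, &value, 0) < 0)
 *		perror("adding CPU to cpumap failed");
 *
 * A slot that has not been populated this way is not a valid redirect
 * target, and bpf_redirect_map() towards it will fail.
 */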
/* Common stats data record to keep userspace more simple */
struct datarec {
	__u64 processed;
	__u64 dropped;
	__u64 issue;
	__u64 xdp_pass;
	__u64 xdp_drop;
	__u64 xdp_redirect;
};

/* Count RX packets, as XDP bpf_prog doesn't get direct TX-success
 * feedback.  Redirect TX errors can be caught via a tracepoint.
 */
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__type(key, u32);
	__type(value, struct datarec);
	__uint(max_entries, 1);
} rx_cnt SEC(".maps");

/* Used by trace point */
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__type(key, u32);
	__type(value, struct datarec);
	__uint(max_entries, 2);
	/* TODO: have entries for all possible errno's */
} redirect_err_cnt SEC(".maps");

/* Used by trace point */
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__type(key, u32);
	__type(value, struct datarec);
	__uint(max_entries, MAX_CPUS);
} cpumap_enqueue_cnt SEC(".maps");

/* Used by trace point */
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__type(key, u32);
	__type(value, struct datarec);
	__uint(max_entries, 1);
} cpumap_kthread_cnt SEC(".maps");

/* Set of maps controlling available CPU, and for iterating through
 * selectable redirect CPUs.
 */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__type(key, u32);
	__type(value, u32);
	__uint(max_entries, MAX_CPUS);
} cpus_available SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__type(key, u32);
	__type(value, u32);
	__uint(max_entries, 1);
} cpus_count SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__type(key, u32);
	__type(value, u32);
	__uint(max_entries, 1);
} cpus_iterator SEC(".maps");

/* Used by trace point */
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__type(key, u32);
	__type(value, struct datarec);
	__uint(max_entries, 1);
} exception_cnt SEC(".maps");
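/* The stats maps above are BPF_MAP_TYPE_PERCPU_ARRAY: every CPU gets its
 * own struct datarec per key, so counters can be bumped without atomic
 * operations, and userspace sums the per-CPU copies when reading.
 */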
/* Helper parse functions */

/* Parse Ethernet layer 2, extract network layer 3 offset and protocol
 *
 * Returns false on error and non-supported ether-type
 */
struct vlan_hdr {
	__be16 h_vlan_TCI;
	__be16 h_vlan_encapsulated_proto;
};

static __always_inline
bool parse_eth(struct ethhdr *eth, void *data_end,
	       u16 *eth_proto, u64 *l3_offset)
{
	u16 eth_type;
	u64 offset;

	offset = sizeof(*eth);
	if ((void *)eth + offset > data_end)
		return false;

	eth_type = eth->h_proto;

	/* Skip non 802.3 Ethertypes */
	if (unlikely(ntohs(eth_type) < ETH_P_802_3_MIN))
		return false;

	/* Handle VLAN tagged packet */
	if (eth_type == htons(ETH_P_8021Q) || eth_type == htons(ETH_P_8021AD)) {
		struct vlan_hdr *vlan_hdr;

		vlan_hdr = (void *)eth + offset;
		offset += sizeof(*vlan_hdr);
		if ((void *)eth + offset > data_end)
			return false;
		eth_type = vlan_hdr->h_vlan_encapsulated_proto;
	}
	/* Handle double VLAN tagged packet */
	if (eth_type == htons(ETH_P_8021Q) || eth_type == htons(ETH_P_8021AD)) {
		struct vlan_hdr *vlan_hdr;

		vlan_hdr = (void *)eth + offset;
		offset += sizeof(*vlan_hdr);
		if ((void *)eth + offset > data_end)
			return false;
		eth_type = vlan_hdr->h_vlan_encapsulated_proto;
	}

	*eth_proto = ntohs(eth_type);
	*l3_offset = offset;
	return true;
}
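/* Note: on success parse_eth() returns the EtherType in *eth_proto in
 * host byte order (callers compare it directly against ETH_P_IP and
 * ETH_P_IPV6), and *l3_offset points past the Ethernet header plus up
 * to two VLAN tags.
 */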
static __always_inline
u16 get_dest_port_ipv4_udp(struct xdp_md *ctx, u64 nh_off)
{
	void *data_end = (void *)(long)ctx->data_end;
	void *data     = (void *)(long)ctx->data;
	struct iphdr *iph = data + nh_off;
	struct udphdr *udph;
	u16 dport;

	if (iph + 1 > data_end)
		return 0;
	if (!(iph->protocol == IPPROTO_UDP))
		return 0;

	udph = (void *)(iph + 1);
	if (udph + 1 > data_end)
		return 0;

	dport = ntohs(udph->dest);
	return dport;
}

static __always_inline
int get_proto_ipv4(struct xdp_md *ctx, u64 nh_off)
{
	void *data_end = (void *)(long)ctx->data_end;
	void *data     = (void *)(long)ctx->data;
	struct iphdr *iph = data + nh_off;

	if (iph + 1 > data_end)
		return 0;
	return iph->protocol;
}

static __always_inline
int get_proto_ipv6(struct xdp_md *ctx, u64 nh_off)
{
	void *data_end = (void *)(long)ctx->data_end;
	void *data     = (void *)(long)ctx->data;
	struct ipv6hdr *ip6h = data + nh_off;

	if (ip6h + 1 > data_end)
		return 0;
	return ip6h->nexthdr;
}
SEC("xdp_cpu_map0")
int xdp_prognum0_no_touch(struct xdp_md *ctx)
{
	void *data_end = (void *)(long)ctx->data_end;
	void *data     = (void *)(long)ctx->data;
	struct datarec *rec;
	u32 *cpu_selected;
	u32 cpu_dest;
	u32 key = 0;

	/* Only use first entry in cpus_available */
	cpu_selected = bpf_map_lookup_elem(&cpus_available, &key);
	if (!cpu_selected)
		return XDP_ABORTED;
	cpu_dest = *cpu_selected;

	/* Count RX packet in map */
	rec = bpf_map_lookup_elem(&rx_cnt, &key);
	if (!rec)
		return XDP_ABORTED;
	rec->processed++;

	if (cpu_dest >= MAX_CPUS) {
		rec->issue++;
		return XDP_ABORTED;
	}
	return bpf_redirect_map(&cpu_map, cpu_dest, 0);
}
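/* Note on bpf_redirect_map(): on success it returns XDP_REDIRECT and the
 * frame is later (at flush time) enqueued towards the target CPU's cpumap
 * kthread; with flags set to 0, a failed lookup, e.g. an unpopulated
 * cpumap slot, makes the helper fall back to returning XDP_ABORTED.
 */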
SEC("xdp_cpu_map1_touch_data")
int xdp_prognum1_touch_data(struct xdp_md *ctx)
{
	void *data_end = (void *)(long)ctx->data_end;
	void *data     = (void *)(long)ctx->data;
	struct ethhdr *eth = data;
	struct datarec *rec;
	u32 *cpu_selected;
	u32 cpu_dest;
	u16 eth_type;
	u32 key = 0;

	/* Only use first entry in cpus_available */
	cpu_selected = bpf_map_lookup_elem(&cpus_available, &key);
	if (!cpu_selected)
		return XDP_ABORTED;
	cpu_dest = *cpu_selected;

	/* Validate packet length is minimum Eth header size */
	if (eth + 1 > data_end)
		return XDP_ABORTED;

	/* Count RX packet in map */
	rec = bpf_map_lookup_elem(&rx_cnt, &key);
	if (!rec)
		return XDP_ABORTED;
	rec->processed++;

	/* Read packet data, and use it (drop non 802.3 Ethertypes) */
	eth_type = eth->h_proto;
	if (ntohs(eth_type) < ETH_P_802_3_MIN) {
		rec->dropped++;
		return XDP_DROP;
	}

	if (cpu_dest >= MAX_CPUS) {
		rec->issue++;
		return XDP_ABORTED;
	}
	return bpf_redirect_map(&cpu_map, cpu_dest, 0);
}
SEC("xdp_cpu_map2_round_robin")
int xdp_prognum2_round_robin(struct xdp_md *ctx)
{
	void *data_end = (void *)(long)ctx->data_end;
	void *data     = (void *)(long)ctx->data;
	struct ethhdr *eth = data;
	struct datarec *rec;
	u32 cpu_dest;
	u32 *cpu_lookup;
	u32 key0 = 0;

	u32 *cpu_selected;
	u32 *cpu_iterator;
	u32 *cpu_max;
	u32 cpu_idx;

	cpu_max = bpf_map_lookup_elem(&cpus_count, &key0);
	if (!cpu_max)
		return XDP_ABORTED;

	cpu_iterator = bpf_map_lookup_elem(&cpus_iterator, &key0);
	if (!cpu_iterator)
		return XDP_ABORTED;
	cpu_idx = *cpu_iterator;

	*cpu_iterator += 1;
	if (*cpu_iterator == *cpu_max)
		*cpu_iterator = 0;

	cpu_selected = bpf_map_lookup_elem(&cpus_available, &cpu_idx);
	if (!cpu_selected)
		return XDP_ABORTED;
	cpu_dest = *cpu_selected;

	/* Count RX packet in map */
	rec = bpf_map_lookup_elem(&rx_cnt, &key0);
	if (!rec)
		return XDP_ABORTED;
	rec->processed++;

	if (cpu_dest >= MAX_CPUS) {
		rec->issue++;
		return XDP_ABORTED;
	}
	return bpf_redirect_map(&cpu_map, cpu_dest, 0);
}
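/* Since cpus_iterator is a per-CPU array, each RX CPU keeps its own
 * round-robin position: no atomic operations are needed, but the
 * rotation is round-robin per receiving CPU, not globally.
 */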
SEC("xdp_cpu_map3_proto_separate")
int xdp_prognum3_proto_separate(struct xdp_md *ctx)
{
	void *data_end = (void *)(long)ctx->data_end;
	void *data     = (void *)(long)ctx->data;
	struct ethhdr *eth = data;
	u8 ip_proto = IPPROTO_UDP;
	struct datarec *rec;
	u16 eth_proto = 0;
	u64 l3_offset = 0;
	u32 cpu_dest = 0;
	u32 cpu_idx = 0;
	u32 *cpu_lookup;
	u32 key = 0;

	/* Count RX packet in map */
	rec = bpf_map_lookup_elem(&rx_cnt, &key);
	if (!rec)
		return XDP_ABORTED;
	rec->processed++;

	if (!(parse_eth(eth, data_end, &eth_proto, &l3_offset)))
		return XDP_PASS; /* Just skip */

	/* Extract L4 protocol */
	switch (eth_proto) {
	case ETH_P_IP:
		ip_proto = get_proto_ipv4(ctx, l3_offset);
		break;
	case ETH_P_IPV6:
		ip_proto = get_proto_ipv6(ctx, l3_offset);
		break;
	case ETH_P_ARP:
		cpu_idx = 0; /* ARP packet handled on separate CPU */
		break;
	default:
		cpu_idx = 0;
	}

	/* Choose CPU based on L4 protocol */
	switch (ip_proto) {
	case IPPROTO_ICMP:
	case IPPROTO_ICMPV6:
		cpu_idx = 2;
		break;
	case IPPROTO_TCP:
		cpu_idx = 0;
		break;
	case IPPROTO_UDP:
		cpu_idx = 1;
		break;
	default:
		cpu_idx = 0;
	}

	cpu_lookup = bpf_map_lookup_elem(&cpus_available, &cpu_idx);
	if (!cpu_lookup)
		return XDP_ABORTED;
	cpu_dest = *cpu_lookup;

	if (cpu_dest >= MAX_CPUS) {
		rec->issue++;
		return XDP_ABORTED;
	}
	return bpf_redirect_map(&cpu_map, cpu_dest, 0);
}
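/* Resulting mapping: TCP lands on cpus_available[0], UDP on [1], and
 * ICMP/ICMPv6 on [2].  Because ip_proto is pre-initialised to
 * IPPROTO_UDP, frames that never reach the L4 extraction (ARP and other
 * non-IP EtherTypes) also fall through the second switch as UDP and end
 * up on cpus_available[1], even though the first switch sets cpu_idx to 0.
 */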
SEC("xdp_cpu_map4_ddos_filter_pktgen")
int xdp_prognum4_ddos_filter_pktgen(struct xdp_md *ctx)
{
	void *data_end = (void *)(long)ctx->data_end;
	void *data     = (void *)(long)ctx->data;
	struct ethhdr *eth = data;
	u8 ip_proto = IPPROTO_UDP;
	struct datarec *rec;
	u16 eth_proto = 0;
	u64 l3_offset = 0;
	u32 cpu_dest = 0;
	u32 cpu_idx = 0;
	u16 dest_port;
	u32 *cpu_lookup;
	u32 key = 0;

	/* Count RX packet in map */
	rec = bpf_map_lookup_elem(&rx_cnt, &key);
	if (!rec)
		return XDP_ABORTED;
	rec->processed++;

	if (!(parse_eth(eth, data_end, &eth_proto, &l3_offset)))
		return XDP_PASS; /* Just skip */

	/* Extract L4 protocol */
	switch (eth_proto) {
	case ETH_P_IP:
		ip_proto = get_proto_ipv4(ctx, l3_offset);
		break;
	case ETH_P_IPV6:
		ip_proto = get_proto_ipv6(ctx, l3_offset);
		break;
	case ETH_P_ARP:
		cpu_idx = 0; /* ARP packet handled on separate CPU */
		break;
	default:
		cpu_idx = 0;
	}

	/* Choose CPU based on L4 protocol */
	switch (ip_proto) {
	case IPPROTO_ICMP:
	case IPPROTO_ICMPV6:
		cpu_idx = 2;
		break;
	case IPPROTO_TCP:
		cpu_idx = 0;
		break;
	case IPPROTO_UDP:
		cpu_idx = 1;
		/* DDoS filter UDP port 9 (pktgen) */
		dest_port = get_dest_port_ipv4_udp(ctx, l3_offset);
		if (dest_port == 9) {
			if (rec)
				rec->dropped++;
			return XDP_DROP;
		}
		break;
	default:
		cpu_idx = 0;
	}

	cpu_lookup = bpf_map_lookup_elem(&cpus_available, &cpu_idx);
	if (!cpu_lookup)
		return XDP_ABORTED;
	cpu_dest = *cpu_lookup;

	if (cpu_dest >= MAX_CPUS) {
		rec->issue++;
		return XDP_ABORTED;
	}
	return bpf_redirect_map(&cpu_map, cpu_dest, 0);
}
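/* Note: the port-9 filter above relies on get_dest_port_ipv4_udp(), which
 * parses a plain IPv4 header (no IP options) followed directly by UDP, so
 * in practice only IPv4/UDP pktgen traffic is dropped here.
 */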
/* Hashing initval */
#define INITVAL 15485863

static __always_inline
u32 get_ipv4_hash_ip_pair(struct xdp_md *ctx, u64 nh_off)
{
	void *data_end = (void *)(long)ctx->data_end;
	void *data     = (void *)(long)ctx->data;
	struct iphdr *iph = data + nh_off;
	u32 cpu_hash;

	if (iph + 1 > data_end)
		return 0;

	cpu_hash = iph->saddr + iph->daddr;
	cpu_hash = SuperFastHash((char *)&cpu_hash, 4, INITVAL + iph->protocol);

	return cpu_hash;
}

static __always_inline
u32 get_ipv6_hash_ip_pair(struct xdp_md *ctx, u64 nh_off)
{
	void *data_end = (void *)(long)ctx->data_end;
	void *data     = (void *)(long)ctx->data;
	struct ipv6hdr *ip6h = data + nh_off;
	u32 cpu_hash;

	if (ip6h + 1 > data_end)
		return 0;

	cpu_hash  = ip6h->saddr.s6_addr32[0] + ip6h->daddr.s6_addr32[0];
	cpu_hash += ip6h->saddr.s6_addr32[1] + ip6h->daddr.s6_addr32[1];
	cpu_hash += ip6h->saddr.s6_addr32[2] + ip6h->daddr.s6_addr32[2];
	cpu_hash += ip6h->saddr.s6_addr32[3] + ip6h->daddr.s6_addr32[3];
	cpu_hash = SuperFastHash((char *)&cpu_hash, 4, INITVAL + ip6h->nexthdr);

	return cpu_hash;
}
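/* The hash input is the sum of source and destination address words.
 * Addition is commutative, so swapping src/dst produces the same value;
 * adding the L4 protocol to INITVAL seeds the hash so different
 * protocols between the same IP pair can still land on different CPUs.
 */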
/* Load-Balance traffic based on hashing IP-addrs + L4-proto.  The
 * hashing scheme is symmetric, meaning swapping IP src/dest still hits
 * the same CPU.
 */
SEC("xdp_cpu_map5_lb_hash_ip_pairs")
int xdp_prognum5_lb_hash_ip_pairs(struct xdp_md *ctx)
{
	void *data_end = (void *)(long)ctx->data_end;
	void *data     = (void *)(long)ctx->data;
	struct ethhdr *eth = data;
	u8 ip_proto = IPPROTO_UDP;
	struct datarec *rec;
	u16 eth_proto = 0;
	u64 l3_offset = 0;
	u32 cpu_dest = 0;
	u32 cpu_idx = 0;
	u32 *cpu_lookup;
	u32 *cpu_max;
	u32 cpu_hash;
	u32 key = 0;

	/* Count RX packet in map */
	rec = bpf_map_lookup_elem(&rx_cnt, &key);
	if (!rec)
		return XDP_ABORTED;
	rec->processed++;

	cpu_max = bpf_map_lookup_elem(&cpus_count, &key);
	if (!cpu_max)
		return XDP_ABORTED;

	if (!(parse_eth(eth, data_end, &eth_proto, &l3_offset)))
		return XDP_PASS; /* Just skip */

	/* Hash for IPv4 and IPv6 */
	switch (eth_proto) {
	case ETH_P_IP:
		cpu_hash = get_ipv4_hash_ip_pair(ctx, l3_offset);
		break;
	case ETH_P_IPV6:
		cpu_hash = get_ipv6_hash_ip_pair(ctx, l3_offset);
		break;
	case ETH_P_ARP: /* ARP packet handled on CPU idx 0 */
	default:
		cpu_hash = 0;
	}

	/* Choose CPU based on hash */
	cpu_idx = cpu_hash % *cpu_max;

	cpu_lookup = bpf_map_lookup_elem(&cpus_available, &cpu_idx);
	if (!cpu_lookup)
		return XDP_ABORTED;
	cpu_dest = *cpu_lookup;

	if (cpu_dest >= MAX_CPUS) {
		rec->issue++;
		return XDP_ABORTED;
	}
	return bpf_redirect_map(&cpu_map, cpu_dest, 0);
}
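/* cpus_available and cpus_count are expected to be filled by userspace
 * before attaching: cpus_available[i] holds the i'th CPU id to redirect
 * to, and cpus_count[0] the number of valid entries, which keeps the
 * modulo above inside the configured set.
 */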
char _license[] SEC("license") = "GPL";
/*** Trace point code ***/

/* Tracepoint format: /sys/kernel/debug/tracing/events/xdp/xdp_redirect/format
 * Code in:            kernel/include/trace/events/xdp.h
 */
struct xdp_redirect_ctx {
	u64 __pad;	// First 8 bytes are not accessible by bpf code
	int prog_id;	//	offset:8;  size:4; signed:1;
	u32 act;	//	offset:12; size:4; signed:0;
	int ifindex;	//	offset:16; size:4; signed:1;
	int err;	//	offset:20; size:4; signed:1;
	int to_ifindex;	//	offset:24; size:4; signed:1;
	u32 map_id;	//	offset:28; size:4; signed:0;
	int map_index;	//	offset:32; size:4; signed:1;
};			// offset:36

enum {
	XDP_REDIRECT_SUCCESS = 0,
	XDP_REDIRECT_ERROR = 1
};

static __always_inline
int xdp_redirect_collect_stat(struct xdp_redirect_ctx *ctx)
{
	u32 key = XDP_REDIRECT_ERROR;
	struct datarec *rec;
	int err = ctx->err;

	if (!err)
		key = XDP_REDIRECT_SUCCESS;

	rec = bpf_map_lookup_elem(&redirect_err_cnt, &key);
	if (!rec)
		return 0;
	rec->dropped += 1;

	return 0; /* Indicate event was filtered (no further processing) */
	/*
	 * Returning 1 here would allow e.g. a perf-record tracepoint
	 * to see and record these events, but it doesn't work well
	 * in practice, as stopping perf-record also unloads this
	 * bpf_prog.  Plus, there is additional overhead of doing so.
	 */
}
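/* xdp_redirect_collect_stat() bumps the same 'dropped' field under two
 * keys: XDP_REDIRECT_SUCCESS (0) when the tracepoint reports err == 0,
 * and XDP_REDIRECT_ERROR (1) otherwise; userspace distinguishes them by
 * key when reading redirect_err_cnt.
 */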
SEC("tracepoint/xdp/xdp_redirect_err")
int trace_xdp_redirect_err(struct xdp_redirect_ctx *ctx)
{
	return xdp_redirect_collect_stat(ctx);
}

SEC("tracepoint/xdp/xdp_redirect_map_err")
int trace_xdp_redirect_map_err(struct xdp_redirect_ctx *ctx)
{
	return xdp_redirect_collect_stat(ctx);
}

/* Tracepoint format: /sys/kernel/debug/tracing/events/xdp/xdp_exception/format
 * Code in:            kernel/include/trace/events/xdp.h
 */
struct xdp_exception_ctx {
	u64 __pad;	// First 8 bytes are not accessible by bpf code
	int prog_id;	//	offset:8;  size:4; signed:1;
	u32 act;	//	offset:12; size:4; signed:0;
	int ifindex;	//	offset:16; size:4; signed:1;
};

SEC("tracepoint/xdp/xdp_exception")
int trace_xdp_exception(struct xdp_exception_ctx *ctx)
{
	struct datarec *rec;
	u32 key = 0;

	rec = bpf_map_lookup_elem(&exception_cnt, &key);
	if (!rec)
		return 1;
	rec->dropped += 1;

	return 0;
}
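/* The xdp_exception tracepoint fires e.g. when an XDP program returns
 * XDP_ABORTED or an unsupported action, so exception_cnt gives a rough
 * count of frames the drivers had to drop on error paths.
 */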
/* Tracepoint: /sys/kernel/debug/tracing/events/xdp/xdp_cpumap_enqueue/format
 * Code in:     kernel/include/trace/events/xdp.h
 */
struct cpumap_enqueue_ctx {
	u64 __pad;		// First 8 bytes are not accessible by bpf code
	int map_id;		//	offset:8;  size:4; signed:1;
	u32 act;		//	offset:12; size:4; signed:0;
	int cpu;		//	offset:16; size:4; signed:1;
	unsigned int drops;	//	offset:20; size:4; signed:0;
	unsigned int processed;	//	offset:24; size:4; signed:0;
	int to_cpu;		//	offset:28; size:4; signed:1;
};

SEC("tracepoint/xdp/xdp_cpumap_enqueue")
int trace_xdp_cpumap_enqueue(struct cpumap_enqueue_ctx *ctx)
{
	u32 to_cpu = ctx->to_cpu;
	struct datarec *rec;

	if (to_cpu >= MAX_CPUS)
		return 1;

	rec = bpf_map_lookup_elem(&cpumap_enqueue_cnt, &to_cpu);
	if (!rec)
		return 0;
	rec->processed += ctx->processed;
	rec->dropped   += ctx->drops;

	/* Record bulk events, then userspace can calc average bulk size */
	if (ctx->processed > 0)
		rec->issue += 1;

	/* Inception: It's possible to detect overload situations, via
	 * this tracepoint.  This can be used for creating a feedback
	 * loop to XDP, which can take appropriate actions to mitigate
	 * this overload situation.
	 */
	return 0;
}
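/* Userspace can derive the average enqueue bulk size as
 * processed / issue, because 'issue' counts enqueue events that carried
 * at least one frame while 'processed' sums the frames themselves.
 */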
/* Tracepoint: /sys/kernel/debug/tracing/events/xdp/xdp_cpumap_kthread/format
 * Code in:     kernel/include/trace/events/xdp.h
 */
struct cpumap_kthread_ctx {
	u64 __pad;			// First 8 bytes are not accessible
	int map_id;			//	offset:8;  size:4; signed:1;
	u32 act;			//	offset:12; size:4; signed:0;
	int cpu;			//	offset:16; size:4; signed:1;
	unsigned int drops;		//	offset:20; size:4; signed:0;
	unsigned int processed;		//	offset:24; size:4; signed:0;
	int sched;			//	offset:28; size:4; signed:1;
	unsigned int xdp_pass;		//	offset:32; size:4; signed:0;
	unsigned int xdp_drop;		//	offset:36; size:4; signed:0;
	unsigned int xdp_redirect;	//	offset:40; size:4; signed:0;
};

SEC("tracepoint/xdp/xdp_cpumap_kthread")
int trace_xdp_cpumap_kthread(struct cpumap_kthread_ctx *ctx)
{
	struct datarec *rec;
	u32 key = 0;

	rec = bpf_map_lookup_elem(&cpumap_kthread_cnt, &key);
	if (!rec)
		return 0;
	rec->processed += ctx->processed;
	rec->dropped   += ctx->drops;
	rec->xdp_pass  += ctx->xdp_pass;
	rec->xdp_drop  += ctx->xdp_drop;
	rec->xdp_redirect += ctx->xdp_redirect;

	/* Count times kthread yielded CPU via schedule call */
	if (ctx->sched)
		rec->issue++;

	return 0;
}
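/* This kernel-side object is normally loaded by its userspace counterpart
 * in samples/bpf (xdp_redirect_cpu_user.c), which selects one of the
 * xdp_cpu_map* programs, populates cpu_map, cpus_available and
 * cpus_count, and periodically reads the stats maps above.
 */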