xdp_redirect_cpu_user.c

// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2017 Jesper Dangaard Brouer, Red Hat, Inc.
 */
static const char *__doc__ =
	" XDP redirect with a CPU-map type \"BPF_MAP_TYPE_CPUMAP\"";
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <locale.h>
#include <sys/resource.h>
#include <sys/sysinfo.h>
#include <getopt.h>
#include <net/if.h>
#include <time.h>
#include <linux/limits.h>

#include <arpa/inet.h>
#include <linux/if_link.h>

/* How many xdp_progs are defined in _kern.c */
#define MAX_PROG 6

#include <bpf/bpf.h>
#include <bpf/libbpf.h>

#include "bpf_util.h"

static int ifindex = -1;
static char ifname_buf[IF_NAMESIZE];
static char *ifname;
static __u32 prog_id;

static __u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
static int n_cpus;
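/* Indexes into map_fds[]; the order must match map_type_strings[] below,
 * which holds the BPF map names resolved with
 * bpf_object__find_map_fd_by_name() in init_map_fds().
 */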
enum map_type {
	CPU_MAP,
	RX_CNT,
	REDIRECT_ERR_CNT,
	CPUMAP_ENQUEUE_CNT,
	CPUMAP_KTHREAD_CNT,
	CPUS_AVAILABLE,
	CPUS_COUNT,
	CPUS_ITERATOR,
	EXCEPTION_CNT,
};

static const char *const map_type_strings[] = {
	[CPU_MAP] = "cpu_map",
	[RX_CNT] = "rx_cnt",
	[REDIRECT_ERR_CNT] = "redirect_err_cnt",
	[CPUMAP_ENQUEUE_CNT] = "cpumap_enqueue_cnt",
	[CPUMAP_KTHREAD_CNT] = "cpumap_kthread_cnt",
	[CPUS_AVAILABLE] = "cpus_available",
	[CPUS_COUNT] = "cpus_count",
	[CPUS_ITERATOR] = "cpus_iterator",
	[EXCEPTION_CNT] = "exception_cnt",
};
#define NUM_TP 5
#define NUM_MAP 9
struct bpf_link *tp_links[NUM_TP] = {};
static int map_fds[NUM_MAP];
static int tp_cnt = 0;

/* Exit return codes */
#define EXIT_OK			0
#define EXIT_FAIL		1
#define EXIT_FAIL_OPTION	2
#define EXIT_FAIL_XDP		3
#define EXIT_FAIL_BPF		4
#define EXIT_FAIL_MEM		5

static const struct option long_options[] = {
	{"help",		no_argument,		NULL, 'h' },
	{"dev",			required_argument,	NULL, 'd' },
	{"skb-mode",		no_argument,		NULL, 'S' },
	{"sec",			required_argument,	NULL, 's' },
	{"progname",		required_argument,	NULL, 'p' },
	{"qsize",		required_argument,	NULL, 'q' },
	{"cpu",			required_argument,	NULL, 'c' },
	{"stress-mode",		no_argument,		NULL, 'x' },
	{"no-separators",	no_argument,		NULL, 'z' },
	{"force",		no_argument,		NULL, 'F' },
	{"mprog-disable",	no_argument,		NULL, 'n' },
	{"mprog-name",		required_argument,	NULL, 'e' },
	{"mprog-filename",	required_argument,	NULL, 'f' },
	{"redirect-device",	required_argument,	NULL, 'r' },
	{"redirect-map",	required_argument,	NULL, 'm' },
	{0, 0, NULL, 0 }
};
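/* SIGINT/SIGTERM handler: detach our XDP program from the interface
 * (only if the attached program id still matches ours), destroy the
 * tracepoint links, and exit.
 */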
static void int_exit(int sig)
{
	__u32 curr_prog_id = 0;

	if (ifindex > -1) {
		if (bpf_get_link_xdp_id(ifindex, &curr_prog_id, xdp_flags)) {
			printf("bpf_get_link_xdp_id failed\n");
			exit(EXIT_FAIL);
		}
		if (prog_id == curr_prog_id) {
			fprintf(stderr,
				"Interrupted: Removing XDP program on ifindex:%d device:%s\n",
				ifindex, ifname);
			bpf_set_link_xdp_fd(ifindex, -1, xdp_flags);
		} else if (!curr_prog_id) {
			printf("couldn't find a prog id on the given iface\n");
		} else {
			printf("program on interface changed, not removing\n");
		}
	}
	/* Detach tracepoints */
	while (tp_cnt)
		bpf_link__destroy(tp_links[--tp_cnt]);

	exit(EXIT_OK);
}
static void print_avail_progs(struct bpf_object *obj)
{
	struct bpf_program *pos;

	bpf_object__for_each_program(pos, obj) {
		if (bpf_program__is_xdp(pos))
			printf(" %s\n", bpf_program__section_name(pos));
	}
}

static void usage(char *argv[], struct bpf_object *obj)
{
	int i;

	printf("\nDOCUMENTATION:\n%s\n", __doc__);
	printf("\n");
	printf(" Usage: %s (options-see-below)\n", argv[0]);
	printf(" Listing options:\n");
	for (i = 0; long_options[i].name != 0; i++) {
		printf(" --%-12s", long_options[i].name);
		if (long_options[i].flag != NULL)
			printf(" flag (internal value:%d)",
			       *long_options[i].flag);
		else
			printf(" short-option: -%c",
			       long_options[i].val);
		printf("\n");
	}
	printf("\n Programs to be used for --progname:\n");
	print_avail_progs(obj);
	printf("\n");
}
/* gettime returns the current CLOCK_MONOTONIC time in nanoseconds.
 * Cost: clock_gettime (ns) => 26ns (CLOCK_MONOTONIC)
 *       clock_gettime (ns) =>  9ns (CLOCK_MONOTONIC_COARSE)
 */
#define NANOSEC_PER_SEC 1000000000 /* 10^9 */
static __u64 gettime(void)
{
	struct timespec t;
	int res;

	res = clock_gettime(CLOCK_MONOTONIC, &t);
	if (res < 0) {
		fprintf(stderr, "Error with clock_gettime! (%i)\n", res);
		exit(EXIT_FAIL);
	}
	return (__u64) t.tv_sec * NANOSEC_PER_SEC + t.tv_nsec;
}
/* Common stats data record shared with _kern.c */
struct datarec {
	__u64 processed;
	__u64 dropped;
	__u64 issue;
	__u64 xdp_pass;
	__u64 xdp_drop;
	__u64 xdp_redirect;
};

struct record {
	__u64 timestamp;
	struct datarec total;
	struct datarec *cpu;
};
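/* One snapshot of all stat maps; enq[] is a flexible array member with
 * one record per destination CPU in the cpumap (n_cpus entries,
 * allocated in alloc_stats_record()).
 */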
struct stats_record {
	struct record rx_cnt;
	struct record redir_err;
	struct record kthread;
	struct record exception;
	struct record enq[];
};
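/* Read one entry from a per-CPU map and fold it into *rec: the per-CPU
 * slots are copied verbatim and summed into rec->total. Returns false
 * if the map lookup fails.
 */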
static bool map_collect_percpu(int fd, __u32 key, struct record *rec)
{
	/* For percpu maps, userspace gets a value per possible CPU */
	unsigned int nr_cpus = bpf_num_possible_cpus();
	struct datarec values[nr_cpus];
	__u64 sum_xdp_redirect = 0;
	__u64 sum_xdp_pass = 0;
	__u64 sum_xdp_drop = 0;
	__u64 sum_processed = 0;
	__u64 sum_dropped = 0;
	__u64 sum_issue = 0;
	int i;

	if ((bpf_map_lookup_elem(fd, &key, values)) != 0) {
		fprintf(stderr,
			"ERR: bpf_map_lookup_elem failed key:0x%X\n", key);
		return false;
	}
	/* Get time as close as possible to reading map contents */
	rec->timestamp = gettime();

	/* Record and sum values from each CPU */
	for (i = 0; i < nr_cpus; i++) {
		rec->cpu[i].processed = values[i].processed;
		sum_processed += values[i].processed;
		rec->cpu[i].dropped = values[i].dropped;
		sum_dropped += values[i].dropped;
		rec->cpu[i].issue = values[i].issue;
		sum_issue += values[i].issue;
		rec->cpu[i].xdp_pass = values[i].xdp_pass;
		sum_xdp_pass += values[i].xdp_pass;
		rec->cpu[i].xdp_drop = values[i].xdp_drop;
		sum_xdp_drop += values[i].xdp_drop;
		rec->cpu[i].xdp_redirect = values[i].xdp_redirect;
		sum_xdp_redirect += values[i].xdp_redirect;
	}
	rec->total.processed = sum_processed;
	rec->total.dropped = sum_dropped;
	rec->total.issue = sum_issue;
	rec->total.xdp_pass = sum_xdp_pass;
	rec->total.xdp_drop = sum_xdp_drop;
	rec->total.xdp_redirect = sum_xdp_redirect;
	return true;
}
static struct datarec *alloc_record_per_cpu(void)
{
	unsigned int nr_cpus = bpf_num_possible_cpus();
	struct datarec *array;

	array = calloc(nr_cpus, sizeof(struct datarec));
	if (!array) {
		fprintf(stderr, "Mem alloc error (nr_cpus:%u)\n", nr_cpus);
		exit(EXIT_FAIL_MEM);
	}
	return array;
}

static struct stats_record *alloc_stats_record(void)
{
	struct stats_record *rec;
	int i, size;

	size = sizeof(*rec) + n_cpus * sizeof(struct record);
	rec = malloc(size);
	if (!rec) {
		fprintf(stderr, "Mem alloc error\n");
		exit(EXIT_FAIL_MEM);
	}
	memset(rec, 0, size);
	rec->rx_cnt.cpu    = alloc_record_per_cpu();
	rec->redir_err.cpu = alloc_record_per_cpu();
	rec->kthread.cpu   = alloc_record_per_cpu();
	rec->exception.cpu = alloc_record_per_cpu();
	for (i = 0; i < n_cpus; i++)
		rec->enq[i].cpu = alloc_record_per_cpu();

	return rec;
}
static void free_stats_record(struct stats_record *r)
{
	int i;

	for (i = 0; i < n_cpus; i++)
		free(r->enq[i].cpu);
	free(r->exception.cpu);
	free(r->kthread.cpu);
	free(r->redir_err.cpu);
	free(r->rx_cnt.cpu);
	free(r);
}
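/* Rate helpers: calc_period() returns the elapsed time between two
 * records in seconds; the calc_*_pps() helpers divide a counter delta
 * by that period to obtain a packets-per-second rate.
 */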
static double calc_period(struct record *r, struct record *p)
{
	double period_ = 0;
	__u64 period = 0;

	period = r->timestamp - p->timestamp;
	if (period > 0)
		period_ = ((double) period / NANOSEC_PER_SEC);

	return period_;
}

static __u64 calc_pps(struct datarec *r, struct datarec *p, double period_)
{
	__u64 packets = 0;
	__u64 pps = 0;

	if (period_ > 0) {
		packets = r->processed - p->processed;
		pps = packets / period_;
	}
	return pps;
}

static __u64 calc_drop_pps(struct datarec *r, struct datarec *p, double period_)
{
	__u64 packets = 0;
	__u64 pps = 0;

	if (period_ > 0) {
		packets = r->dropped - p->dropped;
		pps = packets / period_;
	}
	return pps;
}

static __u64 calc_errs_pps(struct datarec *r,
			   struct datarec *p, double period_)
{
	__u64 packets = 0;
	__u64 pps = 0;

	if (period_ > 0) {
		packets = r->issue - p->issue;
		pps = packets / period_;
	}
	return pps;
}

static void calc_xdp_pps(struct datarec *r, struct datarec *p,
			 double *xdp_pass, double *xdp_drop,
			 double *xdp_redirect, double period_)
{
	*xdp_pass = 0, *xdp_drop = 0, *xdp_redirect = 0;
	if (period_ > 0) {
		*xdp_redirect = (r->xdp_redirect - p->xdp_redirect) / period_;
		*xdp_pass = (r->xdp_pass - p->xdp_pass) / period_;
		*xdp_drop = (r->xdp_drop - p->xdp_drop) / period_;
	}
}
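/* Print the delta between two snapshots as pps rates: one block each
 * for XDP RX, cpumap enqueue (per destination CPU), the cpumap kthread,
 * redirect errors, xdp_exception, and (if loaded) the 2nd XDP program
 * that runs on the destination CPU.
 */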
static void stats_print(struct stats_record *stats_rec,
			struct stats_record *stats_prev,
			char *prog_name, char *mprog_name, int mprog_fd)
{
	unsigned int nr_cpus = bpf_num_possible_cpus();
	double pps = 0, drop = 0, err = 0;
	bool mprog_enabled = false;
	struct record *rec, *prev;
	int to_cpu;
	double t;
	int i;

	if (mprog_fd > 0)
		mprog_enabled = true;

	/* Header */
	printf("Running XDP/eBPF prog_name:%s\n", prog_name);
	printf("%-15s %-7s %-14s %-11s %-9s\n",
	       "XDP-cpumap", "CPU:to", "pps", "drop-pps", "extra-info");

	/* XDP rx_cnt */
	{
		char *fmt_rx = "%-15s %-7d %'-14.0f %'-11.0f %'-10.0f %s\n";
		char *fm2_rx = "%-15s %-7s %'-14.0f %'-11.0f\n";
		char *errstr = "";

		rec  = &stats_rec->rx_cnt;
		prev = &stats_prev->rx_cnt;
		t = calc_period(rec, prev);
		for (i = 0; i < nr_cpus; i++) {
			struct datarec *r = &rec->cpu[i];
			struct datarec *p = &prev->cpu[i];

			pps = calc_pps(r, p, t);
			drop = calc_drop_pps(r, p, t);
			err = calc_errs_pps(r, p, t);
			if (err > 0)
				errstr = "cpu-dest/err";
			if (pps > 0)
				printf(fmt_rx, "XDP-RX",
				       i, pps, drop, err, errstr);
		}
		pps = calc_pps(&rec->total, &prev->total, t);
		drop = calc_drop_pps(&rec->total, &prev->total, t);
		err = calc_errs_pps(&rec->total, &prev->total, t);
		printf(fm2_rx, "XDP-RX", "total", pps, drop);
	}

	/* cpumap enqueue stats */
	for (to_cpu = 0; to_cpu < n_cpus; to_cpu++) {
		char *fmt = "%-15s %3d:%-3d %'-14.0f %'-11.0f %'-10.2f %s\n";
		char *fm2 = "%-15s %3s:%-3d %'-14.0f %'-11.0f %'-10.2f %s\n";
		char *errstr = "";

		rec  = &stats_rec->enq[to_cpu];
		prev = &stats_prev->enq[to_cpu];
		t = calc_period(rec, prev);
		for (i = 0; i < nr_cpus; i++) {
			struct datarec *r = &rec->cpu[i];
			struct datarec *p = &prev->cpu[i];

			pps = calc_pps(r, p, t);
			drop = calc_drop_pps(r, p, t);
			err = calc_errs_pps(r, p, t);
			if (err > 0) {
				errstr = "bulk-average";
				err = pps / err; /* calc average bulk size */
			}
			if (pps > 0)
				printf(fmt, "cpumap-enqueue",
				       i, to_cpu, pps, drop, err, errstr);
		}
		pps = calc_pps(&rec->total, &prev->total, t);
		if (pps > 0) {
			drop = calc_drop_pps(&rec->total, &prev->total, t);
			err = calc_errs_pps(&rec->total, &prev->total, t);
			if (err > 0) {
				errstr = "bulk-average";
				err = pps / err; /* calc average bulk size */
			}
			printf(fm2, "cpumap-enqueue",
			       "sum", to_cpu, pps, drop, err, errstr);
		}
	}

	/* cpumap kthread stats */
	{
		char *fmt_k = "%-15s %-7d %'-14.0f %'-11.0f %'-10.0f %s\n";
		char *fm2_k = "%-15s %-7s %'-14.0f %'-11.0f %'-10.0f %s\n";
		char *e_str = "";

		rec  = &stats_rec->kthread;
		prev = &stats_prev->kthread;
		t = calc_period(rec, prev);
		for (i = 0; i < nr_cpus; i++) {
			struct datarec *r = &rec->cpu[i];
			struct datarec *p = &prev->cpu[i];

			pps = calc_pps(r, p, t);
			drop = calc_drop_pps(r, p, t);
			err = calc_errs_pps(r, p, t);
			if (err > 0)
				e_str = "sched";
			if (pps > 0)
				printf(fmt_k, "cpumap_kthread",
				       i, pps, drop, err, e_str);
		}
		pps = calc_pps(&rec->total, &prev->total, t);
		drop = calc_drop_pps(&rec->total, &prev->total, t);
		err = calc_errs_pps(&rec->total, &prev->total, t);
		if (err > 0)
			e_str = "sched-sum";
		printf(fm2_k, "cpumap_kthread", "total", pps, drop, err, e_str);
	}

	/* XDP redirect err tracepoints (very unlikely) */
	{
		char *fmt_err = "%-15s %-7d %'-14.0f %'-11.0f\n";
		char *fm2_err = "%-15s %-7s %'-14.0f %'-11.0f\n";

		rec  = &stats_rec->redir_err;
		prev = &stats_prev->redir_err;
		t = calc_period(rec, prev);
		for (i = 0; i < nr_cpus; i++) {
			struct datarec *r = &rec->cpu[i];
			struct datarec *p = &prev->cpu[i];

			pps = calc_pps(r, p, t);
			drop = calc_drop_pps(r, p, t);
			if (pps > 0)
				printf(fmt_err, "redirect_err", i, pps, drop);
		}
		pps = calc_pps(&rec->total, &prev->total, t);
		drop = calc_drop_pps(&rec->total, &prev->total, t);
		printf(fm2_err, "redirect_err", "total", pps, drop);
	}

	/* XDP general exception tracepoints */
	{
		char *fmt_err = "%-15s %-7d %'-14.0f %'-11.0f\n";
		char *fm2_err = "%-15s %-7s %'-14.0f %'-11.0f\n";

		rec  = &stats_rec->exception;
		prev = &stats_prev->exception;
		t = calc_period(rec, prev);
		for (i = 0; i < nr_cpus; i++) {
			struct datarec *r = &rec->cpu[i];
			struct datarec *p = &prev->cpu[i];

			pps = calc_pps(r, p, t);
			drop = calc_drop_pps(r, p, t);
			if (pps > 0)
				printf(fmt_err, "xdp_exception", i, pps, drop);
		}
		pps = calc_pps(&rec->total, &prev->total, t);
		drop = calc_drop_pps(&rec->total, &prev->total, t);
		printf(fm2_err, "xdp_exception", "total", pps, drop);
	}

	/* CPUMAP attached XDP program that runs on remote/destination CPU */
	if (mprog_enabled) {
		char *fmt_k = "%-15s %-7d %'-14.0f %'-11.0f %'-10.0f\n";
		char *fm2_k = "%-15s %-7s %'-14.0f %'-11.0f %'-10.0f\n";
		double xdp_pass, xdp_drop, xdp_redirect;

		printf("\n2nd remote XDP/eBPF prog_name: %s\n", mprog_name);
		printf("%-15s %-7s %-14s %-11s %-9s\n",
		       "XDP-cpumap", "CPU:to", "xdp-pass", "xdp-drop", "xdp-redir");

		rec  = &stats_rec->kthread;
		prev = &stats_prev->kthread;
		t = calc_period(rec, prev);
		for (i = 0; i < nr_cpus; i++) {
			struct datarec *r = &rec->cpu[i];
			struct datarec *p = &prev->cpu[i];

			calc_xdp_pps(r, p, &xdp_pass, &xdp_drop,
				     &xdp_redirect, t);
			if (xdp_pass > 0 || xdp_drop > 0 || xdp_redirect > 0)
				printf(fmt_k, "xdp-in-kthread", i, xdp_pass, xdp_drop,
				       xdp_redirect);
		}
		calc_xdp_pps(&rec->total, &prev->total, &xdp_pass, &xdp_drop,
			     &xdp_redirect, t);
		printf(fm2_k, "xdp-in-kthread", "total", xdp_pass, xdp_drop, xdp_redirect);
	}

	printf("\n");
	fflush(stdout);
}
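/* Sample every stat map into *rec. Note the keys: rx_cnt and the
 * kthread/exception counters live at key 0, redirect_err_cnt is read
 * at key 1 (the error slot), and cpumap_enqueue_cnt is keyed by
 * destination CPU.
 */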
static void stats_collect(struct stats_record *rec)
{
	int fd, i;

	fd = map_fds[RX_CNT];
	map_collect_percpu(fd, 0, &rec->rx_cnt);

	fd = map_fds[REDIRECT_ERR_CNT];
	map_collect_percpu(fd, 1, &rec->redir_err);

	fd = map_fds[CPUMAP_ENQUEUE_CNT];
	for (i = 0; i < n_cpus; i++)
		map_collect_percpu(fd, i, &rec->enq[i]);

	fd = map_fds[CPUMAP_KTHREAD_CNT];
	map_collect_percpu(fd, 0, &rec->kthread);

	fd = map_fds[EXCEPTION_CNT];
	map_collect_percpu(fd, 0, &rec->exception);
}
/* Pointer swap trick */
static inline void swap(struct stats_record **a, struct stats_record **b)
{
	struct stats_record *tmp;

	tmp = *a;
	*a = *b;
	*b = tmp;
}
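/* Insert or update one cpumap entry and publish the chosen CPU through
 * the control maps (cpus_available/cpus_count) that the XDP programs
 * consult when picking a destination CPU.
 */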
static int create_cpu_entry(__u32 cpu, struct bpf_cpumap_val *value,
			    __u32 avail_idx, bool new)
{
	__u32 curr_cpus_count = 0;
	__u32 key = 0;
	int ret;

	/* Add a CPU entry to cpumap, as this allocates a cpu entry in
	 * the kernel for the cpu.
	 */
	ret = bpf_map_update_elem(map_fds[CPU_MAP], &cpu, value, 0);
	if (ret) {
		fprintf(stderr, "Create CPU entry failed (err:%d)\n", ret);
		exit(EXIT_FAIL_BPF);
	}

	/* Inform the bpf programs that a new CPU is available to select
	 * from, via the control maps.
	 */
	ret = bpf_map_update_elem(map_fds[CPUS_AVAILABLE], &avail_idx, &cpu, 0);
	if (ret) {
		fprintf(stderr, "Add to avail CPUs failed\n");
		exit(EXIT_FAIL_BPF);
	}

	/* When not replacing/updating an existing entry, bump the count */
	ret = bpf_map_lookup_elem(map_fds[CPUS_COUNT], &key, &curr_cpus_count);
	if (ret) {
		fprintf(stderr, "Failed reading curr cpus_count\n");
		exit(EXIT_FAIL_BPF);
	}
	if (new) {
		curr_cpus_count++;
		ret = bpf_map_update_elem(map_fds[CPUS_COUNT], &key,
					  &curr_cpus_count, 0);
		if (ret) {
			fprintf(stderr, "Failed writing curr cpus_count\n");
			exit(EXIT_FAIL_BPF);
		}
	}
	/* map_fd[7] = cpus_iterator */
	printf("%s CPU:%u as idx:%u qsize:%d prog_fd: %d (cpus_count:%u)\n",
	       new ? "Add-new" : "Replace", cpu, avail_idx,
	       value->qsize, value->bpf_prog.fd, curr_cpus_count);

	return 0;
}
/* CPUs are zero-indexed. Thus, add a special sentinel default value
 * in map cpus_available to mark CPU indexes that are not configured.
 */
static void mark_cpus_unavailable(void)
{
	__u32 invalid_cpu = n_cpus;
	int ret, i;

	for (i = 0; i < n_cpus; i++) {
		ret = bpf_map_update_elem(map_fds[CPUS_AVAILABLE], &i,
					  &invalid_cpu, 0);
		if (ret) {
			fprintf(stderr, "Failed marking CPU unavailable\n");
			exit(EXIT_FAIL_BPF);
		}
	}
}
/* Stress cpumap management code by concurrently changing underlying cpumap */
static void stress_cpumap(struct bpf_cpumap_val *value)
{
	/* Changing qsize will cause the kernel to free and allocate a new
	 * bpf_cpu_map_entry, with an associated/complicated tear-down
	 * procedure.
	 */
	value->qsize = 1024;
	create_cpu_entry(1, value, 0, false);
	value->qsize = 8;
	create_cpu_entry(1, value, 0, false);
	value->qsize = 16000;
	create_cpu_entry(1, value, 0, false);
}
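/* Main reporting loop: snapshot the maps, print the delta against the
 * previous snapshot, sleep, repeat. With --stress-mode the cpumap entry
 * for CPU 1 is replaced on every iteration.
 */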
static void stats_poll(int interval, bool use_separators, char *prog_name,
		       char *mprog_name, struct bpf_cpumap_val *value,
		       bool stress_mode)
{
	struct stats_record *record, *prev;
	int mprog_fd;

	record = alloc_stats_record();
	prev   = alloc_stats_record();
	stats_collect(record);

	/* Trick to pretty-print with thousands separators: use %' */
	if (use_separators)
		setlocale(LC_NUMERIC, "en_US");

	while (1) {
		swap(&prev, &record);
		mprog_fd = value->bpf_prog.fd;
		stats_collect(record);
		stats_print(record, prev, prog_name, mprog_name, mprog_fd);
		sleep(interval);
		if (stress_mode)
			stress_cpumap(value);
	}

	free_stats_record(record);
	free_stats_record(prev);
}
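/* Attach every tracepoint program found in the BPF object; the
 * resulting links are kept in tp_links[] so int_exit() can destroy
 * them on shutdown.
 */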
static int init_tracepoints(struct bpf_object *obj)
{
	struct bpf_program *prog;

	bpf_object__for_each_program(prog, obj) {
		if (bpf_program__is_tracepoint(prog) != true)
			continue;

		tp_links[tp_cnt] = bpf_program__attach(prog);
		if (libbpf_get_error(tp_links[tp_cnt])) {
			tp_links[tp_cnt] = NULL;
			return -EINVAL;
		}
		tp_cnt++;
	}

	return 0;
}
static int init_map_fds(struct bpf_object *obj)
{
	enum map_type type;

	for (type = 0; type < NUM_MAP; type++) {
		map_fds[type] =
			bpf_object__find_map_fd_by_name(obj,
							map_type_strings[type]);

		if (map_fds[type] < 0)
			return -ENOENT;
	}

	return 0;
}
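/* Load the 2nd-level XDP program (expected_attach_type BPF_XDP_CPUMAP)
 * that will run on the destination CPU, and optionally wire up a
 * redirect-map entry so it can redirect onwards to another device.
 * Returns the program fd, or a negative value on error.
 */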
static int load_cpumap_prog(char *file_name, char *prog_name,
			    char *redir_interface, char *redir_map)
{
	struct bpf_prog_load_attr prog_load_attr = {
		.prog_type		= BPF_PROG_TYPE_XDP,
		.expected_attach_type	= BPF_XDP_CPUMAP,
		.file = file_name,
	};
	struct bpf_program *prog;
	struct bpf_object *obj;
	int fd;

	if (bpf_prog_load_xattr(&prog_load_attr, &obj, &fd))
		return -1;

	if (fd < 0) {
		fprintf(stderr, "ERR: bpf_prog_load_xattr: %s\n",
			strerror(errno));
		return fd;
	}

	if (redir_interface && redir_map) {
		int err, map_fd, ifindex_out, key = 0;

		map_fd = bpf_object__find_map_fd_by_name(obj, redir_map);
		if (map_fd < 0)
			return map_fd;

		ifindex_out = if_nametoindex(redir_interface);
		if (!ifindex_out)
			return -1;

		err = bpf_map_update_elem(map_fd, &key, &ifindex_out, 0);
		if (err < 0)
			return err;
	}

	prog = bpf_object__find_program_by_title(obj, prog_name);
	if (!prog) {
		fprintf(stderr, "bpf_object__find_program_by_title failed\n");
		return EXIT_FAIL;
	}

	return bpf_program__fd(prog);
}
int main(int argc, char **argv)
{
	struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
	char *prog_name = "xdp_cpu_map5_lb_hash_ip_pairs";
	char *mprog_filename = "xdp_redirect_kern.o";
	char *redir_interface = NULL, *redir_map = NULL;
	char *mprog_name = "xdp_redirect_dummy";
	bool mprog_disable = false;
	struct bpf_prog_load_attr prog_load_attr = {
		.prog_type	= BPF_PROG_TYPE_UNSPEC,
	};
	struct bpf_prog_info info = {};
	__u32 info_len = sizeof(info);
	struct bpf_cpumap_val value;
	bool use_separators = true;
	bool stress_mode = false;
	struct bpf_program *prog;
	struct bpf_object *obj;
	int err = EXIT_FAIL;
	char filename[256];
	int added_cpus = 0;
	int longindex = 0;
	int interval = 2;
	int add_cpu = -1;
	int opt, prog_fd;
	int *cpu, i;
	__u32 qsize;
	n_cpus = get_nprocs_conf();

	/* Notice: choosing the queue size is very important with the
	 * ixgbe driver, because its page-recycling trick depends on
	 * pages being returned quickly. The number of outstanding
	 * packets in the system must be less than 2x the RX-ring size.
	 */
	qsize = 128 + 64;

	snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
	prog_load_attr.file = filename;
  697. prog_load_attr.file = filename;
  698. if (setrlimit(RLIMIT_MEMLOCK, &r)) {
  699. perror("setrlimit(RLIMIT_MEMLOCK)");
  700. return 1;
  701. }
  702. if (bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd))
  703. return err;
  704. if (prog_fd < 0) {
  705. fprintf(stderr, "ERR: bpf_prog_load_xattr: %s\n",
  706. strerror(errno));
  707. return err;
  708. }
  709. if (init_tracepoints(obj) < 0) {
  710. fprintf(stderr, "ERR: bpf_program__attach failed\n");
  711. return err;
  712. }
  713. if (init_map_fds(obj) < 0) {
  714. fprintf(stderr, "bpf_object__find_map_fd_by_name failed\n");
  715. return err;
  716. }
  717. mark_cpus_unavailable();
  718. cpu = malloc(n_cpus * sizeof(int));
  719. if (!cpu) {
  720. fprintf(stderr, "failed to allocate cpu array\n");
  721. return err;
  722. }
  723. memset(cpu, 0, n_cpus * sizeof(int));
	/* Parse command line args */
	while ((opt = getopt_long(argc, argv, "hSd:s:p:q:c:xzFf:e:r:m:n",
				  long_options, &longindex)) != -1) {
		switch (opt) {
		case 'd':
			if (strlen(optarg) >= IF_NAMESIZE) {
				fprintf(stderr, "ERR: --dev name too long\n");
				goto error;
			}
			ifname = (char *)&ifname_buf;
			strncpy(ifname, optarg, IF_NAMESIZE);
			ifindex = if_nametoindex(ifname);
			if (ifindex == 0) {
				fprintf(stderr,
					"ERR: --dev name unknown err(%d):%s\n",
					errno, strerror(errno));
				goto error;
			}
			break;
		case 's':
			interval = atoi(optarg);
			break;
		case 'S':
			xdp_flags |= XDP_FLAGS_SKB_MODE;
			break;
		case 'x':
			stress_mode = true;
			break;
		case 'z':
			use_separators = false;
			break;
		case 'p':
			/* Selecting eBPF prog to load */
			prog_name = optarg;
			break;
		case 'n':
			mprog_disable = true;
			break;
		case 'f':
			mprog_filename = optarg;
			break;
		case 'e':
			mprog_name = optarg;
			break;
		case 'r':
			redir_interface = optarg;
			break;
		case 'm':
			redir_map = optarg;
			break;
		case 'c':
			/* Add multiple CPUs */
			add_cpu = strtoul(optarg, NULL, 0);
			if (add_cpu >= n_cpus) {
				fprintf(stderr,
					"--cpu nr too large for cpumap err(%d):%s\n",
					errno, strerror(errno));
				goto error;
			}
			cpu[added_cpus++] = add_cpu;
			break;
		case 'q':
			qsize = atoi(optarg);
			break;
		case 'F':
			xdp_flags &= ~XDP_FLAGS_UPDATE_IF_NOEXIST;
			break;
		case 'h':
		error:
		default:
			free(cpu);
			usage(argv, obj);
			return EXIT_FAIL_OPTION;
		}
	}
	if (!(xdp_flags & XDP_FLAGS_SKB_MODE))
		xdp_flags |= XDP_FLAGS_DRV_MODE;

	/* Required option */
	if (ifindex == -1) {
		fprintf(stderr, "ERR: required option --dev missing\n");
		usage(argv, obj);
		err = EXIT_FAIL_OPTION;
		goto out;
	}

	/* Required option */
	if (add_cpu == -1) {
		fprintf(stderr, "ERR: required option --cpu missing\n");
		fprintf(stderr, " Specify multiple --cpu options to add more\n");
		usage(argv, obj);
		err = EXIT_FAIL_OPTION;
		goto out;
	}

	value.bpf_prog.fd = 0;
	if (!mprog_disable)
		value.bpf_prog.fd = load_cpumap_prog(mprog_filename,
						     mprog_name,
						     redir_interface,
						     redir_map);
	if (value.bpf_prog.fd < 0) {
		err = value.bpf_prog.fd;
		goto out;
	}
	value.qsize = qsize;

	for (i = 0; i < added_cpus; i++)
		create_cpu_entry(cpu[i], &value, i, true);

	/* Remove XDP program when program is interrupted or killed */
	signal(SIGINT, int_exit);
	signal(SIGTERM, int_exit);

	prog = bpf_object__find_program_by_title(obj, prog_name);
	if (!prog) {
		fprintf(stderr, "bpf_object__find_program_by_title failed\n");
		goto out;
	}

	prog_fd = bpf_program__fd(prog);
	if (prog_fd < 0) {
		fprintf(stderr, "bpf_program__fd failed\n");
		goto out;
	}

	if (bpf_set_link_xdp_fd(ifindex, prog_fd, xdp_flags) < 0) {
		fprintf(stderr, "link set xdp fd failed\n");
		err = EXIT_FAIL_XDP;
		goto out;
	}

	err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
	if (err) {
		printf("can't get prog info - %s\n", strerror(errno));
		goto out;
	}
	prog_id = info.id;

	stats_poll(interval, use_separators, prog_name, mprog_name,
		   &value, stress_mode);

	err = EXIT_OK;
out:
	free(cpu);
	return err;
}