xsk_fwd.c

// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2020 Intel Corporation. */

#define _GNU_SOURCE
#include <poll.h>
#include <pthread.h>
#include <signal.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>
#include <getopt.h>
#include <netinet/ether.h>
#include <net/if.h>

#include <linux/bpf.h>
#include <linux/if_link.h>
#include <linux/if_xdp.h>

#include <bpf/libbpf.h>
#include <bpf/xsk.h>
#include <bpf/bpf.h>

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

typedef __u64 u64;
typedef __u32 u32;
typedef __u16 u16;
typedef __u8 u8;

/* This program illustrates packet forwarding between multiple AF_XDP sockets
 * in a multi-threaded environment. All threads share a common buffer pool,
 * with each socket having its own private buffer cache.
 *
 * Example 1: Single thread handling two sockets. The packets received by
 * socket A (interface IFA, queue QA) are forwarded to socket B (interface IFB,
 * queue QB), while the packets received by socket B are forwarded to socket A.
 * The thread is running on CPU core X:
 *
 *         ./xsk_fwd -i IFA -q QA -i IFB -q QB -c X
 *
 * Example 2: Two threads, each handling two sockets. The thread running on CPU
 * core X forwards all the packets received by socket A to socket B, and all
 * the packets received by socket B to socket A. The thread running on CPU core
 * Y performs the same packet forwarding between sockets C and D:
 *
 *         ./xsk_fwd -i IFA -q QA -i IFB -q QB -i IFC -q QC -i IFD -q QD
 *                   -c CX -c CY
 */

/*
 * Buffer pool and buffer cache
 *
 * For packet forwarding, the packet buffers are typically allocated from the
 * pool for packet reception and freed back to the pool for further reuse once
 * the packet transmission is completed.
 *
 * The buffer pool is shared between multiple threads. In order to minimize the
 * access latency to the shared buffer pool, each thread creates one (or
 * several) buffer caches, which, unlike the buffer pool, are private to the
 * thread that creates them and therefore cannot be shared with other threads.
 * Access to the shared pool is only needed either (A) when the cache gets
 * empty due to repeated buffer allocations and it needs to be replenished from
 * the pool, or (B) when the cache gets full due to repeated buffer frees and
 * it needs to be flushed back to the pool.
 *
 * In a packet forwarding system, a packet received on any input port can
 * potentially be transmitted on any output port, depending on the forwarding
 * configuration. For AF_XDP sockets, for this to work with zero-copy of the
 * packet buffers, it is required that the buffer pool memory fits into the
 * UMEM area shared by all the sockets.
 */

struct bpool_params {
	u32 n_buffers;
	u32 buffer_size;
	int mmap_flags;

	u32 n_users_max;
	u32 n_buffers_per_slab;
};

/* This buffer pool implementation organizes the buffers into equally sized
 * slabs of *n_buffers_per_slab*. Initially, there are *n_slabs* slabs in the
 * pool that are completely filled with buffer pointers (full slabs).
 *
 * Each buffer cache has a slab for buffer allocation and a slab for buffer
 * free, with both of these slabs initially empty. When the cache's allocation
 * slab goes empty, it is swapped with one of the available full slabs from the
 * pool, if any is available. When the cache's free slab goes full, it is
 * swapped for one of the empty slabs from the pool, which is guaranteed to
 * succeed.
 *
 * Partially filled slabs never get traded between the cache and the pool
 * (except when the cache itself is destroyed), which enables fast operation
 * through pointer swapping.
 */

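/*
 * For illustration, with the defaults used further down in this file
 * (n_buffers = 64 * 1024 and n_buffers_per_slab = 2 *
 * XSK_RING_PROD__DEFAULT_NUM_DESCS, i.e. 4096 buffers per slab assuming the
 * libbpf default of 2048 descriptors), the pool is carved into 16 full slabs,
 * plus 2 * n_users_max = 32 reserved, initially empty slabs that back the
 * per-cache allocation/free slabs.
 */
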
struct bpool {
	struct bpool_params params;
	pthread_mutex_t lock;
	void *addr;

	u64 **slabs;
	u64 **slabs_reserved;
	u64 *buffers;
	u64 *buffers_reserved;

	u64 n_slabs;
	u64 n_slabs_reserved;
	u64 n_buffers;

	u64 n_slabs_available;
	u64 n_slabs_reserved_available;

	struct xsk_umem_config umem_cfg;
	struct xsk_ring_prod umem_fq;
	struct xsk_ring_cons umem_cq;
	struct xsk_umem *umem;
};

static struct bpool *
bpool_init(struct bpool_params *params,
	   struct xsk_umem_config *umem_cfg)
{
	struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
	u64 n_slabs, n_slabs_reserved, n_buffers, n_buffers_reserved;
	u64 slabs_size, slabs_reserved_size;
	u64 buffers_size, buffers_reserved_size;
	u64 total_size, i;
	struct bpool *bp;
	u8 *p;
	int status;

	/* mmap prep. */
	if (setrlimit(RLIMIT_MEMLOCK, &r))
		return NULL;

	/* bpool internals dimensioning. */
	n_slabs = (params->n_buffers + params->n_buffers_per_slab - 1) /
		  params->n_buffers_per_slab;
	n_slabs_reserved = params->n_users_max * 2;
	n_buffers = n_slabs * params->n_buffers_per_slab;
	n_buffers_reserved = n_slabs_reserved * params->n_buffers_per_slab;

	slabs_size = n_slabs * sizeof(u64 *);
	slabs_reserved_size = n_slabs_reserved * sizeof(u64 *);
	buffers_size = n_buffers * sizeof(u64);
	buffers_reserved_size = n_buffers_reserved * sizeof(u64);

	total_size = sizeof(struct bpool) +
		     slabs_size + slabs_reserved_size +
		     buffers_size + buffers_reserved_size;

	/* bpool memory allocation. */
	p = calloc(total_size, sizeof(u8));
	if (!p)
		return NULL;

	/* bpool memory initialization. */
	bp = (struct bpool *)p;
	memcpy(&bp->params, params, sizeof(*params));
	bp->params.n_buffers = n_buffers;

	bp->slabs = (u64 **)&p[sizeof(struct bpool)];
	bp->slabs_reserved = (u64 **)&p[sizeof(struct bpool) +
					slabs_size];
	bp->buffers = (u64 *)&p[sizeof(struct bpool) +
				slabs_size + slabs_reserved_size];
	bp->buffers_reserved = (u64 *)&p[sizeof(struct bpool) +
					 slabs_size + slabs_reserved_size +
					 buffers_size];

	bp->n_slabs = n_slabs;
	bp->n_slabs_reserved = n_slabs_reserved;
	bp->n_buffers = n_buffers;

	for (i = 0; i < n_slabs; i++)
		bp->slabs[i] = &bp->buffers[i * params->n_buffers_per_slab];
	bp->n_slabs_available = n_slabs;

	for (i = 0; i < n_slabs_reserved; i++)
		bp->slabs_reserved[i] = &bp->buffers_reserved[i *
			params->n_buffers_per_slab];
	bp->n_slabs_reserved_available = n_slabs_reserved;

	for (i = 0; i < n_buffers; i++)
		bp->buffers[i] = i * params->buffer_size;

	/* lock. */
	status = pthread_mutex_init(&bp->lock, NULL);
	if (status) {
		free(p);
		return NULL;
	}

	/* mmap. */
	bp->addr = mmap(NULL,
			n_buffers * params->buffer_size,
			PROT_READ | PROT_WRITE,
			MAP_PRIVATE | MAP_ANONYMOUS | params->mmap_flags,
			-1,
			0);
	if (bp->addr == MAP_FAILED) {
		pthread_mutex_destroy(&bp->lock);
		free(p);
		return NULL;
	}

	/* umem. */
	status = xsk_umem__create(&bp->umem,
				  bp->addr,
				  bp->params.n_buffers * bp->params.buffer_size,
				  &bp->umem_fq,
				  &bp->umem_cq,
				  umem_cfg);
	if (status) {
		munmap(bp->addr, bp->params.n_buffers * bp->params.buffer_size);
		pthread_mutex_destroy(&bp->lock);
		free(p);
		return NULL;
	}
	memcpy(&bp->umem_cfg, umem_cfg, sizeof(*umem_cfg));

	return bp;
}

static void
bpool_free(struct bpool *bp)
{
	if (!bp)
		return;

	xsk_umem__delete(bp->umem);
	munmap(bp->addr, bp->params.n_buffers * bp->params.buffer_size);
	pthread_mutex_destroy(&bp->lock);
	free(bp);
}

struct bcache {
	struct bpool *bp;

	u64 *slab_cons;
	u64 *slab_prod;

	u64 n_buffers_cons;
	u64 n_buffers_prod;
};

static u32
bcache_slab_size(struct bcache *bc)
{
	struct bpool *bp = bc->bp;

	return bp->params.n_buffers_per_slab;
}

static struct bcache *
bcache_init(struct bpool *bp)
{
	struct bcache *bc;

	bc = calloc(1, sizeof(struct bcache));
	if (!bc)
		return NULL;

	bc->bp = bp;
	bc->n_buffers_cons = 0;
	bc->n_buffers_prod = 0;

	pthread_mutex_lock(&bp->lock);
	if (bp->n_slabs_reserved_available == 0) {
		pthread_mutex_unlock(&bp->lock);
		free(bc);
		return NULL;
	}

	bc->slab_cons = bp->slabs_reserved[bp->n_slabs_reserved_available - 1];
	bc->slab_prod = bp->slabs_reserved[bp->n_slabs_reserved_available - 2];
	bp->n_slabs_reserved_available -= 2;
	pthread_mutex_unlock(&bp->lock);

	return bc;
}

static void
bcache_free(struct bcache *bc)
{
	struct bpool *bp;

	if (!bc)
		return;

	/* In order to keep this example simple, the case of freeing any
	 * existing buffers from the cache back to the pool is ignored.
	 */
	bp = bc->bp;
	pthread_mutex_lock(&bp->lock);
	bp->slabs_reserved[bp->n_slabs_reserved_available] = bc->slab_prod;
	bp->slabs_reserved[bp->n_slabs_reserved_available + 1] = bc->slab_cons;
	bp->n_slabs_reserved_available += 2;
	pthread_mutex_unlock(&bp->lock);

	free(bc);
}

/* To work correctly, the implementation requires that the *n_buffers* input
 * argument is never greater than the buffer pool's *n_buffers_per_slab*. This
 * is typically the case, with one exception taking place when a large number
 * of buffers is allocated at init time (e.g. for the UMEM fill queue setup).
 */
static inline u32
bcache_cons_check(struct bcache *bc, u32 n_buffers)
{
	struct bpool *bp = bc->bp;
	u64 n_buffers_per_slab = bp->params.n_buffers_per_slab;
	u64 n_buffers_cons = bc->n_buffers_cons;
	u64 n_slabs_available;
	u64 *slab_full;

	/*
	 * Consumer slab is not empty: use what's available locally. Do not
	 * look for more buffers from the pool when the ask can only be
	 * partially satisfied.
	 */
	if (n_buffers_cons)
		return (n_buffers_cons < n_buffers) ?
			n_buffers_cons :
			n_buffers;

	/*
	 * Consumer slab is empty: look to trade the current consumer slab
	 * (empty) for a full slab from the pool, if any is available.
	 */
	pthread_mutex_lock(&bp->lock);
	n_slabs_available = bp->n_slabs_available;
	if (!n_slabs_available) {
		pthread_mutex_unlock(&bp->lock);
		return 0;
	}

	n_slabs_available--;
	slab_full = bp->slabs[n_slabs_available];
	bp->slabs[n_slabs_available] = bc->slab_cons;
	bp->n_slabs_available = n_slabs_available;
	pthread_mutex_unlock(&bp->lock);

	bc->slab_cons = slab_full;
	bc->n_buffers_cons = n_buffers_per_slab;
	return n_buffers;
}

static inline u64
bcache_cons(struct bcache *bc)
{
	u64 n_buffers_cons = bc->n_buffers_cons - 1;
	u64 buffer;

	buffer = bc->slab_cons[n_buffers_cons];
	bc->n_buffers_cons = n_buffers_cons;
	return buffer;
}

static inline void
bcache_prod(struct bcache *bc, u64 buffer)
{
	struct bpool *bp = bc->bp;
	u64 n_buffers_per_slab = bp->params.n_buffers_per_slab;
	u64 n_buffers_prod = bc->n_buffers_prod;
	u64 n_slabs_available;
	u64 *slab_empty;

	/*
	 * Producer slab is not yet full: store the current buffer to it.
	 */
	if (n_buffers_prod < n_buffers_per_slab) {
		bc->slab_prod[n_buffers_prod] = buffer;
		bc->n_buffers_prod = n_buffers_prod + 1;
		return;
	}

	/*
	 * Producer slab is full: trade the cache's current producer slab
	 * (full) for an empty slab from the pool, then store the current
	 * buffer to the new producer slab. As one full slab exists in the
	 * cache, it is guaranteed that there is at least one empty slab
	 * available in the pool.
	 */
	pthread_mutex_lock(&bp->lock);
	n_slabs_available = bp->n_slabs_available;
	slab_empty = bp->slabs[n_slabs_available];
	bp->slabs[n_slabs_available] = bc->slab_prod;
	bp->n_slabs_available = n_slabs_available + 1;
	pthread_mutex_unlock(&bp->lock);

	slab_empty[0] = buffer;
	bc->slab_prod = slab_empty;
	bc->n_buffers_prod = 1;
}

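/*
 * A minimal usage sketch for the cache API above (illustrative only, not part
 * of this sample's data path; "bc" and "addrs" are hypothetical locals):
 *
 *	u32 i, n;
 *
 *	n = bcache_cons_check(bc, 32);		// buffers available right now
 *	for (i = 0; i < n; i++)
 *		addrs[i] = bcache_cons(bc);	// allocate from the cons slab
 *	...
 *	for (i = 0; i < n; i++)
 *		bcache_prod(bc, addrs[i]);	// free back via the prod slab
 */
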
/*
 * Port
 *
 * Each of the forwarding ports sits on top of an AF_XDP socket. In order for
 * packet forwarding to happen with no packet buffer copy, all the sockets need
 * to share the same UMEM area, which is used as the buffer pool memory.
 */
#ifndef MAX_BURST_RX
#define MAX_BURST_RX 64
#endif

#ifndef MAX_BURST_TX
#define MAX_BURST_TX 64
#endif

struct burst_rx {
	u64 addr[MAX_BURST_RX];
	u32 len[MAX_BURST_RX];
};

struct burst_tx {
	u64 addr[MAX_BURST_TX];
	u32 len[MAX_BURST_TX];
	u32 n_pkts;
};

struct port_params {
	struct xsk_socket_config xsk_cfg;
	struct bpool *bp;
	const char *iface;
	u32 iface_queue;
};

struct port {
	struct port_params params;

	struct bcache *bc;

	struct xsk_ring_cons rxq;
	struct xsk_ring_prod txq;
	struct xsk_ring_prod umem_fq;
	struct xsk_ring_cons umem_cq;
	struct xsk_socket *xsk;
	int umem_fq_initialized;

	u64 n_pkts_rx;
	u64 n_pkts_tx;
};

static void
port_free(struct port *p)
{
	if (!p)
		return;

	/* To keep this example simple, the code to free the buffers from the
	 * socket's receive and transmit queues, as well as from the UMEM fill
	 * and completion queues, is not included.
	 */
	if (p->xsk)
		xsk_socket__delete(p->xsk);

	bcache_free(p->bc);

	free(p);
}

static struct port *
port_init(struct port_params *params)
{
	struct port *p;
	u32 umem_fq_size, pos = 0;
	int status, i;

	/* Memory allocation and initialization. */
	p = calloc(sizeof(struct port), 1);
	if (!p)
		return NULL;

	memcpy(&p->params, params, sizeof(p->params));
	umem_fq_size = params->bp->umem_cfg.fill_size;

	/* bcache. */
	p->bc = bcache_init(params->bp);
	if (!p->bc ||
	    (bcache_slab_size(p->bc) < umem_fq_size) ||
	    (bcache_cons_check(p->bc, umem_fq_size) < umem_fq_size)) {
		port_free(p);
		return NULL;
	}

	/* xsk socket. */
	status = xsk_socket__create_shared(&p->xsk,
					   params->iface,
					   params->iface_queue,
					   params->bp->umem,
					   &p->rxq,
					   &p->txq,
					   &p->umem_fq,
					   &p->umem_cq,
					   &params->xsk_cfg);
	if (status) {
		port_free(p);
		return NULL;
	}

	/* umem fq. */
	xsk_ring_prod__reserve(&p->umem_fq, umem_fq_size, &pos);

	for (i = 0; i < umem_fq_size; i++)
		*xsk_ring_prod__fill_addr(&p->umem_fq, pos + i) =
			bcache_cons(p->bc);

	xsk_ring_prod__submit(&p->umem_fq, umem_fq_size);
	p->umem_fq_initialized = 1;

	return p;
}

static inline u32
port_rx_burst(struct port *p, struct burst_rx *b)
{
	u32 n_pkts, pos, i;

	/* Free buffers for FQ replenish. */
	n_pkts = ARRAY_SIZE(b->addr);

	n_pkts = bcache_cons_check(p->bc, n_pkts);
	if (!n_pkts)
		return 0;

	/* RXQ. */
	n_pkts = xsk_ring_cons__peek(&p->rxq, n_pkts, &pos);
	if (!n_pkts) {
		if (xsk_ring_prod__needs_wakeup(&p->umem_fq)) {
			struct pollfd pollfd = {
				.fd = xsk_socket__fd(p->xsk),
				.events = POLLIN,
			};

			poll(&pollfd, 1, 0);
		}
		return 0;
	}

	for (i = 0; i < n_pkts; i++) {
		b->addr[i] = xsk_ring_cons__rx_desc(&p->rxq, pos + i)->addr;
		b->len[i] = xsk_ring_cons__rx_desc(&p->rxq, pos + i)->len;
	}

	xsk_ring_cons__release(&p->rxq, n_pkts);
	p->n_pkts_rx += n_pkts;

	/* UMEM FQ. */
	for ( ; ; ) {
		int status;

		status = xsk_ring_prod__reserve(&p->umem_fq, n_pkts, &pos);
		if (status == n_pkts)
			break;

		if (xsk_ring_prod__needs_wakeup(&p->umem_fq)) {
			struct pollfd pollfd = {
				.fd = xsk_socket__fd(p->xsk),
				.events = POLLIN,
			};

			poll(&pollfd, 1, 0);
		}
	}

	for (i = 0; i < n_pkts; i++)
		*xsk_ring_prod__fill_addr(&p->umem_fq, pos + i) =
			bcache_cons(p->bc);

	xsk_ring_prod__submit(&p->umem_fq, n_pkts);

	return n_pkts;
}

static inline void
port_tx_burst(struct port *p, struct burst_tx *b)
{
	u32 n_pkts, pos, i;
	int status;

	/* UMEM CQ. */
	n_pkts = p->params.bp->umem_cfg.comp_size;

	n_pkts = xsk_ring_cons__peek(&p->umem_cq, n_pkts, &pos);

	for (i = 0; i < n_pkts; i++) {
		u64 addr = *xsk_ring_cons__comp_addr(&p->umem_cq, pos + i);

		bcache_prod(p->bc, addr);
	}

	xsk_ring_cons__release(&p->umem_cq, n_pkts);

	/* TXQ. */
	n_pkts = b->n_pkts;

	for ( ; ; ) {
		status = xsk_ring_prod__reserve(&p->txq, n_pkts, &pos);
		if (status == n_pkts)
			break;

		if (xsk_ring_prod__needs_wakeup(&p->txq))
			sendto(xsk_socket__fd(p->xsk), NULL, 0, MSG_DONTWAIT,
			       NULL, 0);
	}

	for (i = 0; i < n_pkts; i++) {
		xsk_ring_prod__tx_desc(&p->txq, pos + i)->addr = b->addr[i];
		xsk_ring_prod__tx_desc(&p->txq, pos + i)->len = b->len[i];
	}

	xsk_ring_prod__submit(&p->txq, n_pkts);
	if (xsk_ring_prod__needs_wakeup(&p->txq))
		sendto(xsk_socket__fd(p->xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
	p->n_pkts_tx += n_pkts;
}

/*
 * Thread
 *
 * Packet forwarding threads.
 */
#ifndef MAX_PORTS_PER_THREAD
#define MAX_PORTS_PER_THREAD 16
#endif

struct thread_data {
	struct port *ports_rx[MAX_PORTS_PER_THREAD];
	struct port *ports_tx[MAX_PORTS_PER_THREAD];
	u32 n_ports_rx;
	struct burst_rx burst_rx;
	struct burst_tx burst_tx[MAX_PORTS_PER_THREAD];
	u32 cpu_core_id;
	int quit;
};

static void swap_mac_addresses(void *data)
{
	struct ether_header *eth = (struct ether_header *)data;
	struct ether_addr *src_addr = (struct ether_addr *)&eth->ether_shost;
	struct ether_addr *dst_addr = (struct ether_addr *)&eth->ether_dhost;
	struct ether_addr tmp;

	tmp = *src_addr;
	*src_addr = *dst_addr;
	*dst_addr = tmp;
}

static void *
thread_func(void *arg)
{
	struct thread_data *t = arg;
	cpu_set_t cpu_cores;
	u32 i;

	CPU_ZERO(&cpu_cores);
	CPU_SET(t->cpu_core_id, &cpu_cores);
	pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpu_cores);

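	/*
	 * Main forwarding loop: service this thread's RX ports in a
	 * round-robin fashion. Note that the index update below masks with
	 * (n_ports_rx - 1), which only cycles through all ports when
	 * n_ports_rx is a power of two.
	 */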
	for (i = 0; !t->quit; i = (i + 1) & (t->n_ports_rx - 1)) {
		struct port *port_rx = t->ports_rx[i];
		struct port *port_tx = t->ports_tx[i];
		struct burst_rx *brx = &t->burst_rx;
		struct burst_tx *btx = &t->burst_tx[i];
		u32 n_pkts, j;

		/* RX. */
		n_pkts = port_rx_burst(port_rx, brx);
		if (!n_pkts)
			continue;

		/* Process & TX. */
		for (j = 0; j < n_pkts; j++) {
			u64 addr = xsk_umem__add_offset_to_addr(brx->addr[j]);
			u8 *pkt = xsk_umem__get_data(port_rx->params.bp->addr,
						     addr);

			swap_mac_addresses(pkt);

			btx->addr[btx->n_pkts] = brx->addr[j];
			btx->len[btx->n_pkts] = brx->len[j];
			btx->n_pkts++;

			if (btx->n_pkts == MAX_BURST_TX) {
				port_tx_burst(port_tx, btx);
				btx->n_pkts = 0;
			}
		}
	}

	return NULL;
}

/*
 * Process
 */
static const struct bpool_params bpool_params_default = {
	.n_buffers = 64 * 1024,
	.buffer_size = XSK_UMEM__DEFAULT_FRAME_SIZE,
	.mmap_flags = 0,

	.n_users_max = 16,
	.n_buffers_per_slab = XSK_RING_PROD__DEFAULT_NUM_DESCS * 2,
};

static const struct xsk_umem_config umem_cfg_default = {
	.fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS * 2,
	.comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
	.frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE,
	.frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM,
	.flags = 0,
};

static const struct port_params port_params_default = {
	.xsk_cfg = {
		.rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
		.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
		.libbpf_flags = 0,
		.xdp_flags = XDP_FLAGS_DRV_MODE,
		.bind_flags = XDP_USE_NEED_WAKEUP | XDP_ZEROCOPY,
	},

	.bp = NULL,
	.iface = NULL,
	.iface_queue = 0,
};

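/*
 * Note: the default xsk_cfg above requests native (driver mode) XDP and a
 * zero-copy bind, so it only works on NIC drivers that support both; this
 * sample does not fall back to SKB mode or copy mode when they are missing.
 */
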
#ifndef MAX_PORTS
#define MAX_PORTS 64
#endif

#ifndef MAX_THREADS
#define MAX_THREADS 64
#endif

static struct bpool_params bpool_params;
static struct xsk_umem_config umem_cfg;
static struct bpool *bp;

static struct port_params port_params[MAX_PORTS];
static struct port *ports[MAX_PORTS];
static u64 n_pkts_rx[MAX_PORTS];
static u64 n_pkts_tx[MAX_PORTS];
static int n_ports;

static pthread_t threads[MAX_THREADS];
static struct thread_data thread_data[MAX_THREADS];
static int n_threads;

static void
print_usage(char *prog_name)
{
	const char *usage =
		"Usage:\n"
		"\t%s [ -b SIZE ] -c CORE -i INTERFACE [ -q QUEUE ]\n"
		"\n"
		"-c CORE        CPU core to run a packet forwarding thread\n"
		"               on. May be invoked multiple times.\n"
		"\n"
		"-b SIZE        Number of buffers in the buffer pool shared\n"
		"               by all the forwarding threads. Default: %u.\n"
		"\n"
		"-i INTERFACE   Network interface. Each (INTERFACE, QUEUE)\n"
		"               pair specifies one forwarding port. May be\n"
		"               invoked multiple times.\n"
		"\n"
		"-q QUEUE       Network interface queue for RX and TX. Each\n"
		"               (INTERFACE, QUEUE) pair specifies one\n"
		"               forwarding port. Default: %u. May be invoked\n"
		"               multiple times.\n"
		"\n";
	printf(usage,
	       prog_name,
	       bpool_params_default.n_buffers,
	       port_params_default.iface_queue);
}

static int
parse_args(int argc, char **argv)
{
	struct option lgopts[] = {
		{ NULL, 0, 0, 0 }
	};
	int opt, option_index;

	/* Parse the input arguments. */
	for ( ; ; ) {
		opt = getopt_long(argc, argv, "b:c:i:q:", lgopts,
				  &option_index);
		if (opt == EOF)
			break;

		switch (opt) {
		case 'b':
			bpool_params.n_buffers = atoi(optarg);
			break;

		case 'c':
			if (n_threads == MAX_THREADS) {
				printf("Max number of threads (%d) reached.\n",
				       MAX_THREADS);
				return -1;
			}
			thread_data[n_threads].cpu_core_id = atoi(optarg);
			n_threads++;
			break;

		case 'i':
			if (n_ports == MAX_PORTS) {
				printf("Max number of ports (%d) reached.\n",
				       MAX_PORTS);
				return -1;
			}
			port_params[n_ports].iface = optarg;
			port_params[n_ports].iface_queue = 0;
			n_ports++;
			break;

		case 'q':
			if (n_ports == 0) {
				printf("No port specified for queue.\n");
				return -1;
			}
			port_params[n_ports - 1].iface_queue = atoi(optarg);
			break;

		default:
			printf("Illegal argument.\n");
			return -1;
		}
	}

	optind = 1; /* reset getopt lib */

	/* Check the input arguments. */
	if (!n_ports) {
		printf("No ports specified.\n");
		return -1;
	}

	if (!n_threads) {
		printf("No threads specified.\n");
		return -1;
	}

	if (n_ports % n_threads) {
		printf("Ports cannot be evenly distributed to threads.\n");
		return -1;
	}

	return 0;
}

static void
print_port(u32 port_id)
{
	struct port *port = ports[port_id];

	printf("Port %u: interface = %s, queue = %u\n",
	       port_id, port->params.iface, port->params.iface_queue);
}

static void
print_thread(u32 thread_id)
{
	struct thread_data *t = &thread_data[thread_id];
	u32 i;

	printf("Thread %u (CPU core %u): ",
	       thread_id, t->cpu_core_id);

	for (i = 0; i < t->n_ports_rx; i++) {
		struct port *port_rx = t->ports_rx[i];
		struct port *port_tx = t->ports_tx[i];

		printf("(%s, %u) -> (%s, %u), ",
		       port_rx->params.iface,
		       port_rx->params.iface_queue,
		       port_tx->params.iface,
		       port_tx->params.iface_queue);
	}

	printf("\n");
}

static void
print_port_stats_separator(void)
{
	printf("+-%4s-+-%12s-+-%13s-+-%12s-+-%13s-+\n",
	       "----",
	       "------------",
	       "-------------",
	       "------------",
	       "-------------");
}

static void
print_port_stats_header(void)
{
	print_port_stats_separator();
	printf("| %4s | %12s | %13s | %12s | %13s |\n",
	       "Port",
	       "RX packets",
	       "RX rate (pps)",
	       "TX packets",
	       "TX rate (pps)");
	print_port_stats_separator();
}

static void
print_port_stats_trailer(void)
{
	print_port_stats_separator();
	printf("\n");
}

static void
print_port_stats(int port_id, u64 ns_diff)
{
	struct port *p = ports[port_id];
	double rx_pps, tx_pps;

	rx_pps = (p->n_pkts_rx - n_pkts_rx[port_id]) * 1000000000. / ns_diff;
	tx_pps = (p->n_pkts_tx - n_pkts_tx[port_id]) * 1000000000. / ns_diff;

	printf("| %4d | %12llu | %13.0f | %12llu | %13.0f |\n",
	       port_id,
	       p->n_pkts_rx,
	       rx_pps,
	       p->n_pkts_tx,
	       tx_pps);

	n_pkts_rx[port_id] = p->n_pkts_rx;
	n_pkts_tx[port_id] = p->n_pkts_tx;
}

static void
print_port_stats_all(u64 ns_diff)
{
	int i;

	print_port_stats_header();
	for (i = 0; i < n_ports; i++)
		print_port_stats(i, ns_diff);
	print_port_stats_trailer();
}

static int quit;

static void
signal_handler(int sig)
{
	quit = 1;
}

static void remove_xdp_program(void)
{
	int i;

	for (i = 0; i < n_ports; i++)
		bpf_set_link_xdp_fd(if_nametoindex(port_params[i].iface), -1,
				    port_params[i].xsk_cfg.xdp_flags);
}

int main(int argc, char **argv)
{
	struct timespec time;
	u64 ns0;
	int i;

	/* Parse args. */
	memcpy(&bpool_params, &bpool_params_default,
	       sizeof(struct bpool_params));
	memcpy(&umem_cfg, &umem_cfg_default,
	       sizeof(struct xsk_umem_config));
	for (i = 0; i < MAX_PORTS; i++)
		memcpy(&port_params[i], &port_params_default,
		       sizeof(struct port_params));

	if (parse_args(argc, argv)) {
		print_usage(argv[0]);
		return -1;
	}

	/* Buffer pool initialization. */
	bp = bpool_init(&bpool_params, &umem_cfg);
	if (!bp) {
		printf("Buffer pool initialization failed.\n");
		return -1;
	}
	printf("Buffer pool created successfully.\n");

	/* Ports initialization. */
	for (i = 0; i < MAX_PORTS; i++)
		port_params[i].bp = bp;

	for (i = 0; i < n_ports; i++) {
		ports[i] = port_init(&port_params[i]);
		if (!ports[i]) {
			printf("Port %d initialization failed.\n", i);
			return -1;
		}
		print_port(i);
	}
	printf("All ports created successfully.\n");

	/* Threads. */
	for (i = 0; i < n_threads; i++) {
		struct thread_data *t = &thread_data[i];
		u32 n_ports_per_thread = n_ports / n_threads, j;

		for (j = 0; j < n_ports_per_thread; j++) {
			t->ports_rx[j] = ports[i * n_ports_per_thread + j];
			t->ports_tx[j] = ports[i * n_ports_per_thread +
				(j + 1) % n_ports_per_thread];
		}

		t->n_ports_rx = n_ports_per_thread;

		print_thread(i);
	}

	for (i = 0; i < n_threads; i++) {
		int status;

		status = pthread_create(&threads[i],
					NULL,
					thread_func,
					&thread_data[i]);
		if (status) {
			printf("Thread %d creation failed.\n", i);
			return -1;
		}
	}
	printf("All threads created successfully.\n");

	/* Print statistics. */
	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);
	signal(SIGABRT, signal_handler);

	clock_gettime(CLOCK_MONOTONIC, &time);
	ns0 = time.tv_sec * 1000000000UL + time.tv_nsec;
	for ( ; !quit; ) {
		u64 ns1, ns_diff;

		sleep(1);
		clock_gettime(CLOCK_MONOTONIC, &time);
		ns1 = time.tv_sec * 1000000000UL + time.tv_nsec;
		ns_diff = ns1 - ns0;
		ns0 = ns1;

		print_port_stats_all(ns_diff);
	}

	/* Threads completion. */
	printf("Quit.\n");
	for (i = 0; i < n_threads; i++)
		thread_data[i].quit = 1;

	for (i = 0; i < n_threads; i++)
		pthread_join(threads[i], NULL);

	for (i = 0; i < n_ports; i++)
		port_free(ports[i]);

	bpool_free(bp);

	remove_xdp_program();

	return 0;
}