// SPDX-License-Identifier: GPL-2.0
/* XDP sockets
 *
 * AF_XDP sockets allow a channel between XDP programs and userspace
 * applications.
 * Copyright(c) 2018 Intel Corporation.
 *
 * Author(s): Björn Töpel <bjorn.topel@intel.com>
 *	      Magnus Karlsson <magnus.karlsson@intel.com>
 */

#define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__

#include <linux/if_xdp.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <net/xdp_sock_drv.h>
#include <net/xdp.h>

#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"

#define TX_BATCH_SIZE 16

static DEFINE_PER_CPU(struct list_head, xskmap_flush_list);

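/* The *_need_wakeup helpers below let drivers toggle the XDP_RING_NEED_WAKEUP
 * flag on the fill ring and on every Tx ring sharing the pool. When the flag
 * is set, user space has to kick the kernel (poll()/sendmsg()) before the
 * driver processes the ring again; when it is clear, the driver keeps going
 * on its own. The cached_need_wakeup bits avoid touching the shared rings
 * when the state has not actually changed.
 */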
void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
	if (pool->cached_need_wakeup & XDP_WAKEUP_RX)
		return;

	pool->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
	pool->cached_need_wakeup |= XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_set_rx_need_wakeup);

void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	pool->cached_need_wakeup |= XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_set_tx_need_wakeup);

void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
{
	if (!(pool->cached_need_wakeup & XDP_WAKEUP_RX))
		return;

	pool->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	pool->cached_need_wakeup &= ~XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_clear_rx_need_wakeup);

void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	if (!(pool->cached_need_wakeup & XDP_WAKEUP_TX))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	pool->cached_need_wakeup &= ~XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_clear_tx_need_wakeup);

bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
{
	return pool->uses_need_wakeup;
}
EXPORT_SYMBOL(xsk_uses_need_wakeup);

struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
					    u16 queue_id)
{
	if (queue_id < dev->real_num_rx_queues)
		return dev->_rx[queue_id].pool;
	if (queue_id < dev->real_num_tx_queues)
		return dev->_tx[queue_id].pool;

	return NULL;
}
EXPORT_SYMBOL(xsk_get_pool_from_qid);

void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
{
	if (queue_id < dev->num_rx_queues)
		dev->_rx[queue_id].pool = NULL;
	if (queue_id < dev->num_tx_queues)
		dev->_tx[queue_id].pool = NULL;
}

/* The buffer pool is stored both in the _rx struct and the _tx struct as we do
 * not know if the device has more tx queues than rx, or the opposite.
 * This might also change during run time.
 */
int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
			u16 queue_id)
{
	if (queue_id >= max_t(unsigned int,
			      dev->real_num_rx_queues,
			      dev->real_num_tx_queues))
		return -EINVAL;

	if (queue_id < dev->real_num_rx_queues)
		dev->_rx[queue_id].pool = pool;
	if (queue_id < dev->real_num_tx_queues)
		dev->_tx[queue_id].pool = pool;

	return 0;
}

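/* xp_release() returns a buffer to the pool's free list. xp_get_handle()
 * converts an xdp_buff_xsk back into the user-space address posted in the
 * Rx ring: in aligned mode the headroom offset is simply added to the base
 * address, while in unaligned mode the offset is encoded in the upper bits
 * of the handle (shifted by XSK_UNALIGNED_BUF_OFFSET_SHIFT).
 */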
void xp_release(struct xdp_buff_xsk *xskb)
{
	xskb->pool->free_heads[xskb->pool->free_heads_cnt++] = xskb;
}

static u64 xp_get_handle(struct xdp_buff_xsk *xskb)
{
	u64 offset = xskb->xdp.data - xskb->xdp.data_hard_start;

	offset += xskb->pool->headroom;
	if (!xskb->pool->unaligned)
		return xskb->orig_addr + offset;
	return xskb->orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
}

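/* Zero-copy receive: the frame already lives in umem, so only a descriptor
 * (address + length) has to be published in the socket's Rx ring. If the
 * ring is full, an error is returned and rx_queue_full is bumped.
 */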
static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
	u64 addr;
	int err;

	addr = xp_get_handle(xskb);
	err = xskq_prod_reserve_desc(xs->rx, addr, len);
	if (err) {
		xs->rx_queue_full++;
		return err;
	}

	xp_release(xskb);
	return 0;
}

static void xsk_copy_xdp(struct xdp_buff *to, struct xdp_buff *from, u32 len)
{
	void *from_buf, *to_buf;
	u32 metalen;

	if (unlikely(xdp_data_meta_unsupported(from))) {
		from_buf = from->data;
		to_buf = to->data;
		metalen = 0;
	} else {
		from_buf = from->data_meta;
		metalen = from->data - from->data_meta;
		to_buf = to->data - metalen;
	}

	memcpy(to_buf, from_buf, len + metalen);
}

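/* Copy-mode receive: allocate a buffer from the pool, copy the packet
 * (including any XDP metadata in front of the data pointer) into it, then
 * hand it to the Rx ring via __xsk_rcv_zc(). Frames that do not fit in a
 * pool buffer, or for which no buffer can be allocated, count as rx_dropped.
 */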
static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len,
		     bool explicit_free)
{
	struct xdp_buff *xsk_xdp;
	int err;

	if (len > xsk_pool_get_rx_frame_size(xs->pool)) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	xsk_xdp = xsk_buff_alloc(xs->pool);
	if (!xsk_xdp) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	xsk_copy_xdp(xsk_xdp, xdp, len);
	err = __xsk_rcv_zc(xs, xsk_xdp, len);
	if (err) {
		xsk_buff_free(xsk_xdp);
		return err;
	}
	if (explicit_free)
		xdp_return_buff(xdp);
	return 0;
}

static bool xsk_tx_writeable(struct xdp_sock *xs)
{
	if (xskq_cons_present_entries(xs->tx) > xs->tx->nentries / 2)
		return false;

	return true;
}

static bool xsk_is_bound(struct xdp_sock *xs)
{
	if (READ_ONCE(xs->state) == XSK_BOUND) {
		/* Matches smp_wmb() in bind(). */
		smp_rmb();
		return true;
	}
	return false;
}

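/* Common receive entry point. The socket must be bound, and the frame must
 * arrive on the device and queue the socket is bound to. Frames already
 * backed by an XSK buffer pool take the zero-copy path; everything else is
 * copied.
 */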
static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp,
		   bool explicit_free)
{
	u32 len;

	if (!xsk_is_bound(xs))
		return -EINVAL;

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
		return -EINVAL;

	len = xdp->data_end - xdp->data;

	return xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL ?
		__xsk_rcv_zc(xs, xdp, len) :
		__xsk_rcv(xs, xdp, len, explicit_free);
}

static void xsk_flush(struct xdp_sock *xs)
{
	xskq_prod_submit(xs->rx);
	__xskq_cons_release(xs->pool->fq);
	sock_def_readable(&xs->sk);
}

int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	int err;

	spin_lock_bh(&xs->rx_lock);
	err = xsk_rcv(xs, xdp, false);
	xsk_flush(xs);
	spin_unlock_bh(&xs->rx_lock);
	return err;
}

int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
	int err;

	err = xsk_rcv(xs, xdp, true);
	if (err)
		return err;

	if (!xs->flush_node.prev)
		list_add(&xs->flush_node, flush_list);

	return 0;
}

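/* Flush the per-CPU list of sockets that received frames through an XSKMAP
 * redirect: submit their Rx descriptors, release consumed fill ring entries
 * and wake up any readers.
 */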
void __xsk_map_flush(void)
{
	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
	struct xdp_sock *xs, *tmp;

	list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
		xsk_flush(xs);
		__list_del_clearprev(&xs->flush_node);
	}
}

void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{
	xskq_prod_submit_n(pool->cq, nb_entries);
}
EXPORT_SYMBOL(xsk_tx_completed);

void xsk_tx_release(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		__xskq_cons_release(xs->tx);
		if (xsk_tx_writeable(xs))
			xs->sk.sk_write_space(&xs->sk);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(xsk_tx_release);

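/* Used by zero-copy drivers to fetch the next Tx descriptor from any of the
 * sockets sharing this buffer pool. A completion queue slot is reserved up
 * front so that reporting the completion later cannot fail (see the
 * backpressure comment below).
 */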
bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		if (!xskq_cons_peek_desc(xs->tx, desc, pool)) {
			xs->tx->queue_empty_descs++;
			continue;
		}

		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		if (xskq_prod_reserve_addr(pool->cq, desc->addr))
			goto out;

		xskq_cons_release(xs->tx);
		rcu_read_unlock();
		return true;
	}

out:
	rcu_read_unlock();
	return false;
}
EXPORT_SYMBOL(xsk_tx_peek_desc);

static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
{
	struct net_device *dev = xs->dev;
	int err;

	rcu_read_lock();
	err = dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
	rcu_read_unlock();

	return err;
}

static int xsk_zc_xmit(struct xdp_sock *xs)
{
	return xsk_wakeup(xs, XDP_WAKEUP_TX);
}

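/* skb destructor for the copy-mode Tx path: once the skb has been consumed,
 * publish the corresponding completion queue entry so that user space can
 * reuse the umem frame.
 */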
static void xsk_destruct_skb(struct sk_buff *skb)
{
	u64 addr = (u64)(long)skb_shinfo(skb)->destructor_arg;
	struct xdp_sock *xs = xdp_sk(skb->sk);
	unsigned long flags;

	spin_lock_irqsave(&xs->pool->cq_lock, flags);
	xskq_prod_submit_addr(xs->pool->cq, addr);
	spin_unlock_irqrestore(&xs->pool->cq_lock, flags);

	sock_wfree(skb);
}

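/* Copy-mode (SKB) transmit path. Each Tx descriptor is turned into a freshly
 * allocated skb, the payload is copied out of umem, and the skb is sent
 * directly on the queue the socket is bound to. At most TX_BATCH_SIZE
 * descriptors are processed per call; -EAGAIN tells user space to retry.
 */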
static int xsk_generic_xmit(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);
	u32 max_batch = TX_BATCH_SIZE;
	bool sent_frame = false;
	struct xdp_desc desc;
	struct sk_buff *skb;
	unsigned long flags;
	int err = 0;
	u32 hr, tr;

	mutex_lock(&xs->mutex);

	if (xs->queue_id >= xs->dev->real_num_tx_queues)
		goto out;

	hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(xs->dev->needed_headroom));
	tr = xs->dev->needed_tailroom;

	while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) {
		char *buffer;
		u64 addr;
		u32 len;

		if (max_batch-- == 0) {
			err = -EAGAIN;
			goto out;
		}

		len = desc.len;
		skb = sock_alloc_send_skb(sk, hr + len + tr, 1, &err);
		if (unlikely(!skb))
			goto out;

		skb_reserve(skb, hr);
		skb_put(skb, len);

		addr = desc.addr;
		buffer = xsk_buff_raw_get_data(xs->pool, addr);
		err = skb_store_bits(skb, 0, buffer, len);
		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		spin_lock_irqsave(&xs->pool->cq_lock, flags);
		if (unlikely(err) || xskq_prod_reserve(xs->pool->cq)) {
			spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
			kfree_skb(skb);
			goto out;
		}
		spin_unlock_irqrestore(&xs->pool->cq_lock, flags);

		skb->dev = xs->dev;
		skb->priority = sk->sk_priority;
		skb->mark = sk->sk_mark;
		skb_shinfo(skb)->destructor_arg = (void *)(long)desc.addr;
		skb->destructor = xsk_destruct_skb;

		err = __dev_direct_xmit(skb, xs->queue_id);
		if (err == NETDEV_TX_BUSY) {
			/* Tell user-space to retry the send */
			skb->destructor = sock_wfree;
			spin_lock_irqsave(&xs->pool->cq_lock, flags);
			xskq_prod_cancel(xs->pool->cq);
			spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
			/* Free skb without triggering the perf drop trace */
			consume_skb(skb);
			err = -EAGAIN;
			goto out;
		}

		xskq_cons_release(xs->tx);
		/* Ignore NET_XMIT_CN as packet might have been sent */
		if (err == NET_XMIT_DROP) {
			/* SKB completed but not sent */
			err = -EBUSY;
			goto out;
		}

		sent_frame = true;
	}

	xs->tx->queue_empty_descs++;

out:
	if (sent_frame)
		if (xsk_tx_writeable(xs))
			sk->sk_write_space(sk);

	mutex_unlock(&xs->mutex);
	return err;
}

static int __xsk_sendmsg(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (unlikely(!(xs->dev->flags & IFF_UP)))
		return -ENETDOWN;
	if (unlikely(!xs->tx))
		return -ENOBUFS;

	return xs->zc ? xsk_zc_xmit(xs) : xsk_generic_xmit(sk);
}

static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);

	if (unlikely(!xsk_is_bound(xs)))
		return -ENXIO;
	if (unlikely(need_wait))
		return -EOPNOTSUPP;

	return __xsk_sendmsg(sk);
}

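/* poll() reports readability/writability of the rings and, when the
 * need_wakeup flag is in use, also acts as the kick that drives Tx and fill
 * ring processing (via the driver in zero-copy mode, or via the generic
 * xmit path in copy mode).
 */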
static __poll_t xsk_poll(struct file *file, struct socket *sock,
			 struct poll_table_struct *wait)
{
	__poll_t mask = 0;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct xsk_buff_pool *pool;

	sock_poll_wait(file, sock, wait);

	if (unlikely(!xsk_is_bound(xs)))
		return mask;

	pool = xs->pool;

	if (pool->cached_need_wakeup) {
		if (xs->zc)
			xsk_wakeup(xs, pool->cached_need_wakeup);
		else
			/* Poll needs to drive Tx also in copy mode */
			__xsk_sendmsg(sk);
	}

	if (xs->rx && !xskq_prod_is_empty(xs->rx))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (xs->tx && xsk_tx_writeable(xs))
		mask |= EPOLLOUT | EPOLLWRNORM;

	return mask;
}

static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
			  bool umem_queue)
{
	struct xsk_queue *q;

	if (entries == 0 || *queue || !is_power_of_2(entries))
		return -EINVAL;

	q = xskq_create(entries, umem_queue);
	if (!q)
		return -ENOMEM;

	/* Make sure queue is ready before it can be seen by others */
	smp_wmb();
	WRITE_ONCE(*queue, q);
	return 0;
}

static void xsk_unbind_dev(struct xdp_sock *xs)
{
	struct net_device *dev = xs->dev;

	if (xs->state != XSK_BOUND)
		return;
	WRITE_ONCE(xs->state, XSK_UNBOUND);

	/* Wait for driver to stop using the xdp socket. */
	xp_del_xsk(xs->pool, xs);
	xs->dev = NULL;
	synchronize_net();
	dev_put(dev);
}

static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs,
					      struct xdp_sock ***map_entry)
{
	struct xsk_map *map = NULL;
	struct xsk_map_node *node;

	*map_entry = NULL;

	spin_lock_bh(&xs->map_list_lock);
	node = list_first_entry_or_null(&xs->map_list, struct xsk_map_node,
					node);
	if (node) {
		WARN_ON(xsk_map_inc(node->map));
		map = node->map;
		*map_entry = node->map_entry;
	}
	spin_unlock_bh(&xs->map_list_lock);
	return map;
}

static void xsk_delete_from_maps(struct xdp_sock *xs)
{
	/* This function removes the current XDP socket from all the
	 * maps it resides in. We need to take extra care here, due to
	 * the two locks involved. Each map has a lock synchronizing
	 * updates to the entries, and each socket has a lock that
	 * synchronizes access to the list of maps (map_list). For
	 * deadlock avoidance the locks need to be taken in the order
	 * "map lock"->"socket map list lock". We start off by
	 * accessing the socket map list, and take a reference to the
	 * map to guarantee existence between the
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete()
	 * calls. Then we ask the map to remove the socket, which
	 * tries to remove the socket from the map. Note that there
	 * might be updates to the map between
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete().
	 */
	struct xdp_sock **map_entry = NULL;
	struct xsk_map *map;

	while ((map = xsk_get_map_list_entry(xs, &map_entry))) {
		xsk_map_try_sock_delete(map, xs, map_entry);
		xsk_map_put(map);
	}
}

static int xsk_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net *net;

	if (!sk)
		return 0;

	net = sock_net(sk);

	mutex_lock(&net->xdp.lock);
	sk_del_node_init_rcu(sk);
	mutex_unlock(&net->xdp.lock);

	local_bh_disable();
	sock_prot_inuse_add(net, sk->sk_prot, -1);
	local_bh_enable();

	xsk_delete_from_maps(xs);
	mutex_lock(&xs->mutex);
	xsk_unbind_dev(xs);
	mutex_unlock(&xs->mutex);

	xskq_destroy(xs->rx);
	xskq_destroy(xs->tx);
	xskq_destroy(xs->fq_tmp);
	xskq_destroy(xs->cq_tmp);

	sock_orphan(sk);
	sock->sk = NULL;

	sk_refcnt_debug_release(sk);
	sock_put(sk);

	return 0;
}

static struct socket *xsk_lookup_xsk_from_fd(int fd)
{
	struct socket *sock;
	int err;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	if (sock->sk->sk_family != PF_XDP) {
		sockfd_put(sock);
		return ERR_PTR(-ENOPROTOOPT);
	}

	return sock;
}

static bool xsk_validate_queues(struct xdp_sock *xs)
{
	return xs->fq_tmp && xs->cq_tmp;
}

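/* Bind the socket to a device and queue id. Without XDP_SHARED_UMEM the
 * socket must have registered its own umem plus fill and completion rings,
 * and a new buffer pool is created for it. With XDP_SHARED_UMEM the umem
 * (and, when binding to the same device and queue, also the buffer pool) is
 * taken from the socket identified by sxdp_shared_umem_fd.
 */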
static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev;
	u32 flags, qid;
	int err = 0;

	if (addr_len < sizeof(struct sockaddr_xdp))
		return -EINVAL;
	if (sxdp->sxdp_family != AF_XDP)
		return -EINVAL;

	flags = sxdp->sxdp_flags;
	if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY |
		      XDP_USE_NEED_WAKEUP))
		return -EINVAL;

	rtnl_lock();
	mutex_lock(&xs->mutex);
	if (xs->state != XSK_READY) {
		err = -EBUSY;
		goto out_release;
	}

	dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
	if (!dev) {
		err = -ENODEV;
		goto out_release;
	}

	if (!xs->rx && !xs->tx) {
		err = -EINVAL;
		goto out_unlock;
	}

	qid = sxdp->sxdp_queue_id;

	if (flags & XDP_SHARED_UMEM) {
		struct xdp_sock *umem_xs;
		struct socket *sock;

		if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY) ||
		    (flags & XDP_USE_NEED_WAKEUP)) {
			/* Cannot specify flags for shared sockets. */
			err = -EINVAL;
			goto out_unlock;
		}

		if (xs->umem) {
			/* We have already our own. */
			err = -EINVAL;
			goto out_unlock;
		}

		sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
		if (IS_ERR(sock)) {
			err = PTR_ERR(sock);
			goto out_unlock;
		}

		umem_xs = xdp_sk(sock->sk);
		if (!xsk_is_bound(umem_xs)) {
			err = -EBADF;
			sockfd_put(sock);
			goto out_unlock;
		}

		if (umem_xs->queue_id != qid || umem_xs->dev != dev) {
			/* Share the umem with another socket on another qid
			 * and/or device.
			 */
			xs->pool = xp_create_and_assign_umem(xs,
							     umem_xs->umem);
			if (!xs->pool) {
				err = -ENOMEM;
				sockfd_put(sock);
				goto out_unlock;
			}

			err = xp_assign_dev_shared(xs->pool, umem_xs->umem,
						   dev, qid);
			if (err) {
				xp_destroy(xs->pool);
				xs->pool = NULL;
				sockfd_put(sock);
				goto out_unlock;
			}
		} else {
			/* Share the buffer pool with the other socket. */
			if (xs->fq_tmp || xs->cq_tmp) {
				/* Do not allow setting your own fq or cq. */
				err = -EINVAL;
				sockfd_put(sock);
				goto out_unlock;
			}

			xp_get_pool(umem_xs->pool);
			xs->pool = umem_xs->pool;
		}

		xdp_get_umem(umem_xs->umem);
		WRITE_ONCE(xs->umem, umem_xs->umem);
		sockfd_put(sock);
	} else if (!xs->umem || !xsk_validate_queues(xs)) {
		err = -EINVAL;
		goto out_unlock;
	} else {
		/* This xsk has its own umem. */
		xs->pool = xp_create_and_assign_umem(xs, xs->umem);
		if (!xs->pool) {
			err = -ENOMEM;
			goto out_unlock;
		}

		err = xp_assign_dev(xs->pool, dev, qid, flags);
		if (err) {
			xp_destroy(xs->pool);
			xs->pool = NULL;
			goto out_unlock;
		}
	}

	/* FQ and CQ are now owned by the buffer pool and cleaned up with it. */
	xs->fq_tmp = NULL;
	xs->cq_tmp = NULL;

	xs->dev = dev;
	xs->zc = xs->umem->zc;
	xs->queue_id = qid;
	xp_add_xsk(xs->pool, xs);

out_unlock:
	if (err) {
		dev_put(dev);
	} else {
		/* Matches smp_rmb() in bind() for shared umem
		 * sockets, and xsk_is_bound().
		 */
		smp_wmb();
		WRITE_ONCE(xs->state, XSK_BOUND);
	}
out_release:
	mutex_unlock(&xs->mutex);
	rtnl_unlock();
	return err;
}

struct xdp_umem_reg_v1 {
	__u64 addr; /* Start of packet data area */
	__u64 len; /* Length of packet data area */
	__u32 chunk_size;
	__u32 headroom;
};

static int xsk_setsockopt(struct socket *sock, int level, int optname,
			  sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int err;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	switch (optname) {
	case XDP_RX_RING:
	case XDP_TX_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (optlen < sizeof(entries))
			return -EINVAL;
		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}
		q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
		err = xsk_init_queue(entries, q, false);
		if (!err && optname == XDP_TX_RING)
			/* Tx needs to be explicitly woken up the first time */
			xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
		mutex_unlock(&xs->mutex);
		return err;
	}
	case XDP_UMEM_REG:
	{
		size_t mr_size = sizeof(struct xdp_umem_reg);
		struct xdp_umem_reg mr = {};
		struct xdp_umem *umem;

		if (optlen < sizeof(struct xdp_umem_reg_v1))
			return -EINVAL;
		else if (optlen < sizeof(mr))
			mr_size = sizeof(struct xdp_umem_reg_v1);

		if (copy_from_sockptr(&mr, optval, mr_size))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY || xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		umem = xdp_umem_create(&mr);
		if (IS_ERR(umem)) {
			mutex_unlock(&xs->mutex);
			return PTR_ERR(umem);
		}

		/* Make sure umem is ready before it can be seen by others */
		smp_wmb();
		WRITE_ONCE(xs->umem, umem);
		mutex_unlock(&xs->mutex);
		return 0;
	}
	case XDP_UMEM_FILL_RING:
	case XDP_UMEM_COMPLETION_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		q = (optname == XDP_UMEM_FILL_RING) ? &xs->fq_tmp :
			&xs->cq_tmp;
		err = xsk_init_queue(entries, q, true);
		mutex_unlock(&xs->mutex);
		return err;
	}
	default:
		break;
	}

	return -ENOPROTOOPT;
}

static void xsk_enter_rxtx_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_rxtx_ring, desc);
}

static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_umem_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_umem_ring, desc);
}

struct xdp_statistics_v1 {
	__u64 rx_dropped;
	__u64 rx_invalid_descs;
	__u64 tx_invalid_descs;
};

static int xsk_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int len;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case XDP_STATISTICS:
	{
		struct xdp_statistics stats = {};
		bool extra_stats = true;
		size_t stats_size;

		if (len < sizeof(struct xdp_statistics_v1)) {
			return -EINVAL;
		} else if (len < sizeof(stats)) {
			extra_stats = false;
			stats_size = sizeof(struct xdp_statistics_v1);
		} else {
			stats_size = sizeof(stats);
		}

		mutex_lock(&xs->mutex);
		stats.rx_dropped = xs->rx_dropped;
		if (extra_stats) {
			stats.rx_ring_full = xs->rx_queue_full;
			stats.rx_fill_ring_empty_descs =
				xs->pool ? xskq_nb_queue_empty_descs(xs->pool->fq) : 0;
			stats.tx_ring_empty_descs = xskq_nb_queue_empty_descs(xs->tx);
		} else {
			stats.rx_dropped += xs->rx_queue_full;
		}
		stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
		stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
		mutex_unlock(&xs->mutex);

		if (copy_to_user(optval, &stats, stats_size))
			return -EFAULT;
		if (put_user(stats_size, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_MMAP_OFFSETS:
	{
		struct xdp_mmap_offsets off;
		struct xdp_mmap_offsets_v1 off_v1;
		bool flags_supported = true;
		void *to_copy;

		if (len < sizeof(off_v1))
			return -EINVAL;
		else if (len < sizeof(off))
			flags_supported = false;

		if (flags_supported) {
			/* xdp_ring_offset is identical to xdp_ring_offset_v1
			 * except for the flags field added to the end.
			 */
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.rx);
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.tx);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.fr);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.cr);
			off.rx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.tx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.fr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);
			off.cr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);

			len = sizeof(off);
			to_copy = &off;
		} else {
			xsk_enter_rxtx_offsets(&off_v1.rx);
			xsk_enter_rxtx_offsets(&off_v1.tx);
			xsk_enter_umem_offsets(&off_v1.fr);
			xsk_enter_umem_offsets(&off_v1.cr);

			len = sizeof(off_v1);
			to_copy = &off_v1;
		}

		if (copy_to_user(optval, to_copy, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_OPTIONS:
	{
		struct xdp_options opts = {};

		if (len < sizeof(opts))
			return -EINVAL;

		mutex_lock(&xs->mutex);
		if (xs->zc)
			opts.flags |= XDP_OPTIONS_ZEROCOPY;
		mutex_unlock(&xs->mutex);

		len = sizeof(opts);
		if (copy_to_user(optval, &opts, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	default:
		break;
	}

	return -EOPNOTSUPP;
}

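/* mmap() of the Rx, Tx, fill and completion rings. The page offset encodes
 * which ring is mapped. Mapping is only allowed while the socket is still in
 * the XSK_READY state, i.e. before it has been bound.
 */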
static int xsk_mmap(struct file *file, struct socket *sock,
		    struct vm_area_struct *vma)
{
	loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct xdp_sock *xs = xdp_sk(sock->sk);
	struct xsk_queue *q = NULL;
	unsigned long pfn;
	struct page *qpg;

	if (READ_ONCE(xs->state) != XSK_READY)
		return -EBUSY;

	if (offset == XDP_PGOFF_RX_RING) {
		q = READ_ONCE(xs->rx);
	} else if (offset == XDP_PGOFF_TX_RING) {
		q = READ_ONCE(xs->tx);
	} else {
		/* Matches the smp_wmb() in XDP_UMEM_REG */
		smp_rmb();
		if (offset == XDP_UMEM_PGOFF_FILL_RING)
			q = READ_ONCE(xs->fq_tmp);
		else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
			q = READ_ONCE(xs->cq_tmp);
	}

	if (!q)
		return -EINVAL;

	/* Matches the smp_wmb() in xsk_init_queue */
	smp_rmb();
	qpg = virt_to_head_page(q->ring);
	if (size > page_size(qpg))
		return -EINVAL;

	pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;
	return remap_pfn_range(vma, vma->vm_start, pfn,
			       size, vma->vm_page_prot);
}

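/* Netdevice notifier: when a device is unregistered, report ENETDOWN on
 * every socket bound to it, unbind those sockets and drop the buffer pool's
 * device references.
 */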
static int xsk_notifier(struct notifier_block *this,
			unsigned long msg, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct sock *sk;

	switch (msg) {
	case NETDEV_UNREGISTER:
		mutex_lock(&net->xdp.lock);
		sk_for_each(sk, &net->xdp.list) {
			struct xdp_sock *xs = xdp_sk(sk);

			mutex_lock(&xs->mutex);
			if (xs->dev == dev) {
				sk->sk_err = ENETDOWN;
				if (!sock_flag(sk, SOCK_DEAD))
					sk->sk_error_report(sk);

				xsk_unbind_dev(xs);

				/* Clear device references. */
				xp_clear_dev(xs->pool);
			}
			mutex_unlock(&xs->mutex);
		}
		mutex_unlock(&net->xdp.lock);
		break;
	}
	return NOTIFY_DONE;
}

static struct proto xsk_proto = {
	.name =		"XDP",
	.owner =	THIS_MODULE,
	.obj_size =	sizeof(struct xdp_sock),
};

static const struct proto_ops xsk_proto_ops = {
	.family		= PF_XDP,
	.owner		= THIS_MODULE,
	.release	= xsk_release,
	.bind		= xsk_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= sock_no_getname,
	.poll		= xsk_poll,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= xsk_setsockopt,
	.getsockopt	= xsk_getsockopt,
	.sendmsg	= xsk_sendmsg,
	.recvmsg	= sock_no_recvmsg,
	.mmap		= xsk_mmap,
	.sendpage	= sock_no_sendpage,
};

static void xsk_destruct(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (!sock_flag(sk, SOCK_DEAD))
		return;

	if (!xp_put_pool(xs->pool))
		xdp_put_umem(xs->umem, !xs->pool);

	sk_refcnt_debug_dec(sk);
}

static int xsk_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct xdp_sock *xs;
	struct sock *sk;

	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (protocol)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
	if (!sk)
		return -ENOBUFS;

	sock->ops = &xsk_proto_ops;

	sock_init_data(sock, sk);

	sk->sk_family = PF_XDP;

	sk->sk_destruct = xsk_destruct;
	sk_refcnt_debug_inc(sk);

	sock_set_flag(sk, SOCK_RCU_FREE);

	xs = xdp_sk(sk);
	xs->state = XSK_READY;
	mutex_init(&xs->mutex);
	spin_lock_init(&xs->rx_lock);

	INIT_LIST_HEAD(&xs->map_list);
	spin_lock_init(&xs->map_list_lock);

	mutex_lock(&net->xdp.lock);
	sk_add_node_rcu(sk, &net->xdp.list);
	mutex_unlock(&net->xdp.lock);

	local_bh_disable();
	sock_prot_inuse_add(net, &xsk_proto, 1);
	local_bh_enable();

	return 0;
}

static const struct net_proto_family xsk_family_ops = {
	.family = PF_XDP,
	.create = xsk_create,
	.owner	= THIS_MODULE,
};

static struct notifier_block xsk_netdev_notifier = {
	.notifier_call	= xsk_notifier,
};

static int __net_init xsk_net_init(struct net *net)
{
	mutex_init(&net->xdp.lock);
	INIT_HLIST_HEAD(&net->xdp.list);
	return 0;
}

static void __net_exit xsk_net_exit(struct net *net)
{
	WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
}

static struct pernet_operations xsk_net_ops = {
	.init = xsk_net_init,
	.exit = xsk_net_exit,
};

static int __init xsk_init(void)
{
	int err, cpu;

	err = proto_register(&xsk_proto, 0 /* no slab */);
	if (err)
		goto out;

	err = sock_register(&xsk_family_ops);
	if (err)
		goto out_proto;

	err = register_pernet_subsys(&xsk_net_ops);
	if (err)
		goto out_sk;

	err = register_netdevice_notifier(&xsk_netdev_notifier);
	if (err)
		goto out_pernet;

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(xskmap_flush_list, cpu));
	return 0;

out_pernet:
	unregister_pernet_subsys(&xsk_net_ops);
out_sk:
	sock_unregister(PF_XDP);
out_proto:
	proto_unregister(&xsk_proto);
out:
	return err;
}

fs_initcall(xsk_init);