gro_cells.c

// SPDX-License-Identifier: GPL-2.0
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <net/gro_cells.h>
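
/* Per-cpu GRO cells: each possible CPU owns a private skb queue and a
 * NAPI instance, letting virtual devices such as tunnels, which have no
 * hardware receive queues of their own, feed packets through GRO in
 * softirq context.
 */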
struct gro_cell {
	struct sk_buff_head	napi_skbs;
	struct napi_struct	napi;
};
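
/* Hand @skb to the current CPU's cell.  Falls back to plain netif_rx()
 * when the cells are absent, the skb is cloned, or GRO is disabled on
 * the device, and drops the packet once the per-cell queue grows past
 * netdev_max_backlog.  Returns NET_RX_SUCCESS or NET_RX_DROP.
 */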
int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct gro_cell *cell;
	int res;

	rcu_read_lock();
	if (unlikely(!(dev->flags & IFF_UP)))
		goto drop;

	if (!gcells->cells || skb_cloned(skb) || netif_elide_gro(dev)) {
		res = netif_rx(skb);
		goto unlock;
	}

	cell = this_cpu_ptr(gcells->cells);

	if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
drop:
		atomic_long_inc(&dev->rx_dropped);
		kfree_skb(skb);
		res = NET_RX_DROP;
		goto unlock;
	}

	/* The queue is strictly per-cpu and only touched from BH context
	 * on this CPU (here and in gro_cell_poll()), so the lockless
	 * __skb_queue_tail() is safe.
	 */
	__skb_queue_tail(&cell->napi_skbs, skb);

	/* Schedule the cell's NAPI only on the empty->non-empty
	 * transition; it stays scheduled until the queue drains.
	 */
	if (skb_queue_len(&cell->napi_skbs) == 1)
		napi_schedule(&cell->napi);

	res = NET_RX_SUCCESS;

unlock:
	rcu_read_unlock();
	return res;
}
EXPORT_SYMBOL(gro_cells_receive);
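
/* Illustrative sketch, not part of this file: a tunnel driver that
 * embeds a struct gro_cells in its private data would feed decapsulated
 * packets through it roughly like this ("foo" names are hypothetical,
 * and skb->dev must already point at the tunnel device):
 *
 *	static void foo_rx(struct foo_priv *priv, struct sk_buff *skb)
 *	{
 *		gro_cells_receive(&priv->gro_cells, skb);
 *	}
 */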

/* called under BH context */
static int gro_cell_poll(struct napi_struct *napi, int budget)
{
	struct gro_cell *cell = container_of(napi, struct gro_cell, napi);
	struct sk_buff *skb;
	int work_done = 0;

	while (work_done < budget) {
		skb = __skb_dequeue(&cell->napi_skbs);
		if (!skb)
			break;
		napi_gro_receive(napi, skb);
		work_done++;
	}

	/* Only report completion when the queue was drained within the
	 * budget; otherwise stay scheduled for another poll round.
	 */
	if (work_done < budget)
		napi_complete_done(napi, work_done);
	return work_done;
}
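
/* Allocate one cell per possible CPU and register its NAPI instance on
 * @dev.  Typically called from the driver's ndo_init path.  Returns 0,
 * or -ENOMEM if the per-cpu allocation fails.
 */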
int gro_cells_init(struct gro_cells *gcells, struct net_device *dev)
{
	int i;

	gcells->cells = alloc_percpu(struct gro_cell);
	if (!gcells->cells)
		return -ENOMEM;

	for_each_possible_cpu(i) {
		struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);

		__skb_queue_head_init(&cell->napi_skbs);

		/* These NAPIs are not backed by real hardware queues;
		 * keep them out of busy polling.
		 */
		set_bit(NAPI_STATE_NO_BUSY_POLL, &cell->napi.state);

		netif_napi_add(dev, &cell->napi, gro_cell_poll,
			       NAPI_POLL_WEIGHT);
		napi_enable(&cell->napi);
	}
	return 0;
}
EXPORT_SYMBOL(gro_cells_init);
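
/* Undo gro_cells_init(): disable every NAPI (waiting out any poll in
 * progress), unlink it from the device and free the skbs still queued.
 * Callers typically run this from ndo_uninit, once no more packets can
 * reach gro_cells_receive().
 */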
void gro_cells_destroy(struct gro_cells *gcells)
{
	int i;

	if (!gcells->cells)
		return;
	for_each_possible_cpu(i) {
		struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);

		napi_disable(&cell->napi);
		__netif_napi_del(&cell->napi);
		__skb_queue_purge(&cell->napi_skbs);
	}
	/* This barrier is needed because netpoll could access dev->napi_list
	 * under rcu protection.
	 */
	synchronize_net();

	free_percpu(gcells->cells);
	gcells->cells = NULL;
}
EXPORT_SYMBOL(gro_cells_destroy);
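
/* Illustrative lifecycle sketch, not part of this file: a hypothetical
 * "foo" driver would pair init and destroy in its netdevice callbacks
 * so the cells live exactly as long as the device:
 *
 *	static int foo_dev_init(struct net_device *dev)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		return gro_cells_init(&priv->gro_cells, dev);
 *	}
 *
 *	static void foo_dev_uninit(struct net_device *dev)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		gro_cells_destroy(&priv->gro_cells);
 *	}
 */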