vport-internal_dev.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2007-2012 Nicira, Inc.
 */

#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>

#include <net/dst.h>
#include <net/xfrm.h>
#include <net/rtnetlink.h>

#include "datapath.h"
#include "vport-internal_dev.h"
#include "vport-netdev.h"

struct internal_dev {
	struct vport *vport;
};

static struct vport_ops ovs_internal_vport_ops;

static struct internal_dev *internal_dev_priv(struct net_device *netdev)
{
	return netdev_priv(netdev);
}

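/* TX path: frames sent out the internal device by the local network stack
 * are passed straight into the OVS datapath via ovs_vport_receive().
 */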
/* Called with rcu_read_lock_bh. */
static netdev_tx_t
internal_dev_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	int len, err;

	len = skb->len;
	rcu_read_lock();
	err = ovs_vport_receive(internal_dev_priv(netdev)->vport, skb, NULL);
	rcu_read_unlock();
	if (likely(!err)) {
		struct pcpu_sw_netstats *tstats = this_cpu_ptr(netdev->tstats);

		u64_stats_update_begin(&tstats->syncp);
		tstats->tx_bytes += len;
		tstats->tx_packets++;
		u64_stats_update_end(&tstats->syncp);
	} else {
		netdev->stats.tx_errors++;
	}
	return NETDEV_TX_OK;
}

static int internal_dev_open(struct net_device *netdev)
{
	netif_start_queue(netdev);
	return 0;
}

static int internal_dev_stop(struct net_device *netdev)
{
	netif_stop_queue(netdev);
	return 0;
}

static void internal_dev_getinfo(struct net_device *netdev,
				 struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, "openvswitch", sizeof(info->driver));
}

static const struct ethtool_ops internal_dev_ethtool_ops = {
	.get_drvinfo = internal_dev_getinfo,
	.get_link = ethtool_op_get_link,
};

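/* Set as the netdev priv_destructor once registration succeeds: frees the
 * vport when the backing net_device is released.
 */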
static void internal_dev_destructor(struct net_device *dev)
{
	struct vport *vport = ovs_internal_dev_get_vport(dev);

	ovs_vport_free(vport);
}

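/* Combine the error/drop counters kept in dev->stats with the per-CPU
 * software RX/TX stats kept in dev->tstats.
 */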
static void
internal_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	memset(stats, 0, sizeof(*stats));
	stats->rx_errors = dev->stats.rx_errors;
	stats->tx_errors = dev->stats.tx_errors;
	stats->tx_dropped = dev->stats.tx_dropped;
	stats->rx_dropped = dev->stats.rx_dropped;

	dev_fetch_sw_netstats(stats, dev->tstats);
}

static const struct net_device_ops internal_dev_netdev_ops = {
	.ndo_open = internal_dev_open,
	.ndo_stop = internal_dev_stop,
	.ndo_start_xmit = internal_dev_xmit,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_get_stats64 = internal_get_stats,
};

static struct rtnl_link_ops internal_dev_link_ops __read_mostly = {
	.kind = "openvswitch",
};

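/* Basic Ethernet setup for an OVS internal port: queueless (IFF_NO_QUEUE),
 * software-only offload features and a random MAC address.
 */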
static void do_setup(struct net_device *netdev)
{
	ether_setup(netdev);
	netdev->max_mtu = ETH_MAX_MTU;

	netdev->netdev_ops = &internal_dev_netdev_ops;

	netdev->priv_flags &= ~IFF_TX_SKB_SHARING;
	netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_OPENVSWITCH |
			      IFF_NO_QUEUE;
	netdev->needs_free_netdev = true;
	netdev->priv_destructor = NULL;
	netdev->ethtool_ops = &internal_dev_ethtool_ops;
	netdev->rtnl_link_ops = &internal_dev_link_ops;

	netdev->features = NETIF_F_LLTX | NETIF_F_SG | NETIF_F_FRAGLIST |
			   NETIF_F_HIGHDMA | NETIF_F_HW_CSUM |
			   NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL;

	netdev->vlan_features = netdev->features;
	netdev->hw_enc_features = netdev->features;
	netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
	netdev->hw_features = netdev->features & ~NETIF_F_LLTX;

	eth_hw_addr_random(netdev);
}

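/* Allocate the vport and its backing net_device, register the netdev and
 * put it into promiscuous mode; on failure, unwind in reverse order.
 */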
static struct vport *internal_dev_create(const struct vport_parms *parms)
{
	struct vport *vport;
	struct internal_dev *internal_dev;
	struct net_device *dev;
	int err;

	vport = ovs_vport_alloc(0, &ovs_internal_vport_ops, parms);
	if (IS_ERR(vport)) {
		err = PTR_ERR(vport);
		goto error;
	}

	dev = alloc_netdev(sizeof(struct internal_dev),
			   parms->name, NET_NAME_USER, do_setup);
	vport->dev = dev;
	if (!vport->dev) {
		err = -ENOMEM;
		goto error_free_vport;
	}
	vport->dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!vport->dev->tstats) {
		err = -ENOMEM;
		goto error_free_netdev;
	}

	dev_net_set(vport->dev, ovs_dp_get_net(vport->dp));
	internal_dev = internal_dev_priv(vport->dev);
	internal_dev->vport = vport;

	/* Restrict bridge port to current netns. */
	if (vport->port_no == OVSP_LOCAL)
		vport->dev->features |= NETIF_F_NETNS_LOCAL;

	rtnl_lock();
	err = register_netdevice(vport->dev);
	if (err)
		goto error_unlock;
	vport->dev->priv_destructor = internal_dev_destructor;

	dev_set_promiscuity(vport->dev, 1);
	rtnl_unlock();
	netif_start_queue(vport->dev);

	return vport;

error_unlock:
	rtnl_unlock();
	free_percpu(dev->tstats);
error_free_netdev:
	free_netdev(dev);
error_free_vport:
	ovs_vport_free(vport);
error:
	return ERR_PTR(err);
}

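/* Tear down in reverse of internal_dev_create(); the vport itself is freed
 * by internal_dev_destructor() when the net_device goes away.
 */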
static void internal_dev_destroy(struct vport *vport)
{
	netif_stop_queue(vport->dev);
	rtnl_lock();
	dev_set_promiscuity(vport->dev, -1);

	/* unregister_netdevice() waits for an RCU grace period. */
	unregister_netdevice(vport->dev);
	free_percpu(vport->dev->tstats);

	rtnl_unlock();
}

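/* RX path: the vport ->send handler.  Packets handed down from the OVS
 * datapath are delivered into the local network stack via netif_rx().
 */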
static netdev_tx_t internal_dev_recv(struct sk_buff *skb)
{
	struct net_device *netdev = skb->dev;

	if (unlikely(!(netdev->flags & IFF_UP))) {
		kfree_skb(skb);
		netdev->stats.rx_dropped++;
		return NETDEV_TX_OK;
	}

	skb_dst_drop(skb);
	nf_reset_ct(skb);
	secpath_reset(skb);

	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, netdev);
	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
	dev_sw_netstats_rx_add(netdev, skb->len);

	netif_rx(skb);

	return NETDEV_TX_OK;
}

static struct vport_ops ovs_internal_vport_ops = {
	.type = OVS_VPORT_TYPE_INTERNAL,
	.create = internal_dev_create,
	.destroy = internal_dev_destroy,
	.send = internal_dev_recv,
};

int ovs_is_internal_dev(const struct net_device *netdev)
{
	return netdev->netdev_ops == &internal_dev_netdev_ops;
}

struct vport *ovs_internal_dev_get_vport(struct net_device *netdev)
{
	if (!ovs_is_internal_dev(netdev))
		return NULL;

	return internal_dev_priv(netdev)->vport;
}

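/* Module init/exit helpers: register the rtnl link ops and the internal
 * vport type together, unwinding the former if the latter fails.
 */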
int ovs_internal_dev_rtnl_link_register(void)
{
	int err;

	err = rtnl_link_register(&internal_dev_link_ops);
	if (err < 0)
		return err;

	err = ovs_vport_ops_register(&ovs_internal_vport_ops);
	if (err < 0)
		rtnl_link_unregister(&internal_dev_link_ops);

	return err;
}

void ovs_internal_dev_rtnl_link_unregister(void)
{
	ovs_vport_ops_unregister(&ovs_internal_vport_ops);
	rtnl_link_unregister(&internal_dev_link_ops);
}