xfrm_device.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * xfrm_device.c - IPsec device offloading code.
 *
 * Copyright (c) 2015 secunet Security Networks AG
 *
 * Author:
 * Steffen Klassert <steffen.klassert@secunet.com>
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <linux/notifier.h>

#ifdef CONFIG_XFRM_OFFLOAD

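/* Transport mode: for GSO segments, rewind skb->transport_header over the
 * ESP header that xfrm reserved in front of it, then pull the skb so that
 * skb->data sits just behind the ESP header.
 */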
static void __xfrm_transport_prep(struct xfrm_state *x, struct sk_buff *skb,
                                  unsigned int hsize)
{
        struct xfrm_offload *xo = xfrm_offload(skb);

        skb_reset_mac_len(skb);
        if (xo->flags & XFRM_GSO_SEGMENT)
                skb->transport_header -= x->props.header_len;

        pskb_pull(skb, skb_transport_offset(skb) + x->props.header_len);
}

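/* Tunnel mode: for GSO segments, the transport header starts right behind
 * the newly built outer IP header of size @hsize; pull the skb by the
 * link-layer and ESP header lengths.
 */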
static void __xfrm_mode_tunnel_prep(struct xfrm_state *x, struct sk_buff *skb,
                                    unsigned int hsize)
{
        struct xfrm_offload *xo = xfrm_offload(skb);

        if (xo->flags & XFRM_GSO_SEGMENT)
                skb->transport_header = skb->network_header + hsize;

        skb_reset_mac_len(skb);
        pskb_pull(skb, skb->mac_len + x->props.header_len);
}

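/* BEET mode: like tunnel mode, but subtract the pseudo header space that
 * IPv4 BEET may occupy (plus the IPv4/IPv6 header size difference when the
 * outer family is IPv6) from the amount that is pulled.
 */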
static void __xfrm_mode_beet_prep(struct xfrm_state *x, struct sk_buff *skb,
                                  unsigned int hsize)
{
        struct xfrm_offload *xo = xfrm_offload(skb);
        int phlen = 0;

        if (xo->flags & XFRM_GSO_SEGMENT)
                skb->transport_header = skb->network_header + hsize;

        skb_reset_mac_len(skb);
        if (x->sel.family != AF_INET6) {
                phlen = IPV4_BEET_PHMAXLEN;
                if (x->outer_mode.family == AF_INET6)
                        phlen += sizeof(struct ipv6hdr) - sizeof(struct iphdr);
        }

        pskb_pull(skb, skb->mac_len + hsize + (x->props.header_len - phlen));
}

/* Adjust pointers into the packet when IPsec is done at layer2 */
static void xfrm_outer_mode_prep(struct xfrm_state *x, struct sk_buff *skb)
{
        switch (x->outer_mode.encap) {
        case XFRM_MODE_TUNNEL:
                if (x->outer_mode.family == AF_INET)
                        return __xfrm_mode_tunnel_prep(x, skb,
                                                       sizeof(struct iphdr));
                if (x->outer_mode.family == AF_INET6)
                        return __xfrm_mode_tunnel_prep(x, skb,
                                                       sizeof(struct ipv6hdr));
                break;
        case XFRM_MODE_TRANSPORT:
                if (x->outer_mode.family == AF_INET)
                        return __xfrm_transport_prep(x, skb,
                                                     sizeof(struct iphdr));
                if (x->outer_mode.family == AF_INET6)
                        return __xfrm_transport_prep(x, skb,
                                                     sizeof(struct ipv6hdr));
                break;
        case XFRM_MODE_BEET:
                if (x->outer_mode.family == AF_INET)
                        return __xfrm_mode_beet_prep(x, skb,
                                                     sizeof(struct iphdr));
                if (x->outer_mode.family == AF_INET6)
                        return __xfrm_mode_beet_prep(x, skb,
                                                     sizeof(struct ipv6hdr));
                break;
        case XFRM_MODE_ROUTEOPTIMIZATION:
        case XFRM_MODE_IN_TRIGGER:
                break;
        }
}

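/* Called on the transmit path for skbs that carry a secpath.  Packets that
 * the offloading device can handle directly are passed through unchanged;
 * if the per-CPU xfrm backlog is not empty the caller is told to retry via
 * *again.  Otherwise GSO skbs are segmented here and each segment is run
 * through the ESP type offload xmit handler before being handed to the
 * driver.
 */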
struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
{
        int err;
        unsigned long flags;
        struct xfrm_state *x;
        struct softnet_data *sd;
        struct sk_buff *skb2, *nskb, *pskb = NULL;
        netdev_features_t esp_features = features;
        struct xfrm_offload *xo = xfrm_offload(skb);
        struct net_device *dev = skb->dev;
        struct sec_path *sp;

        if (!xo || (xo->flags & XFRM_XMIT))
                return skb;

        if (!(features & NETIF_F_HW_ESP))
                esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);

        sp = skb_sec_path(skb);
        x = sp->xvec[sp->len - 1];
        if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND)
                return skb;

        /* This skb was already validated on the upper/virtual dev */
        if ((x->xso.dev != dev) && (x->xso.real_dev == dev))
                return skb;

        local_irq_save(flags);
        sd = this_cpu_ptr(&softnet_data);
        err = !skb_queue_empty(&sd->xfrm_backlog);
        local_irq_restore(flags);

        if (err) {
                *again = true;
                return skb;
        }

        if (skb_is_gso(skb) && unlikely(x->xso.dev != dev)) {
                struct sk_buff *segs;

                /* Packet got rerouted, fixup features and segment it. */
                esp_features = esp_features & ~(NETIF_F_HW_ESP | NETIF_F_GSO_ESP);

                segs = skb_gso_segment(skb, esp_features);
                if (IS_ERR(segs)) {
                        kfree_skb(skb);
                        atomic_long_inc(&dev->tx_dropped);
                        return NULL;
                } else {
                        consume_skb(skb);
                        skb = segs;
                }
        }

        if (!skb->next) {
                esp_features |= skb->dev->gso_partial_features;
                xfrm_outer_mode_prep(x, skb);

                xo->flags |= XFRM_DEV_RESUME;

                err = x->type_offload->xmit(x, skb, esp_features);
                if (err) {
                        if (err == -EINPROGRESS)
                                return NULL;

                        XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
                        kfree_skb(skb);
                        return NULL;
                }

                skb_push(skb, skb->data - skb_mac_header(skb));

                return skb;
        }

        skb_list_walk_safe(skb, skb2, nskb) {
                esp_features |= skb->dev->gso_partial_features;
                skb_mark_not_on_list(skb2);
                xo = xfrm_offload(skb2);
                xo->flags |= XFRM_DEV_RESUME;

                xfrm_outer_mode_prep(x, skb2);

                err = x->type_offload->xmit(x, skb2, esp_features);
                if (!err) {
                        skb2->next = nskb;
                } else if (err != -EINPROGRESS) {
                        XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
                        skb2->next = nskb;
                        kfree_skb_list(skb2);
                        return NULL;
                } else {
                        if (skb == skb2)
                                skb = nskb;
                        else
                                pskb->next = nskb;

                        continue;
                }

                skb_push(skb2, skb2->data - skb_mac_header(skb2));
                pskb = skb2;
        }

        return skb;
}
EXPORT_SYMBOL_GPL(validate_xmit_xfrm);

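/* Set up hardware offload for a new xfrm state: resolve the offloading
 * device (from xuo->ifindex, or via a route lookup when the ifindex does
 * not resolve to a device), check that it provides the xfrmdev_ops
 * callbacks this state needs, and ask the driver to install the state.
 * Returns 0 with xso->dev == NULL when the device cannot offload, so the
 * state falls back to software processing.
 */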
int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
                       struct xfrm_user_offload *xuo)
{
        int err;
        struct dst_entry *dst;
        struct net_device *dev;
        struct xfrm_state_offload *xso = &x->xso;
        xfrm_address_t *saddr;
        xfrm_address_t *daddr;

        if (!x->type_offload)
                return -EINVAL;

        /* We don't yet support UDP encapsulation and TFC padding. */
        if (x->encap || x->tfcpad)
                return -EINVAL;

        if (xuo->flags & ~(XFRM_OFFLOAD_IPV6 | XFRM_OFFLOAD_INBOUND))
                return -EINVAL;

        dev = dev_get_by_index(net, xuo->ifindex);
        if (!dev) {
                if (!(xuo->flags & XFRM_OFFLOAD_INBOUND)) {
                        saddr = &x->props.saddr;
                        daddr = &x->id.daddr;
                } else {
                        saddr = &x->id.daddr;
                        daddr = &x->props.saddr;
                }

                dst = __xfrm_dst_lookup(net, 0, 0, saddr, daddr,
                                        x->props.family,
                                        xfrm_smark_get(0, x));
                if (IS_ERR(dst))
                        return 0;

                dev = dst->dev;

                dev_hold(dev);
                dst_release(dst);
        }

        if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_state_add) {
                xso->dev = NULL;
                dev_put(dev);
                return 0;
        }

        if (x->props.flags & XFRM_STATE_ESN &&
            !dev->xfrmdev_ops->xdo_dev_state_advance_esn) {
                xso->dev = NULL;
                dev_put(dev);
                return -EINVAL;
        }

        xso->dev = dev;
        xso->real_dev = dev;
        xso->num_exthdrs = 1;
        /* Don't forward bit that is not implemented */
        xso->flags = xuo->flags & ~XFRM_OFFLOAD_IPV6;

        err = dev->xfrmdev_ops->xdo_dev_state_add(x);
        if (err) {
                xso->num_exthdrs = 0;
                xso->flags = 0;
                xso->dev = NULL;
                xso->real_dev = NULL;
                dev_put(dev);

                if (err != -EOPNOTSUPP)
                        return err;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(xfrm_dev_state_add);

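/* Fast path check: can this skb be sent through the offloading device
 * without fragmentation?  If so, give the driver a final say through
 * xdo_dev_offload_ok().
 */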
bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
        int mtu;
        struct dst_entry *dst = skb_dst(skb);
        struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
        struct net_device *dev = x->xso.dev;

        if (!x->type_offload || x->encap)
                return false;

        if ((!dev || (dev == xfrm_dst_path(dst)->dev)) &&
            (!xdst->child->xfrm)) {
                mtu = xfrm_state_mtu(x, xdst->child_mtu_cached);
                if (skb->len <= mtu)
                        goto ok;

                if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
                        goto ok;
        }

        return false;

ok:
        if (dev && dev->xfrmdev_ops && dev->xfrmdev_ops->xdo_dev_offload_ok)
                return x->xso.dev->xfrmdev_ops->xdo_dev_offload_ok(skb, x);

        return true;
}
EXPORT_SYMBOL_GPL(xfrm_dev_offload_ok);

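/* Transmit an skb that was handed back after asynchronous ESP processing
 * (XFRM_DEV_RESUME): try to send it on its device right away and queue it
 * to the per-CPU xfrm backlog if the TX queue is frozen or stopped.
 */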
void xfrm_dev_resume(struct sk_buff *skb)
{
        struct net_device *dev = skb->dev;
        int ret = NETDEV_TX_BUSY;
        struct netdev_queue *txq;
        struct softnet_data *sd;
        unsigned long flags;

        rcu_read_lock();
        txq = netdev_core_pick_tx(dev, skb, NULL);

        HARD_TX_LOCK(dev, txq, smp_processor_id());
        if (!netif_xmit_frozen_or_stopped(txq))
                skb = dev_hard_start_xmit(skb, dev, txq, &ret);
        HARD_TX_UNLOCK(dev, txq);

        if (!dev_xmit_complete(ret)) {
                local_irq_save(flags);
                sd = this_cpu_ptr(&softnet_data);
                skb_queue_tail(&sd->xfrm_backlog, skb);
                raise_softirq_irqoff(NET_TX_SOFTIRQ);
                local_irq_restore(flags);
        }
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(xfrm_dev_resume);

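/* Drain the per-CPU xfrm backlog (typically from the NET_TX softirq that
 * xfrm_dev_resume() raised) by resuming every queued skb.
 */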
void xfrm_dev_backlog(struct softnet_data *sd)
{
        struct sk_buff_head *xfrm_backlog = &sd->xfrm_backlog;
        struct sk_buff_head list;
        struct sk_buff *skb;

        if (skb_queue_empty(xfrm_backlog))
                return;

        __skb_queue_head_init(&list);

        spin_lock(&xfrm_backlog->lock);
        skb_queue_splice_init(xfrm_backlog, &list);
        spin_unlock(&xfrm_backlog->lock);

        while (!skb_queue_empty(&list)) {
                skb = __skb_dequeue(&list);
                xfrm_dev_resume(skb);
        }
}
#endif

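/* Validate a device's ESP offload advertisement: NETIF_F_HW_ESP_TX_CSUM
 * requires NETIF_F_HW_ESP, and NETIF_F_HW_ESP requires the mandatory
 * xfrmdev_ops callbacks.  Without CONFIG_XFRM_OFFLOAD neither feature may
 * be set.
 */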
static int xfrm_api_check(struct net_device *dev)
{
#ifdef CONFIG_XFRM_OFFLOAD
        if ((dev->features & NETIF_F_HW_ESP_TX_CSUM) &&
            !(dev->features & NETIF_F_HW_ESP))
                return NOTIFY_BAD;

        if ((dev->features & NETIF_F_HW_ESP) &&
            (!(dev->xfrmdev_ops &&
               dev->xfrmdev_ops->xdo_dev_state_add &&
               dev->xfrmdev_ops->xdo_dev_state_delete)))
                return NOTIFY_BAD;
#else
        if (dev->features & (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM))
                return NOTIFY_BAD;
#endif

        return NOTIFY_DONE;
}

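/* For reference, a minimal (hypothetical) sketch of the driver side that
 * xfrm_api_check() and xfrm_dev_state_add() expect.  The foo_* names are
 * illustrative only and not part of this file:
 *
 *        static const struct xfrmdev_ops foo_xfrmdev_ops = {
 *                .xdo_dev_state_add         = foo_xfrm_add_state,
 *                .xdo_dev_state_delete      = foo_xfrm_del_state,
 *                .xdo_dev_state_free        = foo_xfrm_free_state,
 *                .xdo_dev_offload_ok        = foo_xfrm_offload_ok,
 *                .xdo_dev_state_advance_esn = foo_xfrm_advance_esn,
 *        };
 *
 *        static void foo_setup_xfrm(struct net_device *netdev)
 *        {
 *                netdev->xfrmdev_ops = &foo_xfrmdev_ops;
 *                netdev->features |= NETIF_F_HW_ESP;
 *                netdev->hw_enc_features |= NETIF_F_HW_ESP;
 *        }
 *
 * xdo_dev_state_add and xdo_dev_state_delete are the callbacks
 * xfrm_api_check() insists on; xdo_dev_state_advance_esn is additionally
 * required for states that use XFRM_STATE_ESN (see xfrm_dev_state_add()
 * above).
 */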
static int xfrm_dev_register(struct net_device *dev)
{
        return xfrm_api_check(dev);
}

static int xfrm_dev_feat_change(struct net_device *dev)
{
        return xfrm_api_check(dev);
}

static int xfrm_dev_down(struct net_device *dev)
{
        if (dev->features & NETIF_F_HW_ESP)
                xfrm_dev_state_flush(dev_net(dev), dev, true);

        return NOTIFY_DONE;
}

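/* Netdevice notifier: validate offload feature advertisements when a
 * device registers or changes its features, and flush its offloaded
 * states when it goes down or is unregistered.
 */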
static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);

        switch (event) {
        case NETDEV_REGISTER:
                return xfrm_dev_register(dev);

        case NETDEV_FEAT_CHANGE:
                return xfrm_dev_feat_change(dev);

        case NETDEV_DOWN:
        case NETDEV_UNREGISTER:
                return xfrm_dev_down(dev);
        }
        return NOTIFY_DONE;
}

static struct notifier_block xfrm_dev_notifier = {
        .notifier_call = xfrm_dev_event,
};

void __init xfrm_dev_init(void)
{
        register_netdevice_notifier(&xfrm_dev_notifier);
}