xfrm_output.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * xfrm_output.c - Common IPsec encapsulation code.
 *
 * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/dst.h>
#include <net/icmp.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ip6_route.h>
#include <net/ipv6_stubs.h>
#endif

#include "xfrm_inout.h"

static int xfrm_output2(struct net *net, struct sock *sk, struct sk_buff *skb);
static int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb);
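
/* Make sure the skb has enough head- and tailroom for the headers and
 * trailers the dst will add, reallocating the skb header if necessary.
 */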
static int xfrm_skb_check_space(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	int nhead = dst->header_len + LL_RESERVED_SPACE(dst->dev)
		- skb_headroom(skb);
	int ntail = dst->dev->needed_tailroom - skb_tailroom(skb);

	if (nhead <= 0) {
		if (ntail <= 0)
			return 0;
		nhead = 0;
	} else if (ntail < 0)
		ntail = 0;

	return pskb_expand_head(skb, nhead, ntail, GFP_ATOMIC);
}

/* Children define the path of the packet through the
 * Linux networking stack.  Thus, destinations are stackable.
 */
static struct dst_entry *skb_dst_pop(struct sk_buff *skb)
{
	struct dst_entry *child = dst_clone(xfrm_dst_child(skb_dst(skb)));

	skb_dst_drop(skb);
	return child;
}

/* Add encapsulation header.
 *
 * The IP header will be moved forward to make space for the encapsulation
 * header.
 */
static int xfrm4_transport_output(struct xfrm_state *x, struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);
	int ihl = iph->ihl * 4;

	skb_set_inner_transport_header(skb, skb_transport_offset(skb));

	skb_set_network_header(skb, -x->props.header_len);
	skb->mac_header = skb->network_header +
			  offsetof(struct iphdr, protocol);
	skb->transport_header = skb->network_header + ihl;
	__skb_pull(skb, ihl);
	memmove(skb_network_header(skb), iph, ihl);
	return 0;
}

/* Add encapsulation header.
 *
 * The IP header and mutable extension headers will be moved forward to make
 * space for the encapsulation header.
 */
static int xfrm6_transport_output(struct xfrm_state *x, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct ipv6hdr *iph;
	u8 *prevhdr;
	int hdr_len;

	iph = ipv6_hdr(skb);
	skb_set_inner_transport_header(skb, skb_transport_offset(skb));

	hdr_len = x->type->hdr_offset(x, skb, &prevhdr);
	if (hdr_len < 0)
		return hdr_len;
	skb_set_mac_header(skb,
			   (prevhdr - x->props.header_len) - skb->data);
	skb_set_network_header(skb, -x->props.header_len);
	skb->transport_header = skb->network_header + hdr_len;
	__skb_pull(skb, hdr_len);
	memmove(ipv6_hdr(skb), iph, hdr_len);
	return 0;
#else
	WARN_ON_ONCE(1);
	return -EAFNOSUPPORT;
#endif
}

/* Add route optimization header space.
 *
 * The IP header and mutable extension headers will be moved forward to make
 * space for the route optimization header.
 */
static int xfrm6_ro_output(struct xfrm_state *x, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct ipv6hdr *iph;
	u8 *prevhdr;
	int hdr_len;

	iph = ipv6_hdr(skb);

	hdr_len = x->type->hdr_offset(x, skb, &prevhdr);
	if (hdr_len < 0)
		return hdr_len;
	skb_set_mac_header(skb,
			   (prevhdr - x->props.header_len) - skb->data);
	skb_set_network_header(skb, -x->props.header_len);
	skb->transport_header = skb->network_header + hdr_len;
	__skb_pull(skb, hdr_len);
	memmove(ipv6_hdr(skb), iph, hdr_len);

	x->lastused = ktime_get_real_seconds();

	return 0;
#else
	WARN_ON_ONCE(1);
	return -EAFNOSUPPORT;
#endif
}

/* Add encapsulation header.
 *
 * The top IP header will be constructed per draft-nikander-esp-beet-mode-06.txt.
 */
static int xfrm4_beet_encap_add(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_beet_phdr *ph;
	struct iphdr *top_iph;
	int hdrlen, optlen;

	hdrlen = 0;
	optlen = XFRM_MODE_SKB_CB(skb)->optlen;
	if (unlikely(optlen))
		hdrlen += IPV4_BEET_PHMAXLEN - (optlen & 4);

	skb_set_network_header(skb, -x->props.header_len - hdrlen +
			       (XFRM_MODE_SKB_CB(skb)->ihl - sizeof(*top_iph)));
	if (x->sel.family != AF_INET6)
		skb->network_header += IPV4_BEET_PHMAXLEN;
	skb->mac_header = skb->network_header +
			  offsetof(struct iphdr, protocol);
	skb->transport_header = skb->network_header + sizeof(*top_iph);

	xfrm4_beet_make_header(skb);

	ph = __skb_pull(skb, XFRM_MODE_SKB_CB(skb)->ihl - hdrlen);

	top_iph = ip_hdr(skb);

	if (unlikely(optlen)) {
		if (WARN_ON(optlen < 0))
			return -EINVAL;

		ph->padlen = 4 - (optlen & 4);
		ph->hdrlen = optlen / 8;
		ph->nexthdr = top_iph->protocol;
		if (ph->padlen)
			memset(ph + 1, IPOPT_NOP, ph->padlen);

		top_iph->protocol = IPPROTO_BEETPH;
		top_iph->ihl = sizeof(struct iphdr) / 4;
	}

	top_iph->saddr = x->props.saddr.a4;
	top_iph->daddr = x->id.daddr.a4;

	return 0;
}

/* Add encapsulation header.
 *
 * The top IP header will be constructed per RFC 2401.
 */
static int xfrm4_tunnel_encap_add(struct xfrm_state *x, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct iphdr *top_iph;
	int flags;

	skb_set_inner_network_header(skb, skb_network_offset(skb));
	skb_set_inner_transport_header(skb, skb_transport_offset(skb));

	skb_set_network_header(skb, -x->props.header_len);
	skb->mac_header = skb->network_header +
			  offsetof(struct iphdr, protocol);
	skb->transport_header = skb->network_header + sizeof(*top_iph);
	top_iph = ip_hdr(skb);

	top_iph->ihl = 5;
	top_iph->version = 4;

	top_iph->protocol = xfrm_af2proto(skb_dst(skb)->ops->family);

	/* DS disclosing depends on XFRM_SA_XFLAG_DONT_ENCAP_DSCP */
	if (x->props.extra_flags & XFRM_SA_XFLAG_DONT_ENCAP_DSCP)
		top_iph->tos = 0;
	else
		top_iph->tos = XFRM_MODE_SKB_CB(skb)->tos;
	top_iph->tos = INET_ECN_encapsulate(top_iph->tos,
					    XFRM_MODE_SKB_CB(skb)->tos);

	flags = x->props.flags;
	if (flags & XFRM_STATE_NOECN)
		IP_ECN_clear(top_iph);

	top_iph->frag_off = (flags & XFRM_STATE_NOPMTUDISC) ?
		0 : (XFRM_MODE_SKB_CB(skb)->frag_off & htons(IP_DF));

	top_iph->ttl = ip4_dst_hoplimit(xfrm_dst_child(dst));

	top_iph->saddr = x->props.saddr.a4;
	top_iph->daddr = x->id.daddr.a4;
	ip_select_ident(dev_net(dst->dev), skb, NULL);

	return 0;
}

#if IS_ENABLED(CONFIG_IPV6)
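/* Add encapsulation header.
 *
 * The top IPv6 header will be constructed per RFC 2401, mirroring
 * xfrm4_tunnel_encap_add() above.
 */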
static int xfrm6_tunnel_encap_add(struct xfrm_state *x, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct ipv6hdr *top_iph;
	int dsfield;

	skb_set_inner_network_header(skb, skb_network_offset(skb));
	skb_set_inner_transport_header(skb, skb_transport_offset(skb));

	skb_set_network_header(skb, -x->props.header_len);
	skb->mac_header = skb->network_header +
			  offsetof(struct ipv6hdr, nexthdr);
	skb->transport_header = skb->network_header + sizeof(*top_iph);
	top_iph = ipv6_hdr(skb);

	top_iph->version = 6;

	memcpy(top_iph->flow_lbl, XFRM_MODE_SKB_CB(skb)->flow_lbl,
	       sizeof(top_iph->flow_lbl));
	top_iph->nexthdr = xfrm_af2proto(skb_dst(skb)->ops->family);

	if (x->props.extra_flags & XFRM_SA_XFLAG_DONT_ENCAP_DSCP)
		dsfield = 0;
	else
		dsfield = XFRM_MODE_SKB_CB(skb)->tos;
	dsfield = INET_ECN_encapsulate(dsfield, XFRM_MODE_SKB_CB(skb)->tos);
	if (x->props.flags & XFRM_STATE_NOECN)
		dsfield &= ~INET_ECN_MASK;
	ipv6_change_dsfield(top_iph, 0, dsfield);
	top_iph->hop_limit = ip6_dst_hoplimit(xfrm_dst_child(dst));
	top_iph->saddr = *(struct in6_addr *)&x->props.saddr;
	top_iph->daddr = *(struct in6_addr *)&x->id.daddr;
	return 0;
}
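
/* Add encapsulation header.
 *
 * The top IPv6 header will be constructed per
 * draft-nikander-esp-beet-mode-06.txt, mirroring xfrm4_beet_encap_add()
 * above.
 */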
static int xfrm6_beet_encap_add(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ipv6hdr *top_iph;
	struct ip_beet_phdr *ph;
	int optlen, hdr_len;

	hdr_len = 0;
	optlen = XFRM_MODE_SKB_CB(skb)->optlen;
	if (unlikely(optlen))
		hdr_len += IPV4_BEET_PHMAXLEN - (optlen & 4);

	skb_set_network_header(skb, -x->props.header_len - hdr_len);
	if (x->sel.family != AF_INET6)
		skb->network_header += IPV4_BEET_PHMAXLEN;
	skb->mac_header = skb->network_header +
			  offsetof(struct ipv6hdr, nexthdr);
	skb->transport_header = skb->network_header + sizeof(*top_iph);
	ph = __skb_pull(skb, XFRM_MODE_SKB_CB(skb)->ihl - hdr_len);

	xfrm6_beet_make_header(skb);

	top_iph = ipv6_hdr(skb);
	if (unlikely(optlen)) {
		if (WARN_ON(optlen < 0))
			return -EINVAL;

		ph->padlen = 4 - (optlen & 4);
		ph->hdrlen = optlen / 8;
		ph->nexthdr = top_iph->nexthdr;
		if (ph->padlen)
			memset(ph + 1, IPOPT_NOP, ph->padlen);

		top_iph->nexthdr = IPPROTO_BEETPH;
	}

	top_iph->saddr = *(struct in6_addr *)&x->props.saddr;
	top_iph->daddr = *(struct in6_addr *)&x->id.daddr;
	return 0;
}
#endif

/* Add encapsulation header.
 *
 * On exit, the transport header will be set to the start of the
 * encapsulation header to be filled in by x->type->output and the mac
 * header will be set to the nextheader (protocol for IPv4) field of the
 * extension header directly preceding the encapsulation header, or in
 * its absence, that of the top IP header.
 * The value of the network header will always point to the top IP header
 * while skb->data will point to the payload.
 */
static int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;

	err = xfrm_inner_extract_output(x, skb);
	if (err)
		return err;

	IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE;
	skb->protocol = htons(ETH_P_IP);

	switch (x->outer_mode.encap) {
	case XFRM_MODE_BEET:
		return xfrm4_beet_encap_add(x, skb);
	case XFRM_MODE_TUNNEL:
		return xfrm4_tunnel_encap_add(x, skb);
	}

	WARN_ON_ONCE(1);
	return -EOPNOTSUPP;
}
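
/* IPv6 counterpart of xfrm4_prepare_output().  skb->ignore_df is set so
 * that later path-MTU checks do not reject the now-encapsulated packet;
 * the size was already validated against the inner packet by
 * xfrm_inner_extract_output().
 */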
static int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
	int err;

	err = xfrm_inner_extract_output(x, skb);
	if (err)
		return err;

	skb->ignore_df = 1;
	skb->protocol = htons(ETH_P_IPV6);

	switch (x->outer_mode.encap) {
	case XFRM_MODE_BEET:
		return xfrm6_beet_encap_add(x, skb);
	case XFRM_MODE_TUNNEL:
		return xfrm6_tunnel_encap_add(x, skb);
	default:
		WARN_ON_ONCE(1);
		return -EOPNOTSUPP;
	}
#endif
	WARN_ON_ONCE(1);
	return -EAFNOSUPPORT;
}
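
/* Dispatch to the family- and mode-specific encapsulation handler for
 * this state's outer mode.
 */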
static int xfrm_outer_mode_output(struct xfrm_state *x, struct sk_buff *skb)
{
	switch (x->outer_mode.encap) {
	case XFRM_MODE_BEET:
	case XFRM_MODE_TUNNEL:
		if (x->outer_mode.family == AF_INET)
			return xfrm4_prepare_output(x, skb);
		if (x->outer_mode.family == AF_INET6)
			return xfrm6_prepare_output(x, skb);
		break;
	case XFRM_MODE_TRANSPORT:
		if (x->outer_mode.family == AF_INET)
			return xfrm4_transport_output(x, skb);
		if (x->outer_mode.family == AF_INET6)
			return xfrm6_transport_output(x, skb);
		break;
	case XFRM_MODE_ROUTEOPTIMIZATION:
		if (x->outer_mode.family == AF_INET6)
			return xfrm6_ro_output(x, skb);
		WARN_ON_ONCE(1);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	return -EOPNOTSUPP;
}

int pktgen_xfrm_outer_mode_output(struct xfrm_state *x, struct sk_buff *skb)
{
	return xfrm_outer_mode_output(x, skb);
}
EXPORT_SYMBOL_GPL(pktgen_xfrm_outer_mode_output);
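
/* Apply one transform to the skb: reserve space, add the outer headers,
 * run replay and lifetime accounting under the state lock, then hand the
 * packet to the type handler (e.g. ESP).  The do/while walks the stacked
 * dst chain, applying every following non-tunnel state in one pass; a
 * tunnel-mode state ends the loop since its packet must re-enter the IP
 * output path.  Asynchronous crypto returns -EINPROGRESS and re-enters
 * at "resume" via xfrm_output_resume().
 */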
static int xfrm_output_one(struct sk_buff *skb, int err)
{
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_state *x = dst->xfrm;
	struct net *net = xs_net(x);

	if (err <= 0)
		goto resume;

	do {
		err = xfrm_skb_check_space(skb);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			goto error_nolock;
		}

		skb->mark = xfrm_smark_get(skb->mark, x);

		err = xfrm_outer_mode_output(x, skb);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEMODEERROR);
			goto error_nolock;
		}

		spin_lock_bh(&x->lock);

		if (unlikely(x->km.state != XFRM_STATE_VALID)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEINVALID);
			err = -EINVAL;
			goto error;
		}

		err = xfrm_state_check_expire(x);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEEXPIRED);
			goto error;
		}

		err = x->repl->overflow(x, skb);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATESEQERROR);
			goto error;
		}

		x->curlft.bytes += skb->len;
		x->curlft.packets++;

		spin_unlock_bh(&x->lock);

		skb_dst_force(skb);
		if (!skb_dst(skb)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			err = -EHOSTUNREACH;
			goto error_nolock;
		}

		if (xfrm_offload(skb)) {
			x->type_offload->encap(x, skb);
		} else {
			/* Inner headers are invalid now. */
			skb->encapsulation = 0;

			err = x->type->output(x, skb);
			if (err == -EINPROGRESS)
				goto out;
		}

resume:
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			goto error_nolock;
		}

		dst = skb_dst_pop(skb);
		if (!dst) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			err = -EHOSTUNREACH;
			goto error_nolock;
		}
		skb_dst_set(skb, dst);
		x = dst->xfrm;
	} while (x && !(x->outer_mode.flags & XFRM_MODE_FLAG_TUNNEL));

	return 0;

error:
	spin_unlock_bh(&x->lock);
error_nolock:
	kfree_skb(skb);
out:
	return err;
}
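
/* Continue output after a transform completes.  Called with err == 1 to
 * start output on a fresh packet (see xfrm_output2()), or by a type
 * handler's asynchronous callback (e.g. ESP crypto completion) with its
 * error code.
 */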
int xfrm_output_resume(struct sk_buff *skb, int err)
{
	struct net *net = xs_net(skb_dst(skb)->xfrm);

	while (likely((err = xfrm_output_one(skb, err)) == 0)) {
		nf_reset_ct(skb);

		err = skb_dst(skb)->ops->local_out(net, skb->sk, skb);
		if (unlikely(err != 1))
			goto out;

		if (!skb_dst(skb)->xfrm)
			return dst_output(net, skb->sk, skb);

		err = nf_hook(skb_dst(skb)->ops->family,
			      NF_INET_POST_ROUTING, net, skb->sk, skb,
			      NULL, skb_dst(skb)->dev, xfrm_output2);
		if (unlikely(err != 1))
			goto out;
	}

	if (err == -EINPROGRESS)
		err = 0;

out:
	return err;
}
EXPORT_SYMBOL_GPL(xfrm_output_resume);

static int xfrm_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	return xfrm_output_resume(skb, 1);
}
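
/* Segment a GSO skb in software and transform each resulting segment
 * separately.
 */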
static int xfrm_output_gso(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;

	BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_GSO_CB_OFFSET);
	BUILD_BUG_ON(sizeof(*IP6CB(skb)) > SKB_GSO_CB_OFFSET);
	segs = skb_gso_segment(skb, 0);
	kfree_skb(skb);
	if (IS_ERR(segs))
		return PTR_ERR(segs);
	if (segs == NULL)
		return -EINVAL;

	skb_list_walk_safe(segs, segs, nskb) {
		int err;

		skb_mark_not_on_list(segs);
		err = xfrm_output2(net, sk, segs);

		if (unlikely(err)) {
			kfree_skb_list(nskb);
			return err;
		}
	}

	return 0;
}
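
/* Top-level output path for xfrm-routed packets: initialize the IP
 * control block, set up the secpath when device offload is usable,
 * software-segment or checksum the packet as needed, then run the
 * transforms via xfrm_output2().
 */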
int xfrm_output(struct sock *sk, struct sk_buff *skb)
{
	struct net *net = dev_net(skb_dst(skb)->dev);
	struct xfrm_state *x = skb_dst(skb)->xfrm;
	int err;

	switch (x->outer_mode.family) {
	case AF_INET:
		memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
		IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
		break;
	case AF_INET6:
		memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
		IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
		break;
	}

	secpath_reset(skb);

	if (xfrm_dev_offload_ok(skb, x)) {
		struct sec_path *sp;

		sp = secpath_set(skb);
		if (!sp) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			kfree_skb(skb);
			return -ENOMEM;
		}
		skb->encapsulation = 1;

		sp->olen++;
		sp->xvec[sp->len++] = x;
		xfrm_state_hold(x);

		if (skb_is_gso(skb)) {
			if (skb->inner_protocol)
				return xfrm_output_gso(net, sk, skb);

			skb_shinfo(skb)->gso_type |= SKB_GSO_ESP;
			goto out;
		}

		if (x->xso.dev && x->xso.dev->features & NETIF_F_HW_ESP_TX_CSUM)
			goto out;
	} else {
		if (skb_is_gso(skb))
			return xfrm_output_gso(net, sk, skb);
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		err = skb_checksum_help(skb);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			kfree_skb(skb);
			return err;
		}
	}

out:
	return xfrm_output2(net, sk, skb);
}
EXPORT_SYMBOL_GPL(xfrm_output);
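
/* Enforce the path MTU before tunnel-mode encapsulation: an oversized
 * packet with DF set triggers xfrm_local_error() for locally generated
 * traffic, or an ICMP "fragmentation needed" error otherwise.
 */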
static int xfrm4_tunnel_check_size(struct sk_buff *skb)
{
	int mtu, ret = 0;

	if (IPCB(skb)->flags & IPSKB_XFRM_TUNNEL_SIZE)
		goto out;

	if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->ignore_df)
		goto out;

	mtu = dst_mtu(skb_dst(skb));
	if ((!skb_is_gso(skb) && skb->len > mtu) ||
	    (skb_is_gso(skb) &&
	     !skb_gso_validate_network_len(skb, ip_skb_dst_mtu(skb->sk, skb)))) {
		skb->protocol = htons(ETH_P_IP);

		if (skb->sk)
			xfrm_local_error(skb, mtu);
		else
			icmp_send(skb, ICMP_DEST_UNREACH,
				  ICMP_FRAG_NEEDED, htonl(mtu));
		ret = -EMSGSIZE;
	}
out:
	return ret;
}

static int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;

	if (x->outer_mode.encap == XFRM_MODE_BEET &&
	    ip_is_fragment(ip_hdr(skb))) {
		net_warn_ratelimited("BEET mode doesn't support inner IPv4 fragments\n");
		return -EAFNOSUPPORT;
	}

	err = xfrm4_tunnel_check_size(skb);
	if (err)
		return err;

	XFRM_MODE_SKB_CB(skb)->protocol = ip_hdr(skb)->protocol;

	xfrm4_extract_header(skb);
	return 0;
}

#if IS_ENABLED(CONFIG_IPV6)
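/* IPv6 counterpart of xfrm4_tunnel_check_size(): the MTU is clamped to
 * IPV6_MIN_MTU, and sockets with IPV6_DONTFRAG set are notified through
 * the RXPMTU queue instead of an ICMP error.
 */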
static int xfrm6_tunnel_check_size(struct sk_buff *skb)
{
	int mtu, ret = 0;
	struct dst_entry *dst = skb_dst(skb);

	if (skb->ignore_df)
		goto out;

	mtu = dst_mtu(dst);
	if (mtu < IPV6_MIN_MTU)
		mtu = IPV6_MIN_MTU;

	if ((!skb_is_gso(skb) && skb->len > mtu) ||
	    (skb_is_gso(skb) &&
	     !skb_gso_validate_network_len(skb, ip6_skb_dst_mtu(skb)))) {
		skb->dev = dst->dev;
		skb->protocol = htons(ETH_P_IPV6);

		if (xfrm6_local_dontfrag(skb->sk))
			ipv6_stub->xfrm6_local_rxpmtu(skb, mtu);
		else if (skb->sk)
			xfrm_local_error(skb, mtu);
		else
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		ret = -EMSGSIZE;
	}
out:
	return ret;
}
#endif

static int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
	int err;

	err = xfrm6_tunnel_check_size(skb);
	if (err)
		return err;

	XFRM_MODE_SKB_CB(skb)->protocol = ipv6_hdr(skb)->nexthdr;

	xfrm6_extract_header(skb);
	return 0;
#else
	WARN_ON_ONCE(1);
	return -EAFNOSUPPORT;
#endif
}
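
/* Resolve the inner family (from the selector, or from the dst family
 * for wildcard AF_UNSPEC states) and run the matching extract handler.
 */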
static int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb)
{
	const struct xfrm_mode *inner_mode;

	if (x->sel.family == AF_UNSPEC)
		inner_mode = xfrm_ip2inner_mode(x,
				xfrm_af2proto(skb_dst(skb)->ops->family));
	else
		inner_mode = &x->inner_mode;

	if (inner_mode == NULL)
		return -EAFNOSUPPORT;

	switch (inner_mode->family) {
	case AF_INET:
		return xfrm4_extract_output(x, skb);
	case AF_INET6:
		return xfrm6_extract_output(x, skb);
	}

	return -EAFNOSUPPORT;
}
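
/* Report an MTU error back to the local socket that generated the
 * packet, via the address-family specific local_error handler.
 * xfrm_state_get_afinfo() takes the RCU read lock on success, hence
 * the unlock after the callback.
 */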
void xfrm_local_error(struct sk_buff *skb, int mtu)
{
	unsigned int proto;
	struct xfrm_state_afinfo *afinfo;

	if (skb->protocol == htons(ETH_P_IP))
		proto = AF_INET;
	else if (skb->protocol == htons(ETH_P_IPV6) &&
		 skb->sk->sk_family == AF_INET6)
		proto = AF_INET6;
	else
		return;

	afinfo = xfrm_state_get_afinfo(proto);
	if (afinfo) {
		afinfo->local_error(skb, mtu);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL_GPL(xfrm_local_error);