// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C)2002 USAGI/WIDE Project
 *
 * Authors
 *
 *	Mitsuru KANDA @USAGI       : IPv6 Support
 *	Kazunori MIYAZAWA @USAGI   :
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *
 *	This file is derived from net/ipv4/ah.c.
 */

#define pr_fmt(fmt) "IPv6: " fmt

#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <net/ip.h>
#include <net/ah.h>
#include <linux/crypto.h>
#include <linux/pfkeyv2.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include <net/ip6_route.h>
#include <net/icmp.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/xfrm.h>

#define IPV6HDR_BASELEN 8
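
/* Save area for the part of the IPv6 header that follows the first 8 bytes:
 * the destination address plus any extension headers.  With Mobile IPv6 the
 * source address is saved as well, since the home address option
 * rearrangement swaps it.  The copy is restored after the ICV has been
 * computed over the zeroed/rearranged headers.
 */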
struct tmp_ext {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
	struct in6_addr saddr;
#endif
	struct in6_addr daddr;
	char hdrs[];
};

struct ah_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

#define AH_SKB_CB(__skb) ((struct ah_skb_cb *)&((__skb)->cb[0]))
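
/* Scratch buffer layout, every section suitably aligned:
 *
 *   [caller data of @size bytes][ICV][struct ahash_request][scatterlist[nfrags]]
 *
 * The helpers below (ah_tmp_ext, ah_tmp_auth, ah_tmp_icv, ah_tmp_req and
 * ah_req_sg) return pointers into this single GFP_ATOMIC allocation.
 */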
static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags,
			  unsigned int size)
{
	unsigned int len;

	len = size + crypto_ahash_digestsize(ahash) +
	      (crypto_ahash_alignmask(ahash) &
	       ~(crypto_tfm_ctx_alignment() - 1));

	len = ALIGN(len, crypto_tfm_ctx_alignment());

	len += sizeof(struct ahash_request) + crypto_ahash_reqsize(ahash);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	return kmalloc(len, GFP_ATOMIC);
}

static inline struct tmp_ext *ah_tmp_ext(void *base)
{
	return base + IPV6HDR_BASELEN;
}

static inline u8 *ah_tmp_auth(u8 *tmp, unsigned int offset)
{
	return tmp + offset;
}

static inline u8 *ah_tmp_icv(struct crypto_ahash *ahash, void *tmp,
			     unsigned int offset)
{
	return PTR_ALIGN((u8 *)tmp + offset, crypto_ahash_alignmask(ahash) + 1);
}

static inline struct ahash_request *ah_tmp_req(struct crypto_ahash *ahash,
					       u8 *icv)
{
	struct ahash_request *req;

	req = (void *)PTR_ALIGN(icv + crypto_ahash_digestsize(ahash),
				crypto_tfm_ctx_alignment());

	ahash_request_set_tfm(req, ahash);

	return req;
}

static inline struct scatterlist *ah_req_sg(struct crypto_ahash *ahash,
					    struct ahash_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_ahash_reqsize(ahash),
			     __alignof__(struct scatterlist));
}
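
/* Zero the data of every TLV option whose "mutable" bit (0x20 in the option
 * type) is set, so that mutable options do not contribute to the ICV.
 * Returns false if the option area is malformed.
 */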
static bool zero_out_mutable_opts(struct ipv6_opt_hdr *opthdr)
{
	u8 *opt = (u8 *)opthdr;
	int len = ipv6_optlen(opthdr);
	int off = 0;
	int optlen = 0;

	off += 2;
	len -= 2;

	while (len > 0) {

		switch (opt[off]) {

		case IPV6_TLV_PAD1:
			optlen = 1;
			break;
		default:
			if (len < 2)
				goto bad;
			optlen = opt[off+1]+2;
			if (len < optlen)
				goto bad;
			if (opt[off] & 0x20)
				memset(&opt[off+2], 0, opt[off+1]);
			break;
		}

		off += optlen;
		len -= optlen;
	}
	if (len == 0)
		return true;

bad:
	return false;
}

#if IS_ENABLED(CONFIG_IPV6_MIP6)
/**
 * ipv6_rearrange_destopt - rearrange IPv6 destination options header
 * @iph: IPv6 header
 * @destopt: destination options header
 */
static void ipv6_rearrange_destopt(struct ipv6hdr *iph, struct ipv6_opt_hdr *destopt)
{
	u8 *opt = (u8 *)destopt;
	int len = ipv6_optlen(destopt);
	int off = 0;
	int optlen = 0;

	off += 2;
	len -= 2;

	while (len > 0) {

		switch (opt[off]) {

		case IPV6_TLV_PAD1:
			optlen = 1;
			break;
		default:
			if (len < 2)
				goto bad;
			optlen = opt[off+1]+2;
			if (len < optlen)
				goto bad;

			/* Rearrange the source address in @iph and the
			 * address in the home address option so that they
			 * hold the final source.
			 * See section 11.3.2 of RFC 3775 for details.
			 */
			if (opt[off] == IPV6_TLV_HAO) {
				struct in6_addr final_addr;
				struct ipv6_destopt_hao *hao;

				hao = (struct ipv6_destopt_hao *)&opt[off];
				if (hao->length != sizeof(hao->addr)) {
					net_warn_ratelimited("destopt hao: invalid header length: %u\n",
							     hao->length);
					goto bad;
				}
				final_addr = hao->addr;
				hao->addr = iph->saddr;
				iph->saddr = final_addr;
			}
			break;
		}

		off += optlen;
		len -= optlen;
	}

	/* Note: ok if len == 0 */

bad:
	return;
}
#else
static void ipv6_rearrange_destopt(struct ipv6hdr *iph, struct ipv6_opt_hdr *destopt) {}
#endif

/**
 * ipv6_rearrange_rthdr - rearrange IPv6 routing header
 * @iph: IPv6 header
 * @rthdr: routing header
 *
 * Rearrange the destination address in @iph and the addresses in @rthdr
 * so that they appear in the order they will at the final destination.
 * See Appendix A2 of RFC 2402 for details.
 */
static void ipv6_rearrange_rthdr(struct ipv6hdr *iph, struct ipv6_rt_hdr *rthdr)
{
	int segments, segments_left;
	struct in6_addr *addrs;
	struct in6_addr final_addr;

	segments_left = rthdr->segments_left;
	if (segments_left == 0)
		return;
	rthdr->segments_left = 0;

	/* The value of rthdr->hdrlen has been verified either by the system
	 * call if it is locally generated, or by ipv6_rthdr_rcv() for incoming
	 * packets.  So we can assume that it is even and that segments is
	 * greater than or equal to segments_left.
	 *
	 * For the same reason we can assume that this option is of type 0.
	 */
	segments = rthdr->hdrlen >> 1;

	addrs = ((struct rt0_hdr *)rthdr)->addr;
	final_addr = addrs[segments - 1];

	addrs += segments - segments_left;
	memmove(addrs + 1, addrs, (segments_left - 1) * sizeof(*addrs));

	addrs[0] = iph->daddr;
	iph->daddr = final_addr;
}
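
/* Walk the extension headers in front of AH and put them into the form they
 * will have at the final destination: mutable hop-by-hop and destination
 * options are zeroed, the routing header and (on output) the home address
 * option are rearranged.  @len covers the IPv6 header plus the extension
 * headers.
 */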
static int ipv6_clear_mutable_options(struct ipv6hdr *iph, int len, int dir)
{
	union {
		struct ipv6hdr *iph;
		struct ipv6_opt_hdr *opth;
		struct ipv6_rt_hdr *rth;
		char *raw;
	} exthdr = { .iph = iph };
	char *end = exthdr.raw + len;
	int nexthdr = iph->nexthdr;

	exthdr.iph++;

	while (exthdr.raw < end) {
		switch (nexthdr) {
		case NEXTHDR_DEST:
			if (dir == XFRM_POLICY_OUT)
				ipv6_rearrange_destopt(iph, exthdr.opth);
			fallthrough;
		case NEXTHDR_HOP:
			if (!zero_out_mutable_opts(exthdr.opth)) {
				net_dbg_ratelimited("overrun %sopts\n",
						    nexthdr == NEXTHDR_HOP ?
						    "hop" : "dest");
				return -EINVAL;
			}
			break;

		case NEXTHDR_ROUTING:
			ipv6_rearrange_rthdr(iph, exthdr.rth);
			break;

		default:
			return 0;
		}

		nexthdr = exthdr.opth->nexthdr;
		exthdr.raw += ipv6_optlen(exthdr.opth);
	}

	return 0;
}
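
/* Completion callback for asynchronous ICV computation on output: copy the
 * ICV into the AH header, restore the saved IPv6 header fields and extension
 * headers, free the scratch buffer and resume xfrm output processing.
 */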
static void ah6_output_done(struct crypto_async_request *base, int err)
{
	int extlen;
	u8 *iph_base;
	u8 *icv;
	struct sk_buff *skb = base->data;
	struct xfrm_state *x = skb_dst(skb)->xfrm;
	struct ah_data *ahp = x->data;
	struct ipv6hdr *top_iph = ipv6_hdr(skb);
	struct ip_auth_hdr *ah = ip_auth_hdr(skb);
	struct tmp_ext *iph_ext;

	extlen = skb_network_header_len(skb) - sizeof(struct ipv6hdr);
	if (extlen)
		extlen += sizeof(*iph_ext);

	iph_base = AH_SKB_CB(skb)->tmp;
	iph_ext = ah_tmp_ext(iph_base);
	icv = ah_tmp_icv(ahp->ahash, iph_ext, extlen);

	memcpy(ah->auth_data, icv, ahp->icv_trunc_len);
	memcpy(top_iph, iph_base, IPV6HDR_BASELEN);

	if (extlen) {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
		memcpy(&top_iph->saddr, iph_ext, extlen);
#else
		memcpy(&top_iph->daddr, iph_ext, extlen);
#endif
	}

	kfree(AH_SKB_CB(skb)->tmp);
	xfrm_output_resume(skb, err);
}
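
/* AH output: save the mutable parts of the IPv6 header, zero/rearrange them,
 * build the AH header and compute the ICV over the whole packet (plus the
 * high-order sequence number bits when extended sequence numbers are in
 * use), then restore the saved fields.
 */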
static int ah6_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;
	int nfrags;
	int extlen;
	u8 *iph_base;
	u8 *icv;
	u8 nexthdr;
	struct sk_buff *trailer;
	struct crypto_ahash *ahash;
	struct ahash_request *req;
	struct scatterlist *sg;
	struct ipv6hdr *top_iph;
	struct ip_auth_hdr *ah;
	struct ah_data *ahp;
	struct tmp_ext *iph_ext;
	int seqhi_len = 0;
	__be32 *seqhi;
	int sglists = 0;
	struct scatterlist *seqhisg;

	ahp = x->data;
	ahash = ahp->ahash;

	err = skb_cow_data(skb, 0, &trailer);
	if (err < 0)
		goto out;
	nfrags = err;

	skb_push(skb, -skb_network_offset(skb));
	extlen = skb_network_header_len(skb) - sizeof(struct ipv6hdr);
	if (extlen)
		extlen += sizeof(*iph_ext);

	if (x->props.flags & XFRM_STATE_ESN) {
		sglists = 1;
		seqhi_len = sizeof(*seqhi);
	}
	err = -ENOMEM;
	iph_base = ah_alloc_tmp(ahash, nfrags + sglists, IPV6HDR_BASELEN +
				extlen + seqhi_len);
	if (!iph_base)
		goto out;

	iph_ext = ah_tmp_ext(iph_base);
	seqhi = (__be32 *)((char *)iph_ext + extlen);
	icv = ah_tmp_icv(ahash, seqhi, seqhi_len);
	req = ah_tmp_req(ahash, icv);
	sg = ah_req_sg(ahash, req);
	seqhisg = sg + nfrags;

	ah = ip_auth_hdr(skb);
	memset(ah->auth_data, 0, ahp->icv_trunc_len);

	top_iph = ipv6_hdr(skb);
	top_iph->payload_len = htons(skb->len - sizeof(*top_iph));

	nexthdr = *skb_mac_header(skb);
	*skb_mac_header(skb) = IPPROTO_AH;

	/* When there are no extension headers, we only need to save the first
	 * 8 bytes of the base IP header.
	 */
	memcpy(iph_base, top_iph, IPV6HDR_BASELEN);

	if (extlen) {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
		memcpy(iph_ext, &top_iph->saddr, extlen);
#else
		memcpy(iph_ext, &top_iph->daddr, extlen);
#endif
		err = ipv6_clear_mutable_options(top_iph,
						 extlen - sizeof(*iph_ext) +
						 sizeof(*top_iph),
						 XFRM_POLICY_OUT);
		if (err)
			goto out_free;
	}

	ah->nexthdr = nexthdr;

	top_iph->priority = 0;
	top_iph->flow_lbl[0] = 0;
	top_iph->flow_lbl[1] = 0;
	top_iph->flow_lbl[2] = 0;
	top_iph->hop_limit = 0;

	ah->hdrlen = (XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2;

	ah->reserved = 0;
	ah->spi = x->id.spi;
	ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

	sg_init_table(sg, nfrags + sglists);
	err = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
	if (unlikely(err < 0))
		goto out_free;

	if (x->props.flags & XFRM_STATE_ESN) {
		/* Attach seqhi sg right after packet payload */
		*seqhi = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
		sg_set_buf(seqhisg, seqhi, seqhi_len);
	}

	ahash_request_set_crypt(req, sg, icv, skb->len + seqhi_len);
	ahash_request_set_callback(req, 0, ah6_output_done, skb);

	AH_SKB_CB(skb)->tmp = iph_base;

	err = crypto_ahash_digest(req);
	if (err) {
		if (err == -EINPROGRESS)
			goto out;

		if (err == -ENOSPC)
			err = NET_XMIT_DROP;
		goto out_free;
	}

	memcpy(ah->auth_data, icv, ahp->icv_trunc_len);
	memcpy(top_iph, iph_base, IPV6HDR_BASELEN);

	if (extlen) {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
		memcpy(&top_iph->saddr, iph_ext, extlen);
#else
		memcpy(&top_iph->daddr, iph_ext, extlen);
#endif
	}

out_free:
	kfree(iph_base);
out:
	return err;
}
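
/* Completion callback for asynchronous ICV computation on input: compare the
 * computed ICV with the received one, restore the saved headers, strip the
 * AH header and resume xfrm input processing with the next header value.
 */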
static void ah6_input_done(struct crypto_async_request *base, int err)
{
	u8 *auth_data;
	u8 *icv;
	u8 *work_iph;
	struct sk_buff *skb = base->data;
	struct xfrm_state *x = xfrm_input_state(skb);
	struct ah_data *ahp = x->data;
	struct ip_auth_hdr *ah = ip_auth_hdr(skb);
	int hdr_len = skb_network_header_len(skb);
	int ah_hlen = ipv6_authlen(ah);

	if (err)
		goto out;

	work_iph = AH_SKB_CB(skb)->tmp;
	auth_data = ah_tmp_auth(work_iph, hdr_len);
	icv = ah_tmp_icv(ahp->ahash, auth_data, ahp->icv_trunc_len);

	err = crypto_memneq(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG : 0;
	if (err)
		goto out;

	err = ah->nexthdr;

	skb->network_header += ah_hlen;
	memcpy(skb_network_header(skb), work_iph, hdr_len);
	__skb_pull(skb, ah_hlen + hdr_len);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -hdr_len);
out:
	kfree(AH_SKB_CB(skb)->tmp);
	xfrm_input_resume(skb, err);
}

static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
{
	/*
	 * Before AH processing:
	 * [IPv6][Ext1][Ext2][AH][Dest][Payload]
	 * |<-------------->| hdr_len
	 *
	 * To erase AH:
	 * Keep a copy of the cleared headers.  After AH processing, move
	 * skb->network_header forward by the AH header length, then copy the
	 * saved hdr_len bytes back.  If a destination options header follows
	 * AH, it thereby ends up after [Ext2].
	 *
	 * |<>|[IPv6][Ext1][Ext2][Dest][Payload]
	 * After processing, a gap the size of the AH header is left in front
	 * of the IPv6 header.
	 */
	u8 *auth_data;
	u8 *icv;
	u8 *work_iph;
	struct sk_buff *trailer;
	struct crypto_ahash *ahash;
	struct ahash_request *req;
	struct scatterlist *sg;
	struct ip_auth_hdr *ah;
	struct ipv6hdr *ip6h;
	struct ah_data *ahp;
	u16 hdr_len;
	u16 ah_hlen;
	int nexthdr;
	int nfrags;
	int err = -ENOMEM;
	int seqhi_len = 0;
	__be32 *seqhi;
	int sglists = 0;
	struct scatterlist *seqhisg;

	if (!pskb_may_pull(skb, sizeof(struct ip_auth_hdr)))
		goto out;

	/* We are going to _remove_ AH header to keep sockets happy,
	 * so... Later this can change. */
	if (skb_unclone(skb, GFP_ATOMIC))
		goto out;

	skb->ip_summed = CHECKSUM_NONE;

	hdr_len = skb_network_header_len(skb);
	ah = (struct ip_auth_hdr *)skb->data;
	ahp = x->data;
	ahash = ahp->ahash;

	nexthdr = ah->nexthdr;
	ah_hlen = ipv6_authlen(ah);

	if (ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_full_len) &&
	    ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len))
		goto out;

	if (!pskb_may_pull(skb, ah_hlen))
		goto out;

	err = skb_cow_data(skb, 0, &trailer);
	if (err < 0)
		goto out;
	nfrags = err;

	ah = (struct ip_auth_hdr *)skb->data;
	ip6h = ipv6_hdr(skb);

	skb_push(skb, hdr_len);

	if (x->props.flags & XFRM_STATE_ESN) {
		sglists = 1;
		seqhi_len = sizeof(*seqhi);
	}

	work_iph = ah_alloc_tmp(ahash, nfrags + sglists, hdr_len +
				ahp->icv_trunc_len + seqhi_len);
	if (!work_iph) {
		err = -ENOMEM;
		goto out;
	}

	auth_data = ah_tmp_auth((u8 *)work_iph, hdr_len);
	seqhi = (__be32 *)(auth_data + ahp->icv_trunc_len);
	icv = ah_tmp_icv(ahash, seqhi, seqhi_len);
	req = ah_tmp_req(ahash, icv);
	sg = ah_req_sg(ahash, req);
	seqhisg = sg + nfrags;

	memcpy(work_iph, ip6h, hdr_len);
	memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len);
	memset(ah->auth_data, 0, ahp->icv_trunc_len);

	err = ipv6_clear_mutable_options(ip6h, hdr_len, XFRM_POLICY_IN);
	if (err)
		goto out_free;

	ip6h->priority = 0;
	ip6h->flow_lbl[0] = 0;
	ip6h->flow_lbl[1] = 0;
	ip6h->flow_lbl[2] = 0;
	ip6h->hop_limit = 0;

	sg_init_table(sg, nfrags + sglists);
	err = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
	if (unlikely(err < 0))
		goto out_free;

	if (x->props.flags & XFRM_STATE_ESN) {
		/* Attach seqhi sg right after packet payload */
		*seqhi = XFRM_SKB_CB(skb)->seq.input.hi;
		sg_set_buf(seqhisg, seqhi, seqhi_len);
	}

	ahash_request_set_crypt(req, sg, icv, skb->len + seqhi_len);
	ahash_request_set_callback(req, 0, ah6_input_done, skb);

	AH_SKB_CB(skb)->tmp = work_iph;

	err = crypto_ahash_digest(req);
	if (err) {
		if (err == -EINPROGRESS)
			goto out;

		goto out_free;
	}

	err = crypto_memneq(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG : 0;
	if (err)
		goto out_free;

	skb->network_header += ah_hlen;
	memcpy(skb_network_header(skb), work_iph, hdr_len);
	__skb_pull(skb, ah_hlen + hdr_len);

	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -hdr_len);

	err = nexthdr;

out_free:
	kfree(work_iph);
out:
	return err;
}
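
/* ICMPv6 error handler: only Packet Too Big and redirect messages are
 * handled; the matching state is looked up by SPI and destination address,
 * and the route's PMTU or next hop is updated accordingly.
 */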
static int ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		   u8 type, u8 code, int offset, __be32 info)
{
	struct net *net = dev_net(skb->dev);
	struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
	struct ip_auth_hdr *ah = (struct ip_auth_hdr *)(skb->data + offset);
	struct xfrm_state *x;

	if (type != ICMPV6_PKT_TOOBIG &&
	    type != NDISC_REDIRECT)
		return 0;

	x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr,
			      ah->spi, IPPROTO_AH, AF_INET6);
	if (!x)
		return 0;

	if (type == NDISC_REDIRECT)
		ip6_redirect(skb, net, skb->dev->ifindex, 0,
			     sock_net_uid(net, NULL));
	else
		ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
	xfrm_state_put(x);

	return 0;
}
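
/* Set up an AH xfrm_state: allocate the hash transform named by x->aalg,
 * set its key, validate the ICV sizes against the xfrm algorithm
 * description and compute the AH header length for the selected mode.
 */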
static int ah6_init_state(struct xfrm_state *x)
{
	struct ah_data *ahp = NULL;
	struct xfrm_algo_desc *aalg_desc;
	struct crypto_ahash *ahash;

	if (!x->aalg)
		goto error;

	if (x->encap)
		goto error;

	ahp = kzalloc(sizeof(*ahp), GFP_KERNEL);
	if (!ahp)
		return -ENOMEM;

	ahash = crypto_alloc_ahash(x->aalg->alg_name, 0, 0);
	if (IS_ERR(ahash))
		goto error;

	ahp->ahash = ahash;
	if (crypto_ahash_setkey(ahash, x->aalg->alg_key,
				(x->aalg->alg_key_len + 7) / 8))
		goto error;

	/*
	 * Lookup the algorithm description maintained by xfrm_algo,
	 * verify crypto transform properties, and store information
	 * we need for AH processing.  This lookup cannot fail here
	 * after a successful crypto_alloc_ahash().
	 */
	aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
	BUG_ON(!aalg_desc);

	if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
	    crypto_ahash_digestsize(ahash)) {
		pr_info("AH: %s digestsize %u != %hu\n",
			x->aalg->alg_name, crypto_ahash_digestsize(ahash),
			aalg_desc->uinfo.auth.icv_fullbits/8);
		goto error;
	}

	ahp->icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
	ahp->icv_trunc_len = x->aalg->alg_trunc_len/8;

	x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) +
					  ahp->icv_trunc_len);
	switch (x->props.mode) {
	case XFRM_MODE_BEET:
	case XFRM_MODE_TRANSPORT:
		break;
	case XFRM_MODE_TUNNEL:
		x->props.header_len += sizeof(struct ipv6hdr);
		break;
	default:
		goto error;
	}
	x->data = ahp;

	return 0;

error:
	if (ahp) {
		crypto_free_ahash(ahp->ahash);
		kfree(ahp);
	}
	return -EINVAL;
}

static void ah6_destroy(struct xfrm_state *x)
{
	struct ah_data *ahp = x->data;

	if (!ahp)
		return;

	crypto_free_ahash(ahp->ahash);
	kfree(ahp);
}

static int ah6_rcv_cb(struct sk_buff *skb, int err)
{
	return 0;
}

static const struct xfrm_type ah6_type = {
	.description	= "AH6",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_AH,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= ah6_init_state,
	.destructor	= ah6_destroy,
	.input		= ah6_input,
	.output		= ah6_output,
	.hdr_offset	= xfrm6_find_1stfragopt,
};

static struct xfrm6_protocol ah6_protocol = {
	.handler	= xfrm6_rcv,
	.input_handler	= xfrm_input,
	.cb_handler	= ah6_rcv_cb,
	.err_handler	= ah6_err,
	.priority	= 0,
};

static int __init ah6_init(void)
{
	if (xfrm_register_type(&ah6_type, AF_INET6) < 0) {
		pr_info("%s: can't add xfrm type\n", __func__);
		return -EAGAIN;
	}

	if (xfrm6_protocol_register(&ah6_protocol, IPPROTO_AH) < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		xfrm_unregister_type(&ah6_type, AF_INET6);
		return -EAGAIN;
	}

	return 0;
}

static void __exit ah6_fini(void)
{
	if (xfrm6_protocol_deregister(&ah6_protocol, IPPROTO_AH) < 0)
		pr_info("%s: can't remove protocol\n", __func__);

	xfrm_unregister_type(&ah6_type, AF_INET6);
}

module_init(ah6_init);
module_exit(ah6_fini);

MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_AH);