exthdrs.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Extension Header handling for IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Andi Kleen		<ak@muc.de>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 */

/* Changes:
 *	yoshfuji		: ensure not to overrun while parsing
 *				  tlv options.
 *	Mitsuru KANDA @USAGI and: Remove ipv6_parse_exthdrs().
 *	YOSHIFUJI Hideaki @USAGI  Register inbound extension header
 *				  handlers as inet6_protocol{}.
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/icmpv6.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <net/dst.h>
#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/rawv6.h>
#include <net/ndisc.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/calipso.h>
#if IS_ENABLED(CONFIG_IPV6_MIP6)
#include <net/xfrm.h>
#endif
#include <linux/seg6.h>
#include <net/seg6.h>
#ifdef CONFIG_IPV6_SEG6_HMAC
#include <net/seg6_hmac.h>
#endif
#include <net/rpl.h>

#include <linux/uaccess.h>

/*
 *	Parsing tlv encoded headers.
 *
 *	Parsing function "func" returns true if parsing succeeded
 *	and false if it failed.
 *	It MUST NOT touch skb->h.
 */

struct tlvtype_proc {
	int	type;
	bool	(*func)(struct sk_buff *skb, int offset);
};

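/*
 * Each tlvtype_proc table below is terminated by an entry with a negative
 * type; ip6_parse_tlv() stops scanning the table at that sentinel and
 * treats the option as unknown.
 */
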
/*********************
  Generic functions
 *********************/

/* An unknown option is detected, decide what to do */

static bool ip6_tlvopt_unknown(struct sk_buff *skb, int optoff,
			       bool disallow_unknowns)
{
	if (disallow_unknowns) {
		/* If unknown TLVs are disallowed by configuration
		 * then always silently drop packet. Note this also
		 * means no ICMP parameter problem is sent which
		 * could be a good property to mitigate a reflection DOS
		 * attack.
		 */

		goto drop;
	}

	switch ((skb_network_header(skb)[optoff] & 0xC0) >> 6) {
	case 0: /* ignore */
		return true;

	case 1: /* drop packet */
		break;

	case 3: /* Send ICMP if not a multicast address and drop packet */
		/* Actually, this check is redundant. icmp_send
		 * will recheck in any case.
		 */
		if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr))
			break;
		fallthrough;
	case 2: /* send ICMP PARM PROB regardless and drop packet */
		icmpv6_param_prob(skb, ICMPV6_UNK_OPTION, optoff);
		return false;
	}

drop:
	kfree_skb(skb);
	return false;
}

/* Parse tlv encoded option header (hop-by-hop or destination) */

static bool ip6_parse_tlv(const struct tlvtype_proc *procs,
			  struct sk_buff *skb,
			  int max_count)
{
	int len = (skb_transport_header(skb)[1] + 1) << 3;
	const unsigned char *nh = skb_network_header(skb);
	int off = skb_network_header_len(skb);
	const struct tlvtype_proc *curr;
	bool disallow_unknowns = false;
	int tlv_count = 0;
	int padlen = 0;

	if (unlikely(max_count < 0)) {
		disallow_unknowns = true;
		max_count = -max_count;
	}

	if (skb_transport_offset(skb) + len > skb_headlen(skb))
		goto bad;

	off += 2;
	len -= 2;

	while (len > 0) {
		int optlen, i;

		if (nh[off] == IPV6_TLV_PAD1) {
			padlen++;
			if (padlen > 7)
				goto bad;
			off++;
			len--;
			continue;
		}
		if (len < 2)
			goto bad;
		optlen = nh[off + 1] + 2;
		if (optlen > len)
			goto bad;

		if (nh[off] == IPV6_TLV_PADN) {
			/* RFC 2460 states that the purpose of PadN is
			 * to align the containing header to multiples
			 * of 8. 7 is therefore the highest valid value.
			 * See also RFC 4942, Section 2.1.9.5.
			 */
			padlen += optlen;
			if (padlen > 7)
				goto bad;
			/* RFC 4942 recommends that receiving hosts
			 * actively check that the PadN payload contains
			 * only zeroes.
			 */
			for (i = 2; i < optlen; i++) {
				if (nh[off + i] != 0)
					goto bad;
			}
		} else {
			tlv_count++;
			if (tlv_count > max_count)
				goto bad;

			for (curr = procs; curr->type >= 0; curr++) {
				if (curr->type == nh[off]) {
					/* type specific length/alignment
					 * checks will be performed in the
					 * func().
					 */
					if (curr->func(skb, off) == false)
						return false;
					break;
				}
			}
			if (curr->type < 0 &&
			    !ip6_tlvopt_unknown(skb, off, disallow_unknowns))
				return false;

			padlen = 0;
		}
		off += optlen;
		len -= optlen;
	}

	if (len == 0)
		return true;
bad:
	kfree_skb(skb);
	return false;
}

/*****************************
  Destination options header.
 *****************************/

#if IS_ENABLED(CONFIG_IPV6_MIP6)
static bool ipv6_dest_hao(struct sk_buff *skb, int optoff)
{
	struct ipv6_destopt_hao *hao;
	struct inet6_skb_parm *opt = IP6CB(skb);
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	int ret;

	if (opt->dsthao) {
		net_dbg_ratelimited("hao duplicated\n");
		goto discard;
	}
	opt->dsthao = opt->dst1;
	opt->dst1 = 0;

	hao = (struct ipv6_destopt_hao *)(skb_network_header(skb) + optoff);

	if (hao->length != 16) {
		net_dbg_ratelimited("hao invalid option length = %d\n",
				    hao->length);
		goto discard;
	}

	if (!(ipv6_addr_type(&hao->addr) & IPV6_ADDR_UNICAST)) {
		net_dbg_ratelimited("hao is not an unicast addr: %pI6\n",
				    &hao->addr);
		goto discard;
	}

	ret = xfrm6_input_addr(skb, (xfrm_address_t *)&ipv6h->daddr,
			       (xfrm_address_t *)&hao->addr, IPPROTO_DSTOPTS);
	if (unlikely(ret < 0))
		goto discard;

	if (skb_cloned(skb)) {
		if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto discard;

		/* update all the variables used below to point into
		 * the copied skbuff
		 */
		hao = (struct ipv6_destopt_hao *)(skb_network_header(skb) +
						  optoff);
		ipv6h = ipv6_hdr(skb);
	}

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;

	swap(ipv6h->saddr, hao->addr);

	if (skb->tstamp == 0)
		__net_timestamp(skb);

	return true;

 discard:
	kfree_skb(skb);
	return false;
}
#endif

static const struct tlvtype_proc tlvprocdestopt_lst[] = {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
	{
		.type	= IPV6_TLV_HAO,
		.func	= ipv6_dest_hao,
	},
#endif
	{-1,			NULL}
};

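/*
 * Destination Options header receive handler. On success the transport
 * header is advanced past the extension header and 1 is returned so that
 * processing continues with the next header at opt->nhoff; on error the
 * skb has been freed and -1 is returned.
 */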
static int ipv6_destopt_rcv(struct sk_buff *skb)
{
	struct inet6_dev *idev = __in6_dev_get(skb->dev);
	struct inet6_skb_parm *opt = IP6CB(skb);
#if IS_ENABLED(CONFIG_IPV6_MIP6)
	__u16 dstbuf;
#endif
	struct dst_entry *dst = skb_dst(skb);
	struct net *net = dev_net(skb->dev);
	int extlen;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) ||
	    !pskb_may_pull(skb, (skb_transport_offset(skb) +
				 ((skb_transport_header(skb)[1] + 1) << 3)))) {
		__IP6_INC_STATS(dev_net(dst->dev), idev,
				IPSTATS_MIB_INHDRERRORS);
fail_and_free:
		kfree_skb(skb);
		return -1;
	}

	extlen = (skb_transport_header(skb)[1] + 1) << 3;
	if (extlen > net->ipv6.sysctl.max_dst_opts_len)
		goto fail_and_free;

	opt->lastopt = opt->dst1 = skb_network_header_len(skb);
#if IS_ENABLED(CONFIG_IPV6_MIP6)
	dstbuf = opt->dst1;
#endif

	if (ip6_parse_tlv(tlvprocdestopt_lst, skb,
			  net->ipv6.sysctl.max_dst_opts_cnt)) {
		skb->transport_header += extlen;
		opt = IP6CB(skb);
#if IS_ENABLED(CONFIG_IPV6_MIP6)
		opt->nhoff = dstbuf;
#else
		opt->nhoff = opt->dst1;
#endif
		return 1;
	}

	__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
	return -1;
}

static void seg6_update_csum(struct sk_buff *skb)
{
	struct ipv6_sr_hdr *hdr;
	struct in6_addr *addr;
	__be32 from, to;

	/* srh is at transport offset and seg_left is already decremented
	 * but daddr is not yet updated with next segment
	 */

	hdr = (struct ipv6_sr_hdr *)skb_transport_header(skb);
	addr = hdr->segments + hdr->segments_left;

	hdr->segments_left++;
	from = *(__be32 *)hdr;

	hdr->segments_left--;
	to = *(__be32 *)hdr;

	/* update skb csum with diff resulting from seg_left decrement */

	update_csum_diff4(skb, from, to);

	/* compute csum diff between current and next segment and update */

	update_csum_diff16(skb, (__be32 *)(&ipv6_hdr(skb)->daddr),
			   (__be32 *)addr);
}

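/*
 * Segment Routing Header (routing type 4, RFC 8754) input processing:
 * either deliver locally once segments_left reaches zero, or swap the
 * next segment into the destination address and re-route the packet.
 */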
static int ipv6_srh_rcv(struct sk_buff *skb)
{
	struct inet6_skb_parm *opt = IP6CB(skb);
	struct net *net = dev_net(skb->dev);
	struct ipv6_sr_hdr *hdr;
	struct inet6_dev *idev;
	struct in6_addr *addr;
	int accept_seg6;

	hdr = (struct ipv6_sr_hdr *)skb_transport_header(skb);

	idev = __in6_dev_get(skb->dev);

	accept_seg6 = net->ipv6.devconf_all->seg6_enabled;
	if (accept_seg6 > idev->cnf.seg6_enabled)
		accept_seg6 = idev->cnf.seg6_enabled;

	if (!accept_seg6) {
		kfree_skb(skb);
		return -1;
	}

#ifdef CONFIG_IPV6_SEG6_HMAC
	if (!seg6_hmac_validate_skb(skb)) {
		kfree_skb(skb);
		return -1;
	}
#endif

looped_back:
	if (hdr->segments_left == 0) {
		if (hdr->nexthdr == NEXTHDR_IPV6) {
			int offset = (hdr->hdrlen + 1) << 3;

			skb_postpull_rcsum(skb, skb_network_header(skb),
					   skb_network_header_len(skb));

			if (!pskb_pull(skb, offset)) {
				kfree_skb(skb);
				return -1;
			}
			skb_postpull_rcsum(skb, skb_transport_header(skb),
					   offset);

			skb_reset_network_header(skb);
			skb_reset_transport_header(skb);
			skb->encapsulation = 0;

			__skb_tunnel_rx(skb, skb->dev, net);

			netif_rx(skb);
			return -1;
		}

		opt->srcrt = skb_network_header_len(skb);
		opt->lastopt = opt->srcrt;
		skb->transport_header += (hdr->hdrlen + 1) << 3;
		opt->nhoff = (&hdr->nexthdr) - skb_network_header(skb);

		return 1;
	}

	if (hdr->segments_left >= (hdr->hdrlen >> 1)) {
		__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
				  ((&hdr->segments_left) -
				   skb_network_header(skb)));
		return -1;
	}

	if (skb_cloned(skb)) {
		if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
					IPSTATS_MIB_OUTDISCARDS);
			kfree_skb(skb);
			return -1;
		}
	}

	hdr = (struct ipv6_sr_hdr *)skb_transport_header(skb);

	hdr->segments_left--;
	addr = hdr->segments + hdr->segments_left;

	skb_push(skb, sizeof(struct ipv6hdr));

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		seg6_update_csum(skb);

	ipv6_hdr(skb)->daddr = *addr;

	skb_dst_drop(skb);

	ip6_route_input(skb);

	if (skb_dst(skb)->error) {
		dst_input(skb);
		return -1;
	}

	if (skb_dst(skb)->dev->flags & IFF_LOOPBACK) {
		if (ipv6_hdr(skb)->hop_limit <= 1) {
			__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
			icmpv6_send(skb, ICMPV6_TIME_EXCEED,
				    ICMPV6_EXC_HOPLIMIT, 0);
			kfree_skb(skb);
			return -1;
		}

		ipv6_hdr(skb)->hop_limit--;

		skb_pull(skb, sizeof(struct ipv6hdr));
		goto looped_back;
	}

	dst_input(skb);

	return -1;
}

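/*
 * RPL Source Routing Header (routing type 3, RFC 6554) input processing.
 * The compressed segment list is decompressed into a temporary buffer,
 * the next segment is swapped into the destination address, the header
 * is re-compressed in place and the packet is re-routed.
 */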
static int ipv6_rpl_srh_rcv(struct sk_buff *skb)
{
	struct ipv6_rpl_sr_hdr *hdr, *ohdr, *chdr;
	struct inet6_skb_parm *opt = IP6CB(skb);
	struct net *net = dev_net(skb->dev);
	struct inet6_dev *idev;
	struct ipv6hdr *oldhdr;
	struct in6_addr addr;
	unsigned char *buf;
	int accept_rpl_seg;
	int i, err;
	u64 n = 0;
	u32 r;

	idev = __in6_dev_get(skb->dev);

	accept_rpl_seg = net->ipv6.devconf_all->rpl_seg_enabled;
	if (accept_rpl_seg > idev->cnf.rpl_seg_enabled)
		accept_rpl_seg = idev->cnf.rpl_seg_enabled;

	if (!accept_rpl_seg) {
		kfree_skb(skb);
		return -1;
	}

looped_back:
	hdr = (struct ipv6_rpl_sr_hdr *)skb_transport_header(skb);

	if (hdr->segments_left == 0) {
		if (hdr->nexthdr == NEXTHDR_IPV6) {
			int offset = (hdr->hdrlen + 1) << 3;

			skb_postpull_rcsum(skb, skb_network_header(skb),
					   skb_network_header_len(skb));

			if (!pskb_pull(skb, offset)) {
				kfree_skb(skb);
				return -1;
			}
			skb_postpull_rcsum(skb, skb_transport_header(skb),
					   offset);

			skb_reset_network_header(skb);
			skb_reset_transport_header(skb);
			skb->encapsulation = 0;

			__skb_tunnel_rx(skb, skb->dev, net);

			netif_rx(skb);
			return -1;
		}

		opt->srcrt = skb_network_header_len(skb);
		opt->lastopt = opt->srcrt;
		skb->transport_header += (hdr->hdrlen + 1) << 3;
		opt->nhoff = (&hdr->nexthdr) - skb_network_header(skb);

		return 1;
	}

	if (!pskb_may_pull(skb, sizeof(*hdr))) {
		kfree_skb(skb);
		return -1;
	}

	n = (hdr->hdrlen << 3) - hdr->pad - (16 - hdr->cmpre);
	r = do_div(n, (16 - hdr->cmpri));
	/* Check that the calculation had no remainder and that n fits
	 * into an unsigned char, which is the type of the segments_left
	 * field; it must not be higher than that.
	 */
	if (r || (n + 1) > 255) {
		kfree_skb(skb);
		return -1;
	}

	if (hdr->segments_left > n + 1) {
		__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
				  ((&hdr->segments_left) -
				   skb_network_header(skb)));
		return -1;
	}

	if (skb_cloned(skb)) {
		if (pskb_expand_head(skb, IPV6_RPL_SRH_WORST_SWAP_SIZE, 0,
				     GFP_ATOMIC)) {
			__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
					IPSTATS_MIB_OUTDISCARDS);
			kfree_skb(skb);
			return -1;
		}
	} else {
		err = skb_cow_head(skb, IPV6_RPL_SRH_WORST_SWAP_SIZE);
		if (unlikely(err)) {
			kfree_skb(skb);
			return -1;
		}
	}

	hdr = (struct ipv6_rpl_sr_hdr *)skb_transport_header(skb);

	if (!pskb_may_pull(skb, ipv6_rpl_srh_size(n, hdr->cmpri,
						  hdr->cmpre))) {
		kfree_skb(skb);
		return -1;
	}

	hdr->segments_left--;
	i = n - hdr->segments_left;

	buf = kcalloc(struct_size(hdr, segments.addr, n + 2), 2, GFP_ATOMIC);
	if (unlikely(!buf)) {
		kfree_skb(skb);
		return -1;
	}

	ohdr = (struct ipv6_rpl_sr_hdr *)buf;
	ipv6_rpl_srh_decompress(ohdr, hdr, &ipv6_hdr(skb)->daddr, n);
	chdr = (struct ipv6_rpl_sr_hdr *)(buf + ((ohdr->hdrlen + 1) << 3));

	if ((ipv6_addr_type(&ipv6_hdr(skb)->daddr) & IPV6_ADDR_MULTICAST) ||
	    (ipv6_addr_type(&ohdr->rpl_segaddr[i]) & IPV6_ADDR_MULTICAST)) {
		kfree_skb(skb);
		kfree(buf);
		return -1;
	}

	err = ipv6_chk_rpl_srh_loop(net, ohdr->rpl_segaddr, n + 1);
	if (err) {
		icmpv6_send(skb, ICMPV6_PARAMPROB, 0, 0);
		kfree_skb(skb);
		kfree(buf);
		return -1;
	}

	addr = ipv6_hdr(skb)->daddr;
	ipv6_hdr(skb)->daddr = ohdr->rpl_segaddr[i];
	ohdr->rpl_segaddr[i] = addr;

	ipv6_rpl_srh_compress(chdr, ohdr, &ipv6_hdr(skb)->daddr, n);

	oldhdr = ipv6_hdr(skb);

	skb_pull(skb, ((hdr->hdrlen + 1) << 3));
	skb_postpull_rcsum(skb, oldhdr,
			   sizeof(struct ipv6hdr) + ((hdr->hdrlen + 1) << 3));
	skb_push(skb, ((chdr->hdrlen + 1) << 3) + sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	skb_mac_header_rebuild(skb);
	skb_set_transport_header(skb, sizeof(struct ipv6hdr));

	memmove(ipv6_hdr(skb), oldhdr, sizeof(struct ipv6hdr));
	memcpy(skb_transport_header(skb), chdr, (chdr->hdrlen + 1) << 3);

	ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
	skb_postpush_rcsum(skb, ipv6_hdr(skb),
			   sizeof(struct ipv6hdr) + ((chdr->hdrlen + 1) << 3));

	kfree(buf);

	skb_dst_drop(skb);

	ip6_route_input(skb);

	if (skb_dst(skb)->error) {
		dst_input(skb);
		return -1;
	}

	if (skb_dst(skb)->dev->flags & IFF_LOOPBACK) {
		if (ipv6_hdr(skb)->hop_limit <= 1) {
			__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
			icmpv6_send(skb, ICMPV6_TIME_EXCEED,
				    ICMPV6_EXC_HOPLIMIT, 0);
			kfree_skb(skb);
			return -1;
		}

		ipv6_hdr(skb)->hop_limit--;

		skb_pull(skb, sizeof(struct ipv6hdr));
		goto looped_back;
	}

	dst_input(skb);

	return -1;
}

/********************************
  Routing header.
 ********************************/

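/*
 * Generic routing header handler: type 4 (segment routing) and type 3
 * (RPL source routing) are dispatched to their dedicated handlers above;
 * only RTH type 2 (Mobile IPv6, when CONFIG_IPV6_MIP6 is enabled) is
 * handled inline, everything else is rejected with an ICMP parameter
 * problem.
 */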
/* called with rcu_read_lock() */
static int ipv6_rthdr_rcv(struct sk_buff *skb)
{
	struct inet6_dev *idev = __in6_dev_get(skb->dev);
	struct inet6_skb_parm *opt = IP6CB(skb);
	struct in6_addr *addr = NULL;
	struct in6_addr daddr;
	int n, i;
	struct ipv6_rt_hdr *hdr;
	struct rt0_hdr *rthdr;
	struct net *net = dev_net(skb->dev);
	int accept_source_route = net->ipv6.devconf_all->accept_source_route;

	idev = __in6_dev_get(skb->dev);
	if (idev && accept_source_route > idev->cnf.accept_source_route)
		accept_source_route = idev->cnf.accept_source_route;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) ||
	    !pskb_may_pull(skb, (skb_transport_offset(skb) +
				 ((skb_transport_header(skb)[1] + 1) << 3)))) {
		__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
		kfree_skb(skb);
		return -1;
	}

	hdr = (struct ipv6_rt_hdr *)skb_transport_header(skb);

	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) ||
	    skb->pkt_type != PACKET_HOST) {
		__IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS);
		kfree_skb(skb);
		return -1;
	}

	switch (hdr->type) {
	case IPV6_SRCRT_TYPE_4:
		/* segment routing */
		return ipv6_srh_rcv(skb);
	case IPV6_SRCRT_TYPE_3:
		/* rpl segment routing */
		return ipv6_rpl_srh_rcv(skb);
	default:
		break;
	}

looped_back:
	if (hdr->segments_left == 0) {
		switch (hdr->type) {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
		case IPV6_SRCRT_TYPE_2:
			/* Silently discard type 2 header unless it was
			 * processed by ourselves
			 */
			if (!addr) {
				__IP6_INC_STATS(net, idev,
						IPSTATS_MIB_INADDRERRORS);
				kfree_skb(skb);
				return -1;
			}
			break;
#endif
		default:
			break;
		}

		opt->lastopt = opt->srcrt = skb_network_header_len(skb);
		skb->transport_header += (hdr->hdrlen + 1) << 3;
		opt->dst0 = opt->dst1;
		opt->dst1 = 0;
		opt->nhoff = (&hdr->nexthdr) - skb_network_header(skb);
		return 1;
	}

	switch (hdr->type) {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
	case IPV6_SRCRT_TYPE_2:
		if (accept_source_route < 0)
			goto unknown_rh;
		/* Silently discard invalid RTH type 2 */
		if (hdr->hdrlen != 2 || hdr->segments_left != 1) {
			__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
			kfree_skb(skb);
			return -1;
		}
		break;
#endif
	default:
		goto unknown_rh;
	}

	/*
	 *	This is the routing header forwarding algorithm from
	 *	RFC 2460, page 16.
	 */

	n = hdr->hdrlen >> 1;

	if (hdr->segments_left > n) {
		__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
				  ((&hdr->segments_left) -
				   skb_network_header(skb)));
		return -1;
	}

	/* We are about to mangle packet header. Be careful!
	 * Do not damage packets queued somewhere.
	 */
	if (skb_cloned(skb)) {
		/* the copy is a forwarded packet */
		if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
					IPSTATS_MIB_OUTDISCARDS);
			kfree_skb(skb);
			return -1;
		}
		hdr = (struct ipv6_rt_hdr *)skb_transport_header(skb);
	}

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;

	i = n - --hdr->segments_left;

	rthdr = (struct rt0_hdr *) hdr;
	addr = rthdr->addr;
	addr += i - 1;

	switch (hdr->type) {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
	case IPV6_SRCRT_TYPE_2:
		if (xfrm6_input_addr(skb, (xfrm_address_t *)addr,
				     (xfrm_address_t *)&ipv6_hdr(skb)->saddr,
				     IPPROTO_ROUTING) < 0) {
			__IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS);
			kfree_skb(skb);
			return -1;
		}
		if (!ipv6_chk_home_addr(dev_net(skb_dst(skb)->dev), addr)) {
			__IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS);
			kfree_skb(skb);
			return -1;
		}
		break;
#endif
	default:
		break;
	}

	if (ipv6_addr_is_multicast(addr)) {
		__IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS);
		kfree_skb(skb);
		return -1;
	}

	daddr = *addr;
	*addr = ipv6_hdr(skb)->daddr;
	ipv6_hdr(skb)->daddr = daddr;

	skb_dst_drop(skb);
	ip6_route_input(skb);
	if (skb_dst(skb)->error) {
		skb_push(skb, skb->data - skb_network_header(skb));
		dst_input(skb);
		return -1;
	}

	if (skb_dst(skb)->dev->flags & IFF_LOOPBACK) {
		if (ipv6_hdr(skb)->hop_limit <= 1) {
			__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
			icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
				    0);
			kfree_skb(skb);
			return -1;
		}
		ipv6_hdr(skb)->hop_limit--;
		goto looped_back;
	}

	skb_push(skb, skb->data - skb_network_header(skb));
	dst_input(skb);
	return -1;

unknown_rh:
	__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
	icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
			  (&hdr->type) - skb_network_header(skb));
	return -1;
}

static const struct inet6_protocol rthdr_protocol = {
	.handler	=	ipv6_rthdr_rcv,
	.flags		=	INET6_PROTO_NOPOLICY,
};

static const struct inet6_protocol destopt_protocol = {
	.handler	=	ipv6_destopt_rcv,
	.flags		=	INET6_PROTO_NOPOLICY,
};

static const struct inet6_protocol nodata_protocol = {
	.handler	=	dst_discard,
	.flags		=	INET6_PROTO_NOPOLICY,
};

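/*
 * Register the extension header handlers with the inet6 protocol
 * dispatcher; on failure the handlers already registered are removed
 * again in reverse order.
 */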
int __init ipv6_exthdrs_init(void)
{
	int ret;

	ret = inet6_add_protocol(&rthdr_protocol, IPPROTO_ROUTING);
	if (ret)
		goto out;

	ret = inet6_add_protocol(&destopt_protocol, IPPROTO_DSTOPTS);
	if (ret)
		goto out_rthdr;

	ret = inet6_add_protocol(&nodata_protocol, IPPROTO_NONE);
	if (ret)
		goto out_destopt;

out:
	return ret;
out_destopt:
	inet6_del_protocol(&destopt_protocol, IPPROTO_DSTOPTS);
out_rthdr:
	inet6_del_protocol(&rthdr_protocol, IPPROTO_ROUTING);
	goto out;
}

void ipv6_exthdrs_exit(void)
{
	inet6_del_protocol(&nodata_protocol, IPPROTO_NONE);
	inet6_del_protocol(&destopt_protocol, IPPROTO_DSTOPTS);
	inet6_del_protocol(&rthdr_protocol, IPPROTO_ROUTING);
}

/**********************************
  Hop-by-hop options.
 **********************************/

/*
 * Note: we cannot rely on skb_dst(skb) before we assign it in ip6_route_input().
 */
static inline struct inet6_dev *ipv6_skb_idev(struct sk_buff *skb)
{
	return skb_dst(skb) ? ip6_dst_idev(skb_dst(skb)) : __in6_dev_get(skb->dev);
}

static inline struct net *ipv6_skb_net(struct sk_buff *skb)
{
	return skb_dst(skb) ? dev_net(skb_dst(skb)->dev) : dev_net(skb->dev);
}

/* Router Alert as of RFC 2711 */

static bool ipv6_hop_ra(struct sk_buff *skb, int optoff)
{
	const unsigned char *nh = skb_network_header(skb);

	if (nh[optoff + 1] == 2) {
		IP6CB(skb)->flags |= IP6SKB_ROUTERALERT;
		memcpy(&IP6CB(skb)->ra, nh + optoff + 2, sizeof(IP6CB(skb)->ra));
		return true;
	}
	net_dbg_ratelimited("ipv6_hop_ra: wrong RA length %d\n",
			    nh[optoff + 1]);
	kfree_skb(skb);
	return false;
}

/* Jumbo payload */

static bool ipv6_hop_jumbo(struct sk_buff *skb, int optoff)
{
	const unsigned char *nh = skb_network_header(skb);
	struct inet6_dev *idev = __in6_dev_get_safely(skb->dev);
	struct net *net = ipv6_skb_net(skb);
	u32 pkt_len;

	if (nh[optoff + 1] != 4 || (optoff & 3) != 2) {
		net_dbg_ratelimited("ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n",
				    nh[optoff+1]);
		__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
		goto drop;
	}

	pkt_len = ntohl(*(__be32 *)(nh + optoff + 2));
	if (pkt_len <= IPV6_MAXPLEN) {
		__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff+2);
		return false;
	}
	if (ipv6_hdr(skb)->payload_len) {
		__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff);
		return false;
	}

	if (pkt_len > skb->len - sizeof(struct ipv6hdr)) {
		__IP6_INC_STATS(net, idev, IPSTATS_MIB_INTRUNCATEDPKTS);
		goto drop;
	}

	if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr)))
		goto drop;

	IP6CB(skb)->flags |= IP6SKB_JUMBOGRAM;
	return true;

drop:
	kfree_skb(skb);
	return false;
}

/* CALIPSO RFC 5570 */

static bool ipv6_hop_calipso(struct sk_buff *skb, int optoff)
{
	const unsigned char *nh = skb_network_header(skb);

	if (nh[optoff + 1] < 8)
		goto drop;

	if (nh[optoff + 6] * 4 + 8 > nh[optoff + 1])
		goto drop;

	if (!calipso_validate(skb, nh + optoff))
		goto drop;

	return true;

drop:
	kfree_skb(skb);
	return false;
}

static const struct tlvtype_proc tlvprochopopt_lst[] = {
	{
		.type	= IPV6_TLV_ROUTERALERT,
		.func	= ipv6_hop_ra,
	},
	{
		.type	= IPV6_TLV_JUMBO,
		.func	= ipv6_hop_jumbo,
	},
	{
		.type	= IPV6_TLV_CALIPSO,
		.func	= ipv6_hop_calipso,
	},
	{ -1, }
};

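/*
 * Parse the Hop-by-hop Options header, which (when present) must
 * immediately follow the fixed IPv6 header. Returns 1 and advances the
 * transport header on success, or -1 after freeing the skb on error.
 * Presumably invoked directly from the IPv6 receive path rather than via
 * inet6_protocol, since no handler is registered for NEXTHDR_HOP in this
 * file.
 */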
int ipv6_parse_hopopts(struct sk_buff *skb)
{
	struct inet6_skb_parm *opt = IP6CB(skb);
	struct net *net = dev_net(skb->dev);
	int extlen;

	/*
	 * skb_network_header(skb) is equal to skb->data, and
	 * skb_network_header_len(skb) is always equal to
	 * sizeof(struct ipv6hdr) by definition of
	 * hop-by-hop options.
	 */
	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + 8) ||
	    !pskb_may_pull(skb, (sizeof(struct ipv6hdr) +
				 ((skb_transport_header(skb)[1] + 1) << 3)))) {
fail_and_free:
		kfree_skb(skb);
		return -1;
	}

	extlen = (skb_transport_header(skb)[1] + 1) << 3;
	if (extlen > net->ipv6.sysctl.max_hbh_opts_len)
		goto fail_and_free;

	opt->flags |= IP6SKB_HOPBYHOP;
	if (ip6_parse_tlv(tlvprochopopt_lst, skb,
			  net->ipv6.sysctl.max_hbh_opts_cnt)) {
		skb->transport_header += extlen;
		opt = IP6CB(skb);
		opt->nhoff = sizeof(struct ipv6hdr);
		return 1;
	}
	return -1;
}

/*
 *	Creating outbound headers.
 *
 *	"build" functions work when skb is filled from head to tail (datagram)
 *	"push" functions work when headers are added from tail to head (tcp)
 *
 *	In both cases we assume that the caller has reserved enough room
 *	for headers.
 */

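/*
 * Push a type 0/2 routing header in front of the current headers: the
 * packet's final destination is stored in the last address slot and
 * *addr_p is redirected to the first intermediate hop, which the caller
 * then uses as the IPv6 destination address.
 */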
static void ipv6_push_rthdr0(struct sk_buff *skb, u8 *proto,
			     struct ipv6_rt_hdr *opt,
			     struct in6_addr **addr_p, struct in6_addr *saddr)
{
	struct rt0_hdr *phdr, *ihdr;
	int hops;

	ihdr = (struct rt0_hdr *) opt;

	phdr = skb_push(skb, (ihdr->rt_hdr.hdrlen + 1) << 3);
	memcpy(phdr, ihdr, sizeof(struct rt0_hdr));

	hops = ihdr->rt_hdr.hdrlen >> 1;

	if (hops > 1)
		memcpy(phdr->addr, ihdr->addr + 1,
		       (hops - 1) * sizeof(struct in6_addr));

	phdr->addr[hops - 1] = **addr_p;
	*addr_p = ihdr->addr;

	phdr->rt_hdr.nexthdr = *proto;
	*proto = NEXTHDR_ROUTING;
}

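/*
 * Push a type 4 (segment routing) header: the final destination becomes
 * segments[0], *addr_p is redirected to the currently active segment,
 * any TLVs following the segment list are copied as well, and the HMAC
 * TLV (if present) is recomputed for the given source address.
 */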
static void ipv6_push_rthdr4(struct sk_buff *skb, u8 *proto,
			     struct ipv6_rt_hdr *opt,
			     struct in6_addr **addr_p, struct in6_addr *saddr)
{
	struct ipv6_sr_hdr *sr_phdr, *sr_ihdr;
	int plen, hops;

	sr_ihdr = (struct ipv6_sr_hdr *)opt;
	plen = (sr_ihdr->hdrlen + 1) << 3;

	sr_phdr = skb_push(skb, plen);
	memcpy(sr_phdr, sr_ihdr, sizeof(struct ipv6_sr_hdr));

	hops = sr_ihdr->first_segment + 1;
	memcpy(sr_phdr->segments + 1, sr_ihdr->segments + 1,
	       (hops - 1) * sizeof(struct in6_addr));

	sr_phdr->segments[0] = **addr_p;
	*addr_p = &sr_ihdr->segments[sr_ihdr->segments_left];

	if (sr_ihdr->hdrlen > hops * 2) {
		int tlvs_offset, tlvs_length;

		tlvs_offset = (1 + hops * 2) << 3;
		tlvs_length = (sr_ihdr->hdrlen - hops * 2) << 3;
		memcpy((char *)sr_phdr + tlvs_offset,
		       (char *)sr_ihdr + tlvs_offset, tlvs_length);
	}

#ifdef CONFIG_IPV6_SEG6_HMAC
	if (sr_has_hmac(sr_phdr)) {
		struct net *net = NULL;

		if (skb->dev)
			net = dev_net(skb->dev);
		else if (skb->sk)
			net = sock_net(skb->sk);

		WARN_ON(!net);

		if (net)
			seg6_push_hmac(net, saddr, sr_phdr);
	}
#endif

	sr_phdr->nexthdr = *proto;
	*proto = NEXTHDR_ROUTING;
}

static void ipv6_push_rthdr(struct sk_buff *skb, u8 *proto,
			    struct ipv6_rt_hdr *opt,
			    struct in6_addr **addr_p, struct in6_addr *saddr)
{
	switch (opt->type) {
	case IPV6_SRCRT_TYPE_0:
	case IPV6_SRCRT_STRICT:
	case IPV6_SRCRT_TYPE_2:
		ipv6_push_rthdr0(skb, proto, opt, addr_p, saddr);
		break;
	case IPV6_SRCRT_TYPE_4:
		ipv6_push_rthdr4(skb, proto, opt, addr_p, saddr);
		break;
	default:
		break;
	}
}

static void ipv6_push_exthdr(struct sk_buff *skb, u8 *proto, u8 type, struct ipv6_opt_hdr *opt)
{
	struct ipv6_opt_hdr *h = skb_push(skb, ipv6_optlen(opt));

	memcpy(h, opt, ipv6_optlen(opt));
	h->nexthdr = *proto;
	*proto = type;
}

void ipv6_push_nfrag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt,
			  u8 *proto,
			  struct in6_addr **daddr, struct in6_addr *saddr)
{
	if (opt->srcrt) {
		ipv6_push_rthdr(skb, proto, opt->srcrt, daddr, saddr);
		/*
		 * IPV6_RTHDRDSTOPTS is ignored
		 * unless IPV6_RTHDR is set (RFC3542).
		 */
		if (opt->dst0opt)
			ipv6_push_exthdr(skb, proto, NEXTHDR_DEST, opt->dst0opt);
	}
	if (opt->hopopt)
		ipv6_push_exthdr(skb, proto, NEXTHDR_HOP, opt->hopopt);
}

void ipv6_push_frag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt, u8 *proto)
{
	if (opt->dst1opt)
		ipv6_push_exthdr(skb, proto, NEXTHDR_DEST, opt->dst1opt);
}
EXPORT_SYMBOL(ipv6_push_frag_opts);

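/*
 * Duplicate a struct ipv6_txoptions blob. The option headers live in the
 * same allocation right after the structure, so the interior pointers of
 * the copy are fixed up by the byte offset between the two allocations.
 */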
struct ipv6_txoptions *
ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt)
{
	struct ipv6_txoptions *opt2;

	opt2 = sock_kmalloc(sk, opt->tot_len, GFP_ATOMIC);
	if (opt2) {
		long dif = (char *)opt2 - (char *)opt;
		memcpy(opt2, opt, opt->tot_len);
		if (opt2->hopopt)
			*((char **)&opt2->hopopt) += dif;
		if (opt2->dst0opt)
			*((char **)&opt2->dst0opt) += dif;
		if (opt2->dst1opt)
			*((char **)&opt2->dst1opt) += dif;
		if (opt2->srcrt)
			*((char **)&opt2->srcrt) += dif;
		refcount_set(&opt2->refcnt, 1);
	}
	return opt2;
}
EXPORT_SYMBOL_GPL(ipv6_dup_options);

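/*
 * Helper for ipv6_renew_options(): copy either the replacement option
 * (when renewtype matches newtype) or the existing one into the buffer
 * at *p, point *dest at it and advance *p past the aligned length.
 */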
static void ipv6_renew_option(int renewtype,
			      struct ipv6_opt_hdr **dest,
			      struct ipv6_opt_hdr *old,
			      struct ipv6_opt_hdr *new,
			      int newtype, char **p)
{
	struct ipv6_opt_hdr *src;

	src = (renewtype == newtype ? new : old);
	if (!src)
		return;

	memcpy(*p, src, ipv6_optlen(src));
	*dest = (struct ipv6_opt_hdr *)*p;
	*p += CMSG_ALIGN(ipv6_optlen(*dest));
}

/**
 * ipv6_renew_options - replace a specific ext hdr with a new one.
 *
 * @sk: sock from which to allocate memory
 * @opt: original options
 * @newtype: option type to replace in @opt
 * @newopt: new option of type @newtype to replace (user-mem)
 *
 * Returns a new set of options which is a copy of @opt with the
 * option type @newtype replaced with @newopt.
 *
 * @opt may be NULL, in which case a new set of options is returned
 * containing just @newopt.
 *
 * @newopt may be NULL, in which case the specified option type is
 * not copied into the new set of options.
 *
 * The new set of options is allocated from the socket option memory
 * buffer of @sk.
 */
struct ipv6_txoptions *
ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
		   int newtype, struct ipv6_opt_hdr *newopt)
{
	int tot_len = 0;
	char *p;
	struct ipv6_txoptions *opt2;

	if (opt) {
		if (newtype != IPV6_HOPOPTS && opt->hopopt)
			tot_len += CMSG_ALIGN(ipv6_optlen(opt->hopopt));
		if (newtype != IPV6_RTHDRDSTOPTS && opt->dst0opt)
			tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst0opt));
		if (newtype != IPV6_RTHDR && opt->srcrt)
			tot_len += CMSG_ALIGN(ipv6_optlen(opt->srcrt));
		if (newtype != IPV6_DSTOPTS && opt->dst1opt)
			tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst1opt));
	}

	if (newopt)
		tot_len += CMSG_ALIGN(ipv6_optlen(newopt));

	if (!tot_len)
		return NULL;

	tot_len += sizeof(*opt2);
	opt2 = sock_kmalloc(sk, tot_len, GFP_ATOMIC);
	if (!opt2)
		return ERR_PTR(-ENOBUFS);

	memset(opt2, 0, tot_len);
	refcount_set(&opt2->refcnt, 1);
	opt2->tot_len = tot_len;
	p = (char *)(opt2 + 1);

	ipv6_renew_option(IPV6_HOPOPTS, &opt2->hopopt,
			  (opt ? opt->hopopt : NULL),
			  newopt, newtype, &p);
	ipv6_renew_option(IPV6_RTHDRDSTOPTS, &opt2->dst0opt,
			  (opt ? opt->dst0opt : NULL),
			  newopt, newtype, &p);
	ipv6_renew_option(IPV6_RTHDR,
			  (struct ipv6_opt_hdr **)&opt2->srcrt,
			  (opt ? (struct ipv6_opt_hdr *)opt->srcrt : NULL),
			  newopt, newtype, &p);
	ipv6_renew_option(IPV6_DSTOPTS, &opt2->dst1opt,
			  (opt ? opt->dst1opt : NULL),
			  newopt, newtype, &p);

	opt2->opt_nflen = (opt2->hopopt ? ipv6_optlen(opt2->hopopt) : 0) +
			  (opt2->dst0opt ? ipv6_optlen(opt2->dst0opt) : 0) +
			  (opt2->srcrt ? ipv6_optlen(opt2->srcrt) : 0);
	opt2->opt_flen = (opt2->dst1opt ? ipv6_optlen(opt2->dst1opt) : 0);

	return opt2;
}

struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
					  struct ipv6_txoptions *opt)
{
	/*
	 * ignore the dest before srcrt unless srcrt is being included.
	 * --yoshfuji
	 */
	if (opt && opt->dst0opt && !opt->srcrt) {
		if (opt_space != opt) {
			memcpy(opt_space, opt, sizeof(*opt_space));
			opt = opt_space;
		}
		opt->opt_nflen -= ipv6_optlen(opt->dst0opt);
		opt->dst0opt = NULL;
	}

	return opt;
}
EXPORT_SYMBOL_GPL(ipv6_fixup_options);

/**
 * fl6_update_dst - update flowi destination address with info given
 *                  by srcrt option, if any.
 *
 * @fl6: flowi6 for which daddr is to be updated
 * @opt: struct ipv6_txoptions in which to look for srcrt opt
 * @orig: copy of original daddr address if modified
 *
 * Returns NULL if no txoptions or no srcrt, otherwise returns orig
 * and initial value of fl6->daddr set in orig
 */
struct in6_addr *fl6_update_dst(struct flowi6 *fl6,
				const struct ipv6_txoptions *opt,
				struct in6_addr *orig)
{
	if (!opt || !opt->srcrt)
		return NULL;

	*orig = fl6->daddr;

	switch (opt->srcrt->type) {
	case IPV6_SRCRT_TYPE_0:
	case IPV6_SRCRT_STRICT:
	case IPV6_SRCRT_TYPE_2:
		fl6->daddr = *((struct rt0_hdr *)opt->srcrt)->addr;
		break;
	case IPV6_SRCRT_TYPE_4:
	{
		struct ipv6_sr_hdr *srh = (struct ipv6_sr_hdr *)opt->srcrt;

		fl6->daddr = srh->segments[srh->segments_left];
		break;
	}
	default:
		return NULL;
	}

	return orig;
}
EXPORT_SYMBOL_GPL(fl6_update_dst);