xfrm_algo.c

/*
 * xfrm algorithm interface
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/crypto.h>
#include <net/xfrm.h>
#if defined(CONFIG_INET_AH) || defined(CONFIG_INET_AH_MODULE) || defined(CONFIG_INET6_AH) || defined(CONFIG_INET6_AH_MODULE)
#include <net/ah.h>
#endif
#if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE)
#include <net/esp.h>
#endif
#include <asm/scatterlist.h>
/*
 * Algorithms supported by IPsec. These entries contain properties which
 * are used in key negotiation and xfrm processing, and are used to verify
 * that instantiated crypto transforms have correct parameters for IPsec
 * purposes.
 */
static struct xfrm_algo_desc aalg_list[] = {
{
	.name = "hmac(digest_null)",
	.compat = "digest_null",
	.uinfo = {
		.auth = {
			.icv_truncbits = 0,
			.icv_fullbits = 0,
		}
	},
	.desc = {
		.sadb_alg_id = SADB_X_AALG_NULL,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 0,
		.sadb_alg_maxbits = 0
	}
},
{
	.name = "hmac(md5)",
	.compat = "md5",
	.uinfo = {
		.auth = {
			.icv_truncbits = 96,
			.icv_fullbits = 128,
		}
	},
	.desc = {
		.sadb_alg_id = SADB_AALG_MD5HMAC,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 128,
		.sadb_alg_maxbits = 128
	}
},
{
	.name = "hmac(sha1)",
	.compat = "sha1",
	.uinfo = {
		.auth = {
			.icv_truncbits = 96,
			.icv_fullbits = 160,
		}
	},
	.desc = {
		.sadb_alg_id = SADB_AALG_SHA1HMAC,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 160,
		.sadb_alg_maxbits = 160
	}
},
{
	.name = "hmac(sha256)",
	.compat = "sha256",
	.uinfo = {
		.auth = {
			.icv_truncbits = 96,
			.icv_fullbits = 256,
		}
	},
	.desc = {
		.sadb_alg_id = SADB_X_AALG_SHA2_256HMAC,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 256,
		.sadb_alg_maxbits = 256
	}
},
{
	.name = "hmac(ripemd160)",
	.compat = "ripemd160",
	.uinfo = {
		.auth = {
			.icv_truncbits = 96,
			.icv_fullbits = 160,
		}
	},
	.desc = {
		.sadb_alg_id = SADB_X_AALG_RIPEMD160HMAC,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 160,
		.sadb_alg_maxbits = 160
	}
},
{
	.name = "xcbc(aes)",
	.uinfo = {
		.auth = {
			.icv_truncbits = 96,
			.icv_fullbits = 128,
		}
	},
	.desc = {
		.sadb_alg_id = SADB_X_AALG_AES_XCBC_MAC,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 128,
		.sadb_alg_maxbits = 128
	}
},
};
static struct xfrm_algo_desc ealg_list[] = {
{
	.name = "ecb(cipher_null)",
	.compat = "cipher_null",
	.uinfo = {
		.encr = {
			.blockbits = 8,
			.defkeybits = 0,
		}
	},
	.desc = {
		.sadb_alg_id = SADB_EALG_NULL,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 0,
		.sadb_alg_maxbits = 0
	}
},
{
	.name = "cbc(des)",
	.compat = "des",
	.uinfo = {
		.encr = {
			.blockbits = 64,
			.defkeybits = 64,
		}
	},
	.desc = {
		.sadb_alg_id = SADB_EALG_DESCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 64,
		.sadb_alg_maxbits = 64
	}
},
{
	.name = "cbc(des3_ede)",
	.compat = "des3_ede",
	.uinfo = {
		.encr = {
			.blockbits = 64,
			.defkeybits = 192,
		}
	},
	.desc = {
		.sadb_alg_id = SADB_EALG_3DESCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 192,
		.sadb_alg_maxbits = 192
	}
},
{
	.name = "cbc(cast128)",
	.compat = "cast128",
	.uinfo = {
		.encr = {
			.blockbits = 64,
			.defkeybits = 128,
		}
	},
	.desc = {
		.sadb_alg_id = SADB_X_EALG_CASTCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 40,
		.sadb_alg_maxbits = 128
	}
},
{
	.name = "cbc(blowfish)",
	.compat = "blowfish",
	.uinfo = {
		.encr = {
			.blockbits = 64,
			.defkeybits = 128,
		}
	},
	.desc = {
		.sadb_alg_id = SADB_X_EALG_BLOWFISHCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 40,
		.sadb_alg_maxbits = 448
	}
},
{
	.name = "cbc(aes)",
	.compat = "aes",
	.uinfo = {
		.encr = {
			.blockbits = 128,
			.defkeybits = 128,
		}
	},
	.desc = {
		.sadb_alg_id = SADB_X_EALG_AESCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 128,
		.sadb_alg_maxbits = 256
	}
},
{
	.name = "cbc(serpent)",
	.compat = "serpent",
	.uinfo = {
		.encr = {
			.blockbits = 128,
			.defkeybits = 128,
		}
	},
	.desc = {
		.sadb_alg_id = SADB_X_EALG_SERPENTCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 128,
		.sadb_alg_maxbits = 256,
	}
},
{
	.name = "cbc(camellia)",
	.uinfo = {
		.encr = {
			.blockbits = 128,
			.defkeybits = 128,
		}
	},
	.desc = {
		.sadb_alg_id = SADB_X_EALG_CAMELLIACBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 128,
		.sadb_alg_maxbits = 256
	}
},
{
	.name = "cbc(twofish)",
	.compat = "twofish",
	.uinfo = {
		.encr = {
			.blockbits = 128,
			.defkeybits = 128,
		}
	},
	.desc = {
		.sadb_alg_id = SADB_X_EALG_TWOFISHCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 128,
		.sadb_alg_maxbits = 256
	}
},
};
static struct xfrm_algo_desc calg_list[] = {
{
	.name = "deflate",
	.uinfo = {
		.comp = {
			.threshold = 90,
		}
	},
	.desc = { .sadb_alg_id = SADB_X_CALG_DEFLATE }
},
{
	.name = "lzs",
	.uinfo = {
		.comp = {
			.threshold = 90,
		}
	},
	.desc = { .sadb_alg_id = SADB_X_CALG_LZS }
},
{
	.name = "lzjh",
	.uinfo = {
		.comp = {
			.threshold = 50,
		}
	},
	.desc = { .sadb_alg_id = SADB_X_CALG_LZJH }
},
};
static inline int aalg_entries(void)
{
	return ARRAY_SIZE(aalg_list);
}

static inline int ealg_entries(void)
{
	return ARRAY_SIZE(ealg_list);
}

static inline int calg_entries(void)
{
	return ARRAY_SIZE(calg_list);
}
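
/*
 * Example: validating a proposed key length against a descriptor, as a key
 * negotiation step might do. Illustrative sketch only; the helper below is
 * hypothetical and not part of this file.
 *
 *	static int key_bits_ok(const struct xfrm_algo_desc *desc, int bits)
 *	{
 *		return bits >= desc->desc.sadb_alg_minbits &&
 *		       bits <= desc->desc.sadb_alg_maxbits;
 *	}
 */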
/* Todo: generic iterators */
struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id)
{
	int i;

	for (i = 0; i < aalg_entries(); i++) {
		if (aalg_list[i].desc.sadb_alg_id == alg_id) {
			if (aalg_list[i].available)
				return &aalg_list[i];
			else
				break;
		}
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(xfrm_aalg_get_byid);

struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id)
{
	int i;

	for (i = 0; i < ealg_entries(); i++) {
		if (ealg_list[i].desc.sadb_alg_id == alg_id) {
			if (ealg_list[i].available)
				return &ealg_list[i];
			else
				break;
		}
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(xfrm_ealg_get_byid);

struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id)
{
	int i;

	for (i = 0; i < calg_entries(); i++) {
		if (calg_list[i].desc.sadb_alg_id == alg_id) {
			if (calg_list[i].available)
				return &calg_list[i];
			else
				break;
		}
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(xfrm_calg_get_byid);
static struct xfrm_algo_desc *xfrm_get_byname(struct xfrm_algo_desc *list,
					      int entries, u32 type, u32 mask,
					      char *name, int probe)
{
	int i, status;

	if (!name)
		return NULL;

	for (i = 0; i < entries; i++) {
		if (strcmp(name, list[i].name) &&
		    (!list[i].compat || strcmp(name, list[i].compat)))
			continue;

		if (list[i].available)
			return &list[i];

		if (!probe)
			break;

		status = crypto_has_alg(list[i].name, type,
					mask | CRYPTO_ALG_ASYNC);
		if (!status)
			break;

		list[i].available = status;
		return &list[i];
	}
	return NULL;
}

struct xfrm_algo_desc *xfrm_aalg_get_byname(char *name, int probe)
{
	return xfrm_get_byname(aalg_list, aalg_entries(),
			       CRYPTO_ALG_TYPE_HASH, CRYPTO_ALG_TYPE_HASH_MASK,
			       name, probe);
}
EXPORT_SYMBOL_GPL(xfrm_aalg_get_byname);

struct xfrm_algo_desc *xfrm_ealg_get_byname(char *name, int probe)
{
	return xfrm_get_byname(ealg_list, ealg_entries(),
			       CRYPTO_ALG_TYPE_BLKCIPHER, CRYPTO_ALG_TYPE_MASK,
			       name, probe);
}
EXPORT_SYMBOL_GPL(xfrm_ealg_get_byname);

struct xfrm_algo_desc *xfrm_calg_get_byname(char *name, int probe)
{
	return xfrm_get_byname(calg_list, calg_entries(),
			       CRYPTO_ALG_TYPE_COMPRESS, CRYPTO_ALG_TYPE_MASK,
			       name, probe);
}
EXPORT_SYMBOL_GPL(xfrm_calg_get_byname);
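
/*
 * Example: looking an algorithm up by its legacy (compat) name. Illustrative
 * sketch only; the caller and message below are hypothetical, and "aes" is
 * assumed to be built in or loadable.
 *
 *	struct xfrm_algo_desc *ealg;
 *
 *	ealg = xfrm_ealg_get_byname("aes", 1);
 *	if (ealg)
 *		printk(KERN_DEBUG "resolved to %s (ivlen %u)\n",
 *		       ealg->name, ealg->desc.sadb_alg_ivlen);
 *
 * With probe != 0 the lookup may also mark the entry available via
 * crypto_has_alg(); with probe == 0 only already-available entries match.
 */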
struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx)
{
	if (idx >= aalg_entries())
		return NULL;

	return &aalg_list[idx];
}
EXPORT_SYMBOL_GPL(xfrm_aalg_get_byidx);

struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx)
{
	if (idx >= ealg_entries())
		return NULL;

	return &ealg_list[idx];
}
EXPORT_SYMBOL_GPL(xfrm_ealg_get_byidx);
/*
 * Probe for the availability of crypto algorithms, and set the available
 * flag for any algorithms found on the system. This is typically called by
 * pfkey during userspace SA add, update or register.
 */
void xfrm_probe_algs(void)
{
#ifdef CONFIG_CRYPTO
	int i, status;

	BUG_ON(in_softirq());

	for (i = 0; i < aalg_entries(); i++) {
		status = crypto_has_hash(aalg_list[i].name, 0,
					 CRYPTO_ALG_ASYNC);
		if (aalg_list[i].available != status)
			aalg_list[i].available = status;
	}

	for (i = 0; i < ealg_entries(); i++) {
		status = crypto_has_blkcipher(ealg_list[i].name, 0,
					      CRYPTO_ALG_ASYNC);
		if (ealg_list[i].available != status)
			ealg_list[i].available = status;
	}

	for (i = 0; i < calg_entries(); i++) {
		status = crypto_has_comp(calg_list[i].name, 0,
					 CRYPTO_ALG_ASYNC);
		if (calg_list[i].available != status)
			calg_list[i].available = status;
	}
#endif
}
EXPORT_SYMBOL_GPL(xfrm_probe_algs);
int xfrm_count_auth_supported(void)
{
	int i, n;

	for (i = 0, n = 0; i < aalg_entries(); i++)
		if (aalg_list[i].available)
			n++;
	return n;
}
EXPORT_SYMBOL_GPL(xfrm_count_auth_supported);

int xfrm_count_enc_supported(void)
{
	int i, n;

	for (i = 0, n = 0; i < ealg_entries(); i++)
		if (ealg_list[i].available)
			n++;
	return n;
}
EXPORT_SYMBOL_GPL(xfrm_count_enc_supported);
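
/*
 * Example: a pfkey-style caller refreshing availability and reporting how
 * many algorithms it can currently offer. Illustrative sketch only; the
 * surrounding context and message are hypothetical.
 *
 *	xfrm_probe_algs();
 *	printk(KERN_DEBUG "xfrm: %d auth, %d encryption algorithms available\n",
 *	       xfrm_count_auth_supported(), xfrm_count_enc_supported());
 *
 * Probing must run in process context, hence the BUG_ON(in_softirq())
 * check in xfrm_probe_algs().
 */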
/* Move to common area: it is shared with AH. */
int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc,
		 int offset, int len, icv_update_fn_t icv_update)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	int err;
	struct scatterlist sg;

	/* Checksum header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;

		sg.page = virt_to_page(skb->data + offset);
		sg.offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
		sg.length = copy;

		err = icv_update(desc, &sg, copy);
		if (unlikely(err))
			return err;

		if ((len -= copy) == 0)
			return 0;
		offset += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;

			sg.page = frag->page;
			sg.offset = frag->page_offset + offset - start;
			sg.length = copy;

			err = icv_update(desc, &sg, copy);
			if (unlikely(err))
				return err;

			if (!(len -= copy))
				return 0;
			offset += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				err = skb_icv_walk(list, desc, offset - start,
						   copy, icv_update);
				if (unlikely(err))
					return err;
				if ((len -= copy) == 0)
					return 0;
				offset += copy;
			}
			start = end;
		}
	}
	BUG_ON(len);
	return 0;
}
EXPORT_SYMBOL_GPL(skb_icv_walk);
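
/*
 * Example: computing an ICV over a whole packet, in the style of the AH
 * code. Illustrative sketch only; the hash_desc setup and the use of
 * crypto_hash_update as the update callback are assumptions about the
 * caller, not part of this file.
 *
 *	struct hash_desc desc = { .tfm = tfm, .flags = 0 };
 *	int err;
 *
 *	err = crypto_hash_init(&desc);
 *	if (!err)
 *		err = skb_icv_walk(skb, &desc, 0, skb->len,
 *				   crypto_hash_update);
 *	if (!err)
 *		err = crypto_hash_final(&desc, icv);
 */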
#if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE)

/* Although it looks generic, it is not used anywhere else. */
int
skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	int elt = 0;

	if (copy > 0) {
		if (copy > len)
			copy = len;
		sg[elt].page = virt_to_page(skb->data + offset);
		sg[elt].offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
		sg[elt].length = copy;
		elt++;
		if ((len -= copy) == 0)
			return elt;
		offset += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			sg[elt].page = frag->page;
			sg[elt].offset = frag->page_offset + offset - start;
			sg[elt].length = copy;
			elt++;
			if (!(len -= copy))
				return elt;
			offset += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				elt += skb_to_sgvec(list, sg + elt, offset - start, copy);
				if ((len -= copy) == 0)
					return elt;
				offset += copy;
			}
			start = end;
		}
	}
	BUG_ON(len);
	return elt;
}
EXPORT_SYMBOL_GPL(skb_to_sgvec);
/* Check that skb data bits are writable. If they are not, copy data
 * to a newly created private area. If "tailbits" is given, make sure that
 * tailbits bytes beyond the current end of the skb are writable.
 *
 * Returns the number of scatterlist elements to load for subsequent
 * transformations and a pointer to the writable trailer skb.
 */
int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
{
	int copyflag;
	int elt;
	struct sk_buff *skb1, **skb_p;

	/* If the skb is cloned or its head is paged, reallocate the head,
	 * pulling out all the pages (pages are considered not writable
	 * at the moment even if they are anonymous).
	 */
	if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
	    __pskb_pull_tail(skb, skb_pagelen(skb) - skb_headlen(skb)) == NULL)
		return -ENOMEM;

	/* Easy case. Most packets will go this way. */
	if (!skb_shinfo(skb)->frag_list) {
		/* A little trouble: not enough space for the trailer.
		 * This should not happen when the stack is tuned to generate
		 * good frames. On a miss we reallocate and reserve even more
		 * space; 128 bytes is fair.
		 */
		if (skb_tailroom(skb) < tailbits &&
		    pskb_expand_head(skb, 0, tailbits - skb_tailroom(skb) + 128, GFP_ATOMIC))
			return -ENOMEM;

		/* Voila! */
		*trailer = skb;
		return 1;
	}

	/* Misery. We are in trouble, going to mince the fragments... */

	elt = 1;
	skb_p = &skb_shinfo(skb)->frag_list;
	copyflag = 0;

	while ((skb1 = *skb_p) != NULL) {
		int ntail = 0;

		/* The fragment is partially pulled by someone; this can
		 * happen on input. Copy it and everything after it.
		 */
		if (skb_shared(skb1))
			copyflag = 1;

		/* If the skb is the last one, worry about the trailer. */
		if (skb1->next == NULL && tailbits) {
			if (skb_shinfo(skb1)->nr_frags ||
			    skb_shinfo(skb1)->frag_list ||
			    skb_tailroom(skb1) < tailbits)
				ntail = tailbits + 128;
		}

		if (copyflag ||
		    skb_cloned(skb1) ||
		    ntail ||
		    skb_shinfo(skb1)->nr_frags ||
		    skb_shinfo(skb1)->frag_list) {
			struct sk_buff *skb2;

			/* No way around it: copy the fragment. */
			if (ntail == 0)
				skb2 = skb_copy(skb1, GFP_ATOMIC);
			else
				skb2 = skb_copy_expand(skb1,
						       skb_headroom(skb1),
						       ntail,
						       GFP_ATOMIC);
			if (unlikely(skb2 == NULL))
				return -ENOMEM;

			if (skb1->sk)
				skb_set_owner_w(skb2, skb1->sk);

			/* Looking around. Are we still alive?
			 * OK, link the new skb, drop the old one.
			 */
			skb2->next = skb1->next;
			*skb_p = skb2;
			kfree_skb(skb1);
			skb1 = skb2;
		}
		elt++;
		*trailer = skb1;
		skb_p = &skb1->next;
	}

	return elt;
}
EXPORT_SYMBOL_GPL(skb_cow_data);
void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
{
	if (tail != skb) {
		skb->data_len += len;
		skb->len += len;
	}
	return skb_put(tail, len);
}
EXPORT_SYMBOL_GPL(pskb_put);
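
/*
 * Example: how an ESP-style transform typically combines the helpers above
 * on output. Illustrative sketch only; trailer_len, offset, len and the
 * error handling are hypothetical, and the real callers manage the
 * scatterlist allocation differently.
 *
 *	struct sk_buff *trailer;
 *	struct scatterlist *sg;
 *	int nfrags;
 *
 *	nfrags = skb_cow_data(skb, trailer_len, &trailer);
 *	if (nfrags < 0)
 *		return nfrags;
 *
 *	pskb_put(skb, trailer, trailer_len);	(padding/trailer now in packet)
 *
 *	sg = kmalloc(sizeof(*sg) * nfrags, GFP_ATOMIC);
 *	if (!sg)
 *		return -ENOMEM;
 *	skb_to_sgvec(skb, sg, offset, len);	(then hand sg to the cipher)
 *	kfree(sg);
 */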
#endif