// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2007 Jens Axboe <jens.axboe@oracle.com>
 *
 * Scatterlist handling helpers.
 */
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/kmemleak.h>

/**
 * sg_next - return the next scatterlist entry in a list
 * @sg:		The current sg entry
 *
 * Description:
 *   Usually the next entry will be @sg@ + 1, but if this sg element is part
 *   of a chained scatterlist, it could jump to the start of a new
 *   scatterlist array.
 *
 **/
struct scatterlist *sg_next(struct scatterlist *sg)
{
	if (sg_is_last(sg))
		return NULL;

	sg++;
	if (unlikely(sg_is_chain(sg)))
		sg = sg_chain_ptr(sg);

	return sg;
}
EXPORT_SYMBOL(sg_next);

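/*
 * Example (illustrative sketch, not part of the kernel sources): walking a
 * possibly-chained list with sg_next().  "sgl" is assumed to come from the
 * caller:
 *
 *	struct scatterlist *sg;
 *
 *	for (sg = sgl; sg; sg = sg_next(sg))
 *		pr_info("entry: len=%u\n", sg->length);
 *
 * The for_each_sg() helper from <linux/scatterlist.h> wraps this pattern.
 */
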
/**
 * sg_nents - return total count of entries in scatterlist
 * @sg:		The scatterlist
 *
 * Description:
 *   Returns the number of entries in @sg, taking chaining into account.
 *
 **/
int sg_nents(struct scatterlist *sg)
{
	int nents;

	for (nents = 0; sg; sg = sg_next(sg))
		nents++;

	return nents;
}
EXPORT_SYMBOL(sg_nents);

/**
 * sg_nents_for_len - return total count of entries in scatterlist
 *		      needed to satisfy the supplied length
 * @sg:		The scatterlist
 * @len:	The total required length
 *
 * Description:
 *   Determines the number of entries in @sg that are required to meet
 *   the supplied length, taking chaining into account.
 *
 * Returns:
 *   the number of sg entries needed, negative error on failure
 *
 **/
int sg_nents_for_len(struct scatterlist *sg, u64 len)
{
	int nents;
	u64 total;

	if (!len)
		return 0;

	for (nents = 0, total = 0; sg; sg = sg_next(sg)) {
		nents++;
		total += sg->length;
		if (total >= len)
			return nents;
	}

	return -EINVAL;
}
EXPORT_SYMBOL(sg_nents_for_len);

/**
 * sg_last - return the last scatterlist entry in a list
 * @sgl:	First entry in the scatterlist
 * @nents:	Number of entries in the scatterlist
 *
 * Description:
 *   Should only be used casually, it (currently) scans the entire list
 *   to get the last entry.
 *
 *   Note that the @sgl@ pointer passed in need not be the first one,
 *   the important bit is that @nents@ denotes the number of entries that
 *   exist from @sgl@.
 *
 **/
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
{
	struct scatterlist *sg, *ret = NULL;
	unsigned int i;

	for_each_sg(sgl, sg, nents, i)
		ret = sg;

	BUG_ON(!sg_is_last(ret));
	return ret;
}
EXPORT_SYMBOL(sg_last);

/**
 * sg_init_table - Initialize SG table
 * @sgl:	The SG table
 * @nents:	Number of entries in table
 *
 * Notes:
 *   If this is part of a chained sg table, sg_mark_end() should be
 *   used only on the last table part.
 *
 **/
void sg_init_table(struct scatterlist *sgl, unsigned int nents)
{
	memset(sgl, 0, sizeof(*sgl) * nents);
	sg_init_marker(sgl, nents);
}
EXPORT_SYMBOL(sg_init_table);

/**
 * sg_init_one - Initialize a single entry sg list
 * @sg:		SG entry
 * @buf:	Virtual address for IO
 * @buflen:	IO length
 *
 **/
void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
	sg_init_table(sg, 1);
	sg_set_buf(sg, buf, buflen);
}
EXPORT_SYMBOL(sg_init_one);

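/*
 * Example (illustrative sketch, not part of the kernel sources): describing
 * one kmalloc'ed buffer for DMA.  "dev", "buf" and "buflen" are assumed to
 * come from the caller; error handling is elided:
 *
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, buf, buflen);
 *	if (dma_map_sg(dev, &sg, 1, DMA_TO_DEVICE) == 1) {
 *		... program the device with sg_dma_address(&sg) ...
 *		dma_unmap_sg(dev, &sg, 1, DMA_TO_DEVICE);
 *	}
 */
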
/*
 * The default behaviour of sg_alloc_table() is to use these kmalloc/kfree
 * helpers.
 */
static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		/*
		 * Kmemleak doesn't track page allocations as they are not
		 * commonly used (in a raw form) for kernel data structures.
		 * As we chain together a list of pages and then a normal
		 * kmalloc (tracked by kmemleak), in order for that last
		 * allocation not to become decoupled (and thus a
		 * false-positive) we need to inform kmemleak of all the
		 * intermediate allocations.
		 */
		void *ptr = (void *) __get_free_page(gfp_mask);

		kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
		return ptr;
	} else
		return kmalloc_array(nents, sizeof(struct scatterlist),
				     gfp_mask);
}

static void sg_kfree(struct scatterlist *sg, unsigned int nents)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		kmemleak_free(sg);
		free_page((unsigned long) sg);
	} else
		kfree(sg);
}

/**
 * __sg_free_table - Free a previously mapped sg table
 * @table:	The sg table header to use
 * @max_ents:	The maximum number of entries per single scatterlist
 * @nents_first_chunk: Number of entries in the (preallocated) first
 *	scatterlist chunk, 0 means no such preallocated first chunk
 * @free_fn:	Free function
 *
 * Description:
 *   Free an sg table previously allocated and setup with
 *   __sg_alloc_table().  The @max_ents value must be identical to
 *   that previously used with __sg_alloc_table().
 *
 **/
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
		     unsigned int nents_first_chunk, sg_free_fn *free_fn)
{
	struct scatterlist *sgl, *next;
	unsigned curr_max_ents = nents_first_chunk ?: max_ents;

	if (unlikely(!table->sgl))
		return;

	sgl = table->sgl;
	while (table->orig_nents) {
		unsigned int alloc_size = table->orig_nents;
		unsigned int sg_size;

		/*
		 * If we have more than max_ents segments left,
		 * then assign 'next' to the sg table after the current one.
		 * sg_size is then one less than alloc size, since the last
		 * element is the chain pointer.
		 */
		if (alloc_size > curr_max_ents) {
			next = sg_chain_ptr(&sgl[curr_max_ents - 1]);
			alloc_size = curr_max_ents;
			sg_size = alloc_size - 1;
		} else {
			sg_size = alloc_size;
			next = NULL;
		}

		table->orig_nents -= sg_size;
		if (nents_first_chunk)
			nents_first_chunk = 0;
		else
			free_fn(sgl, alloc_size);
		sgl = next;
		curr_max_ents = max_ents;
	}

	table->sgl = NULL;
}
EXPORT_SYMBOL(__sg_free_table);

/**
 * sg_free_table - Free a previously allocated sg table
 * @table:	The mapped sg table header
 *
 **/
void sg_free_table(struct sg_table *table)
{
	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);
}
EXPORT_SYMBOL(sg_free_table);

/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @max_ents:	The maximum number of entries the allocator returns per call
 * @first_chunk: preallocated first scatterlist chunk, %NULL if none
 * @nents_first_chunk: Number of entries in the (preallocated) first
 *	scatterlist chunk, 0 means no such preallocated chunk provided by user
 * @gfp_mask:	GFP allocation mask
 * @alloc_fn:	Allocator to use
 *
 * Description:
 *   This function returns a @table @nents long. The allocator is
 *   defined to return scatterlist chunks of maximum size @max_ents.
 *   Thus if @nents is bigger than @max_ents, the scatterlists will be
 *   chained in units of @max_ents.
 *
 * Notes:
 *   If this function returns non-0 (e.g. failure), the caller must call
 *   __sg_free_table() to cleanup any leftover allocations.
 *
 **/
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
		     unsigned int max_ents, struct scatterlist *first_chunk,
		     unsigned int nents_first_chunk, gfp_t gfp_mask,
		     sg_alloc_fn *alloc_fn)
{
	struct scatterlist *sg, *prv;
	unsigned int left;
	unsigned curr_max_ents = nents_first_chunk ?: max_ents;
	unsigned prv_max_ents;

	memset(table, 0, sizeof(*table));

	if (nents == 0)
		return -EINVAL;
#ifdef CONFIG_ARCH_NO_SG_CHAIN
	if (WARN_ON_ONCE(nents > max_ents))
		return -EINVAL;
#endif

	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size, alloc_size = left;

		if (alloc_size > curr_max_ents) {
			alloc_size = curr_max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		if (first_chunk) {
			sg = first_chunk;
			first_chunk = NULL;
		} else {
			sg = alloc_fn(alloc_size, gfp_mask);
		}
		if (unlikely(!sg)) {
			/*
			 * Adjust entry count to reflect that the last
			 * entry of the previous table won't be used for
			 * linkage.  Without this, sg_kfree() may get
			 * confused.
			 */
			if (prv)
				table->nents = ++table->orig_nents;

			return -ENOMEM;
		}

		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		/*
		 * If this is the first mapping, assign the sg table header.
		 * If this is not the first mapping, chain previous part.
		 */
		if (prv)
			sg_chain(prv, prv_max_ents, sg);
		else
			table->sgl = sg;

		/*
		 * If no more entries after this one, mark the end
		 */
		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		prv = sg;
		prv_max_ents = curr_max_ents;
		curr_max_ents = max_ents;
	} while (left);

	return 0;
}
EXPORT_SYMBOL(__sg_alloc_table);

/**
 * sg_alloc_table - Allocate and initialize an sg table
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @gfp_mask:	GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table. If @nents@ is larger than
 *   SG_MAX_SINGLE_ALLOC a chained sg table will be setup.
 *
 **/
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
	int ret;

	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
			       NULL, 0, gfp_mask, sg_kmalloc);
	if (unlikely(ret))
		__sg_free_table(table, SG_MAX_SINGLE_ALLOC, 0, sg_kfree);

	return ret;
}
EXPORT_SYMBOL(sg_alloc_table);

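/*
 * Example (illustrative sketch, not part of the kernel sources): building a
 * table over an array of pages by hand.  "pages" and "npages" are assumed
 * to come from the caller:
 *
 *	struct sg_table table;
 *	struct scatterlist *sg;
 *	int i, ret;
 *
 *	ret = sg_alloc_table(&table, npages, GFP_KERNEL);
 *	if (ret)
 *		return ret;
 *	for_each_sg(table.sgl, sg, table.nents, i)
 *		sg_set_page(sg, pages[i], PAGE_SIZE, 0);
 *	...
 *	sg_free_table(&table);
 */
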
static struct scatterlist *get_next_sg(struct sg_table *table,
				       struct scatterlist *cur,
				       unsigned long needed_sges,
				       gfp_t gfp_mask)
{
	struct scatterlist *new_sg, *next_sg;
	unsigned int alloc_size;

	if (cur) {
		next_sg = sg_next(cur);
		/* Check if the last entry should be kept for chaining */
		if (!sg_is_last(next_sg) || needed_sges == 1)
			return next_sg;
	}

	alloc_size = min_t(unsigned long, needed_sges, SG_MAX_SINGLE_ALLOC);
	new_sg = sg_kmalloc(alloc_size, gfp_mask);
	if (!new_sg)
		return ERR_PTR(-ENOMEM);
	sg_init_table(new_sg, alloc_size);
	if (cur) {
		__sg_chain(next_sg, new_sg);
		table->orig_nents += alloc_size - 1;
	} else {
		table->sgl = new_sg;
		table->orig_nents = alloc_size;
		table->nents = 0;
	}
	return new_sg;
}

/**
 * __sg_alloc_table_from_pages - Allocate and initialize an sg table from
 *				 an array of pages
 * @sgt:	 The sg table header to use
 * @pages:	 Pointer to an array of page pointers
 * @n_pages:	 Number of pages in the pages array
 * @offset:	 Offset from start of the first page to the start of a buffer
 * @size:	 Number of valid bytes in the buffer (after offset)
 * @max_segment: Maximum size of a scatterlist element in bytes
 * @prv:	 Last populated SGE in @sgt
 * @left_pages:	 Number of pages the caller will add after this call
 * @gfp_mask:	 GFP allocation mask
 *
 * Description:
 *    If @prv is NULL, allocate and initialize an sg table from a list of
 *    pages, else reuse the scatterlist passed in at @prv.
 *    Contiguous ranges of the pages are squashed into a single scatterlist
 *    entry up to the maximum size specified in @max_segment.  A user may
 *    provide an offset into the first page and the size of valid data in
 *    the buffer described by the page array.
 *
 * Returns:
 *   Last SGE in @sgt on success, an ERR_PTR value otherwise.
 *   The allocation in @sgt must be released by sg_free_table.
 *
 * Notes:
 *   If this function returns an error, the caller must call
 *   sg_free_table() to clean up any leftover allocations.
 */
struct scatterlist *__sg_alloc_table_from_pages(struct sg_table *sgt,
		struct page **pages, unsigned int n_pages, unsigned int offset,
		unsigned long size, unsigned int max_segment,
		struct scatterlist *prv, unsigned int left_pages,
		gfp_t gfp_mask)
{
	unsigned int chunks, cur_page, seg_len, i, prv_len = 0;
	unsigned int added_nents = 0;
	struct scatterlist *s = prv;

	/*
	 * The algorithm below requires max_segment to be aligned to PAGE_SIZE
	 * otherwise it can overshoot.
	 */
	max_segment = ALIGN_DOWN(max_segment, PAGE_SIZE);
	if (WARN_ON(max_segment < PAGE_SIZE))
		return ERR_PTR(-EINVAL);

	if (IS_ENABLED(CONFIG_ARCH_NO_SG_CHAIN) && prv)
		return ERR_PTR(-EOPNOTSUPP);

	if (prv) {
		unsigned long paddr = (page_to_pfn(sg_page(prv)) * PAGE_SIZE +
				       prv->offset + prv->length) /
				      PAGE_SIZE;

		if (WARN_ON(offset))
			return ERR_PTR(-EINVAL);

		/* Merge contiguous pages into the last SG */
		prv_len = prv->length;
		while (n_pages && page_to_pfn(pages[0]) == paddr) {
			if (prv->length + PAGE_SIZE > max_segment)
				break;
			prv->length += PAGE_SIZE;
			paddr++;
			pages++;
			n_pages--;
		}
		if (!n_pages)
			goto out;
	}

	/* compute number of contiguous chunks */
	chunks = 1;
	seg_len = 0;
	for (i = 1; i < n_pages; i++) {
		seg_len += PAGE_SIZE;
		if (seg_len >= max_segment ||
		    page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1) {
			chunks++;
			seg_len = 0;
		}
	}

	/* merging chunks and putting them into the scatterlist */
	cur_page = 0;
	for (i = 0; i < chunks; i++) {
		unsigned int j, chunk_size;

		/* look for the end of the current chunk */
		seg_len = 0;
		for (j = cur_page + 1; j < n_pages; j++) {
			seg_len += PAGE_SIZE;
			if (seg_len >= max_segment ||
			    page_to_pfn(pages[j]) !=
			    page_to_pfn(pages[j - 1]) + 1)
				break;
		}

		/* Pass how many chunks might be left */
		s = get_next_sg(sgt, s, chunks - i + left_pages, gfp_mask);
		if (IS_ERR(s)) {
			/*
			 * Adjust entry length to be as before function was
			 * called.
			 */
			if (prv)
				prv->length = prv_len;
			return s;
		}
		chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
		sg_set_page(s, pages[cur_page],
			    min_t(unsigned long, size, chunk_size), offset);
		added_nents++;
		size -= chunk_size;
		offset = 0;
		cur_page = j;
	}
	sgt->nents += added_nents;
out:
	if (!left_pages)
		sg_mark_end(s);

	return s;
}
EXPORT_SYMBOL(__sg_alloc_table_from_pages);

/**
 * sg_alloc_table_from_pages - Allocate and initialize an sg table from
 *			       an array of pages
 * @sgt:	 The sg table header to use
 * @pages:	 Pointer to an array of page pointers
 * @n_pages:	 Number of pages in the pages array
 * @offset:	 Offset from start of the first page to the start of a buffer
 * @size:	 Number of valid bytes in the buffer (after offset)
 * @gfp_mask:	 GFP allocation mask
 *
 * Description:
 *    Allocate and initialize an sg table from a list of pages.  Contiguous
 *    ranges of the pages are squashed into a single scatterlist node.  A
 *    user may provide an offset into the first page and the size of valid
 *    data in the buffer described by the page array.  The returned sg table
 *    is released by sg_free_table.
 *
 * Returns:
 *   0 on success, negative error on failure
 */
int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
			      unsigned int n_pages, unsigned int offset,
			      unsigned long size, gfp_t gfp_mask)
{
	return PTR_ERR_OR_ZERO(__sg_alloc_table_from_pages(sgt, pages, n_pages,
			offset, size, UINT_MAX, NULL, 0, gfp_mask));
}
EXPORT_SYMBOL(sg_alloc_table_from_pages);

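/*
 * Example (illustrative sketch, not part of the kernel sources): turning an
 * array of pinned user pages into a table.  "pages", "npages" and "len" are
 * assumed to come from the caller, e.g. from pin_user_pages():
 *
 *	struct sg_table sgt;
 *	int ret;
 *
 *	ret = sg_alloc_table_from_pages(&sgt, pages, npages, 0, len,
 *					GFP_KERNEL);
 *	if (ret)
 *		return ret;
 *	... map with dma_map_sg(), do I/O ...
 *	sg_free_table(&sgt);
 */
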
#ifdef CONFIG_SGL_ALLOC

/**
 * sgl_alloc_order - allocate a scatterlist and its pages
 * @length: Length in bytes of the scatterlist. Must be at least one
 * @order: Second argument for alloc_pages()
 * @chainable: Whether or not to allocate an extra element in the scatterlist
 *	for scatterlist chaining purposes
 * @gfp: Memory allocation flags
 * @nent_p: [out] Number of entries in the scatterlist that have pages
 *
 * Returns: A pointer to an initialized scatterlist or %NULL upon failure.
 */
struct scatterlist *sgl_alloc_order(unsigned long long length,
				    unsigned int order, bool chainable,
				    gfp_t gfp, unsigned int *nent_p)
{
	struct scatterlist *sgl, *sg;
	struct page *page;
	unsigned int nent, nalloc;
	u32 elem_len;

	nent = round_up(length, PAGE_SIZE << order) >> (PAGE_SHIFT + order);
	/* Check for integer overflow */
	if (length > (nent << (PAGE_SHIFT + order)))
		return NULL;
	nalloc = nent;
	if (chainable) {
		/* Check for integer overflow */
		if (nalloc + 1 < nalloc)
			return NULL;
		nalloc++;
	}
	sgl = kmalloc_array(nalloc, sizeof(struct scatterlist),
			    gfp & ~GFP_DMA);
	if (!sgl)
		return NULL;

	sg_init_table(sgl, nalloc);
	sg = sgl;
	while (length) {
		elem_len = min_t(u64, length, PAGE_SIZE << order);
		page = alloc_pages(gfp, order);
		if (!page) {
			sgl_free_order(sgl, order);
			return NULL;
		}

		sg_set_page(sg, page, elem_len, 0);
		length -= elem_len;
		sg = sg_next(sg);
	}
	WARN_ONCE(length, "length = %lld\n", length);
	if (nent_p)
		*nent_p = nent;
	return sgl;
}
EXPORT_SYMBOL(sgl_alloc_order);

/**
 * sgl_alloc - allocate a scatterlist and its pages
 * @length: Length in bytes of the scatterlist
 * @gfp: Memory allocation flags
 * @nent_p: [out] Number of entries in the scatterlist
 *
 * Returns: A pointer to an initialized scatterlist or %NULL upon failure.
 */
struct scatterlist *sgl_alloc(unsigned long long length, gfp_t gfp,
			      unsigned int *nent_p)
{
	return sgl_alloc_order(length, 0, false, gfp, nent_p);
}
EXPORT_SYMBOL(sgl_alloc);

/**
 * sgl_free_n_order - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 * @nents: Maximum number of elements to free
 * @order: Second argument for __free_pages()
 *
 * Notes:
 * - If several scatterlists have been chained and each chain element is
 *   freed separately then it's essential to set nents correctly to avoid
 *   freeing a page twice.
 * - All pages in a chained scatterlist can be freed at once by setting @nents
 *   to a high number.
 */
void sgl_free_n_order(struct scatterlist *sgl, int nents, int order)
{
	struct scatterlist *sg;
	struct page *page;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		if (!sg)
			break;
		page = sg_page(sg);
		if (page)
			__free_pages(page, order);
	}
	kfree(sgl);
}
EXPORT_SYMBOL(sgl_free_n_order);

/**
 * sgl_free_order - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 * @order: Second argument for __free_pages()
 */
void sgl_free_order(struct scatterlist *sgl, int order)
{
	sgl_free_n_order(sgl, INT_MAX, order);
}
EXPORT_SYMBOL(sgl_free_order);

/**
 * sgl_free - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 */
void sgl_free(struct scatterlist *sgl)
{
	sgl_free_order(sgl, 0);
}
EXPORT_SYMBOL(sgl_free);

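/*
 * Example (illustrative sketch, not part of the kernel sources): allocating
 * backing pages together with the list, then releasing both.  "len" is
 * assumed to come from the caller:
 *
 *	unsigned int nents;
 *	struct scatterlist *sgl;
 *
 *	sgl = sgl_alloc(len, GFP_KERNEL, &nents);
 *	if (!sgl)
 *		return -ENOMEM;
 *	... fill the pages via sg_copy_from_buffer() or a mapping iterator ...
 *	sgl_free(sgl);
 */
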
#endif /* CONFIG_SGL_ALLOC */

void __sg_page_iter_start(struct sg_page_iter *piter,
			  struct scatterlist *sglist, unsigned int nents,
			  unsigned long pgoffset)
{
	piter->__pg_advance = 0;
	piter->__nents = nents;

	piter->sg = sglist;
	piter->sg_pgoffset = pgoffset;
}
EXPORT_SYMBOL(__sg_page_iter_start);

static int sg_page_count(struct scatterlist *sg)
{
	return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
}

bool __sg_page_iter_next(struct sg_page_iter *piter)
{
	if (!piter->__nents || !piter->sg)
		return false;

	piter->sg_pgoffset += piter->__pg_advance;
	piter->__pg_advance = 1;

	while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
		piter->sg_pgoffset -= sg_page_count(piter->sg);
		piter->sg = sg_next(piter->sg);
		if (!--piter->__nents || !piter->sg)
			return false;
	}

	return true;
}
EXPORT_SYMBOL(__sg_page_iter_next);

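/*
 * Example (illustrative sketch, not part of the kernel sources): visiting
 * every page covered by a table via the for_each_sg_page() wrapper from
 * <linux/scatterlist.h>, which drives __sg_page_iter_next():
 *
 *	struct sg_page_iter piter;
 *
 *	for_each_sg_page(sgt->sgl, &piter, sgt->orig_nents, 0) {
 *		struct page *page = sg_page_iter_page(&piter);
 *		...
 *	}
 */
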
static int sg_dma_page_count(struct scatterlist *sg)
{
	return PAGE_ALIGN(sg->offset + sg_dma_len(sg)) >> PAGE_SHIFT;
}

bool __sg_page_iter_dma_next(struct sg_dma_page_iter *dma_iter)
{
	struct sg_page_iter *piter = &dma_iter->base;

	if (!piter->__nents || !piter->sg)
		return false;

	piter->sg_pgoffset += piter->__pg_advance;
	piter->__pg_advance = 1;

	while (piter->sg_pgoffset >= sg_dma_page_count(piter->sg)) {
		piter->sg_pgoffset -= sg_dma_page_count(piter->sg);
		piter->sg = sg_next(piter->sg);
		if (!--piter->__nents || !piter->sg)
			return false;
	}

	return true;
}
EXPORT_SYMBOL(__sg_page_iter_dma_next);

/**
 * sg_miter_start - start mapping iteration over a sg list
 * @miter: sg mapping iter to be started
 * @sgl: sg list to iterate over
 * @nents: number of sg entries
 * @flags: sg iterator flags (SG_MITER_ATOMIC, SG_MITER_TO_SG and/or
 *	SG_MITER_FROM_SG)
 *
 * Description:
 *   Starts mapping iterator @miter.
 *
 * Context:
 *   Don't care.
 */
void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
		    unsigned int nents, unsigned int flags)
{
	memset(miter, 0, sizeof(struct sg_mapping_iter));

	__sg_page_iter_start(&miter->piter, sgl, nents, 0);
	WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
	miter->__flags = flags;
}
EXPORT_SYMBOL(sg_miter_start);

static bool sg_miter_get_next_page(struct sg_mapping_iter *miter)
{
	if (!miter->__remaining) {
		struct scatterlist *sg;

		if (!__sg_page_iter_next(&miter->piter))
			return false;

		sg = miter->piter.sg;

		miter->__offset = miter->piter.sg_pgoffset ? 0 : sg->offset;
		miter->piter.sg_pgoffset += miter->__offset >> PAGE_SHIFT;
		miter->__offset &= PAGE_SIZE - 1;
		miter->__remaining = sg->offset + sg->length -
				     (miter->piter.sg_pgoffset << PAGE_SHIFT) -
				     miter->__offset;
		miter->__remaining = min_t(unsigned long, miter->__remaining,
					   PAGE_SIZE - miter->__offset);
	}

	return true;
}

/**
 * sg_miter_skip - reposition mapping iterator
 * @miter: sg mapping iter to be skipped
 * @offset: number of bytes to skip past the current location
 *
 * Description:
 *   Sets the offset of @miter to its current location plus @offset bytes.
 *   If the mapping iterator @miter has been advanced by sg_miter_next(),
 *   this stops @miter.
 *
 * Context:
 *   Don't care if @miter is stopped, or not proceeded yet.
 *   Otherwise, preemption disabled if the SG_MITER_ATOMIC is set.
 *
 * Returns:
 *   true if @miter contains the valid mapping.  false if end of sg
 *   list is reached.
 */
bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset)
{
	sg_miter_stop(miter);

	while (offset) {
		off_t consumed;

		if (!sg_miter_get_next_page(miter))
			return false;

		consumed = min_t(off_t, offset, miter->__remaining);
		miter->__offset += consumed;
		miter->__remaining -= consumed;
		offset -= consumed;
	}

	return true;
}
EXPORT_SYMBOL(sg_miter_skip);

/**
 * sg_miter_next - proceed mapping iterator to the next mapping
 * @miter: sg mapping iter to proceed
 *
 * Description:
 *   Proceeds @miter to the next mapping.  @miter should have been started
 *   using sg_miter_start().  On successful return, @miter->page,
 *   @miter->addr and @miter->length point to the current mapping.
 *
 * Context:
 *   Preemption disabled if SG_MITER_ATOMIC.  Preemption must stay disabled
 *   till @miter is stopped.  May sleep if !SG_MITER_ATOMIC.
 *
 * Returns:
 *   true if @miter contains the next mapping.  false if end of sg
 *   list is reached.
 */
bool sg_miter_next(struct sg_mapping_iter *miter)
{
	sg_miter_stop(miter);

	/*
	 * Get to the next page if necessary.
	 * __remaining, __offset is adjusted by sg_miter_stop
	 */
	if (!sg_miter_get_next_page(miter))
		return false;

	miter->page = sg_page_iter_page(&miter->piter);
	miter->consumed = miter->length = miter->__remaining;

	if (miter->__flags & SG_MITER_ATOMIC)
		miter->addr = kmap_atomic(miter->page) + miter->__offset;
	else
		miter->addr = kmap(miter->page) + miter->__offset;

	return true;
}
EXPORT_SYMBOL(sg_miter_next);

/**
 * sg_miter_stop - stop mapping iteration
 * @miter: sg mapping iter to be stopped
 *
 * Description:
 *   Stops mapping iterator @miter.  @miter should have been started
 *   using sg_miter_start().  A stopped iteration can be resumed by
 *   calling sg_miter_next() on it.  This is useful when resources (kmap)
 *   need to be released during iteration.
 *
 * Context:
 *   Preemption disabled if the SG_MITER_ATOMIC is set.  Don't care
 *   otherwise.
 */
void sg_miter_stop(struct sg_mapping_iter *miter)
{
	WARN_ON(miter->consumed > miter->length);

	/* drop resources from the last iteration */
	if (miter->addr) {
		miter->__offset += miter->consumed;
		miter->__remaining -= miter->consumed;

		if ((miter->__flags & SG_MITER_TO_SG) &&
		    !PageSlab(miter->page))
			flush_kernel_dcache_page(miter->page);

		if (miter->__flags & SG_MITER_ATOMIC) {
			WARN_ON_ONCE(preemptible());
			kunmap_atomic(miter->addr);
		} else
			kunmap(miter->page);

		miter->page = NULL;
		miter->addr = NULL;
		miter->length = 0;
		miter->consumed = 0;
	}
}
EXPORT_SYMBOL(sg_miter_stop);

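/*
 * Example (illustrative sketch, not part of the kernel sources): reading an
 * sg list piecewise with the mapping iterator.  "sgl" and "nents" are
 * assumed to come from the caller:
 *
 *	struct sg_mapping_iter miter;
 *
 *	sg_miter_start(&miter, sgl, nents,
 *		       SG_MITER_ATOMIC | SG_MITER_FROM_SG);
 *	while (sg_miter_next(&miter)) {
 *		... read miter.length bytes at miter.addr ...
 *	}
 *	sg_miter_stop(&miter);
 */
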
/**
 * sg_copy_buffer - Copy data between a linear buffer and an SG list
 * @sgl:	 The SG list
 * @nents:	 Number of SG entries
 * @buf:	 Where to copy from
 * @buflen:	 The number of bytes to copy
 * @skip:	 Number of bytes to skip before copying
 * @to_buffer:	 transfer direction (true == from an sg list to a
 *		 buffer, false == from a buffer to an sg list)
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
		      size_t buflen, off_t skip, bool to_buffer)
{
	unsigned int offset = 0;
	struct sg_mapping_iter miter;
	unsigned int sg_flags = SG_MITER_ATOMIC;

	if (to_buffer)
		sg_flags |= SG_MITER_FROM_SG;
	else
		sg_flags |= SG_MITER_TO_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	if (!sg_miter_skip(&miter, skip))
		return 0;

	while ((offset < buflen) && sg_miter_next(&miter)) {
		unsigned int len;

		len = min(miter.length, buflen - offset);

		if (to_buffer)
			memcpy(buf + offset, miter.addr, len);
		else
			memcpy(miter.addr, buf + offset, len);

		offset += len;
	}

	sg_miter_stop(&miter);

	return offset;
}
EXPORT_SYMBOL(sg_copy_buffer);

/**
 * sg_copy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:	 The SG list
 * @nents:	 Number of SG entries
 * @buf:	 Where to copy from
 * @buflen:	 The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			   const void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, (void *)buf, buflen, 0, false);
}
EXPORT_SYMBOL(sg_copy_from_buffer);

/**
 * sg_copy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:	 The SG list
 * @nents:	 Number of SG entries
 * @buf:	 Where to copy to
 * @buflen:	 The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			 void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, 0, true);
}
EXPORT_SYMBOL(sg_copy_to_buffer);

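/*
 * Example (illustrative sketch, not part of the kernel sources): draining a
 * table into a kmalloc'ed bounce buffer.  "sgt" and "len" are assumed to
 * come from the caller:
 *
 *	void *buf = kmalloc(len, GFP_KERNEL);
 *	size_t copied;
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	copied = sg_copy_to_buffer(sgt->sgl, sgt->orig_nents, buf, len);
 *	...
 *	kfree(buf);
 */
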
/**
 * sg_pcopy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:	 The SG list
 * @nents:	 Number of SG entries
 * @buf:	 Where to copy from
 * @buflen:	 The number of bytes to copy
 * @skip:	 Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			    const void *buf, size_t buflen, off_t skip)
{
	return sg_copy_buffer(sgl, nents, (void *)buf, buflen, skip, false);
}
EXPORT_SYMBOL(sg_pcopy_from_buffer);

/**
 * sg_pcopy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:	 The SG list
 * @nents:	 Number of SG entries
 * @buf:	 Where to copy to
 * @buflen:	 The number of bytes to copy
 * @skip:	 Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			  void *buf, size_t buflen, off_t skip)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
}
EXPORT_SYMBOL(sg_pcopy_to_buffer);

/**
 * sg_zero_buffer - Zero-out a part of a SG list
 * @sgl:	 The SG list
 * @nents:	 Number of SG entries
 * @buflen:	 The number of bytes to zero out
 * @skip:	 Number of bytes to skip before zeroing
 *
 * Returns the number of bytes zeroed.
 **/
size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents,
		      size_t buflen, off_t skip)
{
	unsigned int offset = 0;
	struct sg_mapping_iter miter;
	unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	if (!sg_miter_skip(&miter, skip))
		return 0;

	while (offset < buflen && sg_miter_next(&miter)) {
		unsigned int len;

		len = min(miter.length, buflen - offset);
		memset(miter.addr, 0, len);

		offset += len;
	}

	sg_miter_stop(&miter);
	return offset;
}
EXPORT_SYMBOL(sg_zero_buffer);