// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
# define DEBUG
#endif
#endif
#define CREATE_TRACE_POINTS

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/kmemleak.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <trace/events/cma.h>
#undef CREATE_TRACE_POINTS
#include <trace/hooks/mm.h>

#include "cma.h"

extern void lru_cache_disable(void);
extern void lru_cache_enable(void);

struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;

phys_addr_t cma_get_base(const struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

const char *cma_get_name(const struct cma *cma)
{
	return cma->name;
}
EXPORT_SYMBOL_GPL(cma_get_name);

static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
					     unsigned int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}

/*
 * Find the offset of the base PFN from the specified align_order.
 * The value returned is represented in order_per_bits.
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
					       unsigned int align_order)
{
	return (cma->base_pfn & ((1UL << align_order) - 1))
		>> cma->order_per_bit;
}

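/*
 * Worked example with illustrative numbers (not taken from a real platform):
 * with order_per_bit == 1, each bitmap bit covers two pages.  For a request
 * with align_order == 3 (an 8-page boundary), cma_bitmap_aligned_mask()
 * returns (1 << (3 - 1)) - 1 == 3, i.e. allocations must start on a 4-bit
 * boundary within the bitmap.  If base_pfn were 0x1404, then
 * cma_bitmap_aligned_offset() would return (0x1404 & 0x7) >> 1 == 2, the
 * bitmap offset needed so that bit positions line up with physical 8-page
 * boundaries.
 */
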
static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}

static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
			     unsigned int count)
{
	unsigned long bitmap_no, bitmap_count;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	mutex_unlock(&cma->lock);
}

static void __init cma_activate_area(struct cma *cma)
{
	unsigned long base_pfn = cma->base_pfn, pfn;
	struct zone *zone;

	cma->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma), GFP_KERNEL);
	if (!cma->bitmap)
		goto out_error;

	/*
	 * alloc_contig_range() requires the pfn range specified to be in the
	 * same zone. Simplify by forcing the entire CMA resv range to be in the
	 * same zone.
	 */
	WARN_ON_ONCE(!pfn_valid(base_pfn));
	zone = page_zone(pfn_to_page(base_pfn));
	for (pfn = base_pfn + 1; pfn < base_pfn + cma->count; pfn++) {
		WARN_ON_ONCE(!pfn_valid(pfn));
		if (page_zone(pfn_to_page(pfn)) != zone)
			goto not_in_zone;
	}

	for (pfn = base_pfn; pfn < base_pfn + cma->count;
	     pfn += pageblock_nr_pages)
		init_cma_reserved_pageblock(pfn_to_page(pfn));

	mutex_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
	INIT_HLIST_HEAD(&cma->mem_head);
	spin_lock_init(&cma->mem_head_lock);
#endif

	return;

not_in_zone:
	bitmap_free(cma->bitmap);
out_error:
	/* Expose all pages to the buddy, they are useless for CMA. */
	for (pfn = base_pfn; pfn < base_pfn + cma->count; pfn++)
		free_reserved_page(pfn_to_page(pfn));
	totalcma_pages -= cma->count;
	cma->count = 0;
	pr_err("CMA area %s could not be activated\n", cma->name);
	return;
}

static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++)
		cma_activate_area(&cma_areas[i]);

	return 0;
}
core_initcall(cma_init_reserved_areas);

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area.
 * @size: Size of the reserved area (in bytes).
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @name: The name of the area. If this parameter is NULL, the name of
 *        the area will be set to "cmaN", where N is a running counter of
 *        used areas.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates a custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 unsigned int order_per_bit,
				 const char *name,
				 struct cma **res_cma)
{
	struct cma *cma;
	phys_addr_t alignment;

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

	/* ensure minimal alignment required by mm core */
	alignment = PAGE_SIZE <<
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);

	/* alignment should be aligned with order_per_bit */
	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
		return -EINVAL;

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];

	if (name)
		snprintf(cma->name, CMA_MAX_NAME, "%s", name);
	else
		snprintf(cma->name, CMA_MAX_NAME, "cma%d", cma_area_count);

	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;
	totalcma_pages += (size / PAGE_SIZE);

	return 0;
}

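/*
 * A minimal usage sketch, kept under #if 0: early platform code could hand an
 * already memblock-reserved range to CMA through cma_init_reserved_mem().
 * The symbol names, the base address and the 64 MiB size are hypothetical.
 */
#if 0
static struct cma *example_cma;

static int __init example_register_reserved_cma(void)
{
	/* Assumed to have been reserved earlier with memblock_reserve(). */
	phys_addr_t base = 0x80000000;
	phys_addr_t size = SZ_64M;

	/* order_per_bit == 0: one bitmap bit tracks one page. */
	return cma_init_reserved_mem(base, size, 0, "example", &example_cma);
}
#endif
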
/**
 * cma_declare_contiguous_nid() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @size: Size of the reserved area (in bytes).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be power of 2 or zero.
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @fixed: hint about where to place the reserved area
 * @name: The name of the area. See function cma_init_reserved_mem().
 * @res_cma: Pointer to store the created cma region.
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node.
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows the creation of custom reserved areas.
 *
 * If @fixed is true, reserve contiguous area at exactly @base. If false,
 * reserve in range from @base to @limit.
 */
int __init cma_declare_contiguous_nid(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, const char *name, struct cma **res_cma,
			int nid)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start;
	int ret = 0;

	/*
	 * We can't use __pa(high_memory) directly, since high_memory
	 * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
	 * complain. Find the boundary by adding one to the last valid
	 * address.
	 */
	highmem_start = __pa(high_memory - 1) + 1;
	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
		 __func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	/*
	 * Sanitise input arguments.
	 * Pages both ends in CMA area could be merged into adjacent unmovable
	 * migratetype page by page allocator's buddy algorithm. In the case,
	 * you couldn't get a contiguous memory, which is not what we want.
	 */
	alignment = max(alignment, (phys_addr_t)PAGE_SIZE <<
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
	if (fixed && base & (alignment - 1)) {
		ret = -EINVAL;
		pr_err("Region at %pa must be aligned to %pa bytes\n",
		       &base, &alignment);
		goto err;
	}
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	if (!base)
		fixed = false;

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * If allocating at a fixed base the request region must not cross the
	 * low/high memory boundary.
	 */
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
		       &base, &highmem_start);
		goto err;
	}

	/*
	 * If the limit is unspecified or above the memblock end, its effective
	 * value will be the memblock end. Set it explicitly to simplify further
	 * checks.
	 */
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

	if (base + size > limit) {
		ret = -EINVAL;
		pr_err("Size (%pa) of region at %pa exceeds limit (%pa)\n",
		       &size, &base, &limit);
		goto err;
	}

	/* Reserve memory */
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

		/*
		 * All pages in the reserved area must come from the same zone.
		 * If the requested region crosses the low/high memory boundary,
		 * try allocating from high memory first and fall back to low
		 * memory in case of failure.
		 */
		if (base < highmem_start && limit > highmem_start) {
			addr = memblock_alloc_range_nid(size, alignment,
					highmem_start, limit, nid, true);
			limit = highmem_start;
		}

		/*
		 * If there is enough memory, try a bottom-up allocation first.
		 * It will place the new cma area close to the start of the node
		 * and guarantee that the compaction is moving pages out of the
		 * cma area and not into it.
		 * Avoid using first 4GB to not interfere with constrained zones
		 * like DMA/DMA32.
		 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
		if (!memblock_bottom_up() && memblock_end >= SZ_4G + size) {
			memblock_set_bottom_up(true);
			addr = memblock_alloc_range_nid(size, alignment, SZ_4G,
							limit, nid, true);
			memblock_set_bottom_up(false);
		}
#endif

		if (!addr) {
			addr = memblock_alloc_range_nid(size, alignment, base,
					limit, nid, true);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

		/*
		 * kmemleak scans/reads tracked objects for pointers to other
		 * objects but this address isn't mapped and accessible
		 */
		kmemleak_ignore_phys(addr);
		base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
	if (ret)
		goto free_mem;

	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
		&base);
	return 0;

free_mem:
	memblock_free(base, size);
err:
	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}

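/*
 * A minimal usage sketch, kept under #if 0: how arch setup code might carve
 * out a CMA area anywhere in DRAM while memblock is still live (early arch
 * init).  The helper name, the area name and the 256 MiB size are
 * hypothetical.
 */
#if 0
static struct cma *example_boot_cma;

static void __init example_reserve_boot_cma(void)
{
	int ret;

	/* base/limit/alignment of 0: let memblock pick a suitable range. */
	ret = cma_declare_contiguous_nid(0, SZ_256M, 0, 0, 0, false,
					 "example_boot", &example_boot_cma,
					 NUMA_NO_NODE);
	if (ret)
		pr_warn("example_boot CMA reservation failed: %d\n", ret);
}
#endif
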
#ifdef CONFIG_CMA_DEBUG
static void cma_debug_show_areas(struct cma *cma)
{
	unsigned long next_zero_bit, next_set_bit, nr_zero;
	unsigned long start = 0;
	unsigned long nr_part, nr_total = 0;
	unsigned long nbits = cma_bitmap_maxno(cma);

	mutex_lock(&cma->lock);
	pr_info("number of available pages: ");
	for (;;) {
		next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start);
		if (next_zero_bit >= nbits)
			break;
		next_set_bit = find_next_bit(cma->bitmap, nbits, next_zero_bit);
		nr_zero = next_set_bit - next_zero_bit;
		nr_part = nr_zero << cma->order_per_bit;
		pr_cont("%s%lu@%lu", nr_total ? "+" : "", nr_part,
			next_zero_bit);
		nr_total += nr_part;
		start = next_zero_bit + nr_zero;
	}
	pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count);
	mutex_unlock(&cma->lock);
}
#else
static inline void cma_debug_show_areas(struct cma *cma) { }
#endif

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma: Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @gfp_mask: GFP mask to use during the cma allocation.
 *
 * This function allocates part of contiguous memory from the specified
 * contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
		       gfp_t gfp_mask)
{
	unsigned long mask, offset;
	unsigned long pfn = -1;
	unsigned long start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	size_t i;
	struct page *page = NULL;
	int ret = -ENOMEM;
	int num_attempts = 0;
	int max_retries = 5;
	s64 ts;
	struct cma_alloc_info cma_info = {0};

	trace_android_vh_cma_alloc_start(&ts);

	if (!cma || !cma->count || !cma->bitmap)
		goto out;

	pr_debug("%s(cma %p, count %zu, align %d gfp_mask 0x%x)\n", __func__,
		 (void *)cma, count, align, gfp_mask);

	if (!count)
		goto out;

	trace_cma_alloc_start(cma->name, count, align);

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	if (bitmap_count > bitmap_maxno)
		goto out;

	lru_cache_disable();
	for (;;) {
		struct acr_info info = {0};

		mutex_lock(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			if ((num_attempts < max_retries) && (ret == -EBUSY)) {
				mutex_unlock(&cma->lock);

				if (fatal_signal_pending(current) ||
				    (gfp_mask & __GFP_NORETRY))
					break;

				/*
				 * Page may be momentarily pinned by some other
				 * process which has been scheduled out, e.g.
				 * in exit path, during unmap call, or process
				 * fork and so cannot be freed there. Sleep
				 * for 100ms and retry the allocation.
				 */
				start = 0;
				ret = -ENOMEM;
				schedule_timeout_killable(msecs_to_jiffies(100));
				num_attempts++;
				continue;
			} else {
				mutex_unlock(&cma->lock);
				break;
			}
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA, gfp_mask, &info);
		cma_info.nr_migrated += info.nr_migrated;
		cma_info.nr_reclaimed += info.nr_reclaimed;
		cma_info.nr_mapped += info.nr_mapped;
		if (info.err) {
			if (info.err & ACR_ERR_ISOLATE)
				cma_info.nr_isolate_fail++;
			if (info.err & ACR_ERR_MIGRATE)
				cma_info.nr_migrate_fail++;
			if (info.err & ACR_ERR_TEST)
				cma_info.nr_test_fail++;
		}
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));

		trace_cma_alloc_busy_retry(cma->name, pfn, pfn_to_page(pfn),
					   count, align);

		if (info.failed_pfn && gfp_mask & __GFP_NORETRY) {
			/* try again from following failed page */
			start = (pfn_max_align_up(info.failed_pfn + 1) -
				 cma->base_pfn) >> cma->order_per_bit;
		} else {
			/* try again with a bit different memory target */
			start = bitmap_no + mask + 1;
		}
	}
	lru_cache_enable();

	trace_cma_alloc_finish(cma->name, pfn, page, count, align);
	trace_cma_alloc_info(cma->name, page, count, align, &cma_info);

	/*
	 * CMA can allocate multiple page blocks, which results in different
	 * blocks being marked with different tags. Reset the tags to ignore
	 * those page blocks.
	 */
	if (page) {
		for (i = 0; i < count; i++)
			page_kasan_tag_reset(page + i);
	}

	if (ret && !(gfp_mask & __GFP_NOWARN)) {
		pr_err("%s: %s: alloc failed, req-size: %zu pages, ret: %d\n",
		       __func__, cma->name, count, ret);
		cma_debug_show_areas(cma);
	}

	pr_debug("%s(): returned %p\n", __func__, page);
out:
	trace_android_vh_cma_alloc_finish(cma, page, count, align, gfp_mask, ts);
	if (page) {
		count_vm_event(CMA_ALLOC_SUCCESS);
		cma_sysfs_account_success_pages(cma, count);
	} else {
		count_vm_event(CMA_ALLOC_FAIL);
		if (cma)
			cma_sysfs_account_fail_pages(cma, count);
	}

	return page;
}
EXPORT_SYMBOL_GPL(cma_alloc);

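/*
 * A minimal usage sketch, kept under #if 0: the usual cma_alloc()/cma_release()
 * pairing as a driver might use it.  The helper names are hypothetical; a real
 * caller would typically obtain its struct cma from a platform-specific handle.
 */
#if 0
static struct page *example_grab_pages(struct cma *cma, unsigned int nr_pages)
{
	/* PAGE_SIZE-order alignment (align == 0); suppress the failure splat. */
	return cma_alloc(cma, nr_pages, 0, GFP_KERNEL | __GFP_NOWARN);
}

static void example_drop_pages(struct cma *cma, struct page *page,
			       unsigned int nr_pages)
{
	if (page)
		WARN_ON(!cma_release(cma, page, nr_pages));
}
#endif
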
/**
 * cma_release() - release allocated pages
 * @cma: Contiguous memory region for which the allocation was performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the contiguous
 * area and true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p, count %u)\n", __func__, (void *)pages, count);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);
	trace_cma_release(cma->name, pfn, pages, count);

	return true;
}
EXPORT_SYMBOL_GPL(cma_release);

int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = it(&cma_areas[i], data);

		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(cma_for_each_area);
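
/*
 * A minimal usage sketch, kept under #if 0: iterating over all registered CMA
 * areas with cma_for_each_area(), e.g. to log their layout.  The callback name
 * is hypothetical; returning 0 keeps the iteration going.
 */
#if 0
static int example_log_area(struct cma *cma, void *data)
{
	phys_addr_t base = cma_get_base(cma);

	pr_info("%s: base %pa, size %lu bytes\n",
		cma_get_name(cma), &base, cma_get_size(cma));
	return 0;	/* non-zero would stop the iteration */
}

static void example_log_all_areas(void)
{
	cma_for_each_area(example_log_area, NULL);
}
#endif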