page_isolation.c

// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/page_isolation.c
 */

#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
#include <linux/memory.h>
#include <linux/hugetlb.h>
#include <linux/page_owner.h>
#include <linux/migrate.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/page_isolation.h>

static int set_migratetype_isolate(struct page *page, int migratetype, int isol_flags)
{
        struct zone *zone = page_zone(page);
        struct page *unmovable;
        unsigned long flags;

        spin_lock_irqsave(&zone->lock, flags);

        /*
         * We assume the caller intended to SET migrate type to isolate.
         * If it is already set, then someone else must have raced and
         * set it before us.
         */
        if (is_migrate_isolate_page(page)) {
                spin_unlock_irqrestore(&zone->lock, flags);
                return -EBUSY;
        }

        /*
         * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
         * We just check MOVABLE pages.
         */
        unmovable = has_unmovable_pages(zone, page, migratetype, isol_flags);
        if (!unmovable) {
                unsigned long nr_pages;
                int mt = get_pageblock_migratetype(page);

                set_pageblock_migratetype(page, MIGRATE_ISOLATE);
                zone->nr_isolate_pageblock++;
                nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE,
                                                NULL);
                __mod_zone_freepage_state(zone, -nr_pages, mt);
                spin_unlock_irqrestore(&zone->lock, flags);
                return 0;
        }
        spin_unlock_irqrestore(&zone->lock, flags);
        if (isol_flags & REPORT_FAILURE) {
                /*
                 * printk() with zone->lock held will likely trigger a
                 * lockdep splat, so defer it here.
                 */
                dump_page(unmovable, "unmovable page");
        }
        return -EBUSY;
}

static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
        struct zone *zone;
        unsigned long flags, nr_pages;
        bool isolated_page = false;
        unsigned int order;
        unsigned long pfn, buddy_pfn;
        struct page *buddy;

        zone = page_zone(page);
        spin_lock_irqsave(&zone->lock, flags);
        if (!is_migrate_isolate_page(page))
                goto out;

        /*
         * Because a free page with order >= pageblock_order on an isolated
         * pageblock is not allowed to merge (the per-migratetype freepage
         * counting would otherwise go wrong), an unmerged free buddy page
         * may be left behind. move_freepages_block() only moves pages and
         * never merges them, so take a different approach: isolate the free
         * page and free it again, and the buddy allocator will merge it.
         */
        if (PageBuddy(page)) {
                order = buddy_order(page);
                if (order >= pageblock_order) {
                        pfn = page_to_pfn(page);
                        buddy_pfn = __find_buddy_pfn(pfn, order);
                        buddy = page + (buddy_pfn - pfn);

                        if (pfn_valid_within(buddy_pfn) &&
                            !is_migrate_isolate_page(buddy)) {
                                __isolate_free_page(page, order);
                                isolated_page = true;
                        }
                }
        }

        /*
         * If we isolated a free page with order >= pageblock_order above,
         * no free page can remain in the range, so the costly pageblock
         * scan for freepage moving can be skipped.
         *
         * We didn't actually touch any of the isolated pages, so place them
         * at the tail of the freelist. This is an optimization for memory
         * onlining - just onlined memory won't immediately be considered for
         * allocation.
         */
        if (!isolated_page) {
                nr_pages = move_freepages_block(zone, page, migratetype, NULL);
                __mod_zone_freepage_state(zone, nr_pages, migratetype);
        }
        set_pageblock_migratetype(page, migratetype);
        if (isolated_page)
                __putback_isolated_page(page, order, migratetype);
        zone->nr_isolate_pageblock--;
out:
        spin_unlock_irqrestore(&zone->lock, flags);
}
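
/*
 * Worked example for the deferred merge above (editorial illustration,
 * assuming pageblock_order == 9): suppose an isolated pageblock is entirely
 * free as a single order-9 buddy page, and its order-9 buddy in the
 * neighbouring, non-isolated pageblock is free as well. Merging them into an
 * order-10 page was suppressed while the pageblock was isolated, to keep the
 * per-migratetype freepage counts consistent. unset_migratetype_isolate()
 * takes the order-9 page off the free list with __isolate_free_page(),
 * restores the pageblock's migratetype, and re-frees the page with
 * __putback_isolated_page(), which lets the buddy allocator perform the
 * postponed merge into an order-10 page.
 */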

static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
        int i;

        for (i = 0; i < nr_pages; i++) {
                struct page *page;

                page = pfn_to_online_page(pfn + i);
                if (!page)
                        continue;
                return page;
        }
        return NULL;
}

/**
 * start_isolate_page_range() - make page-allocation-type of range of pages to
 * be MIGRATE_ISOLATE.
 * @start_pfn:		The lower PFN of the range to be isolated.
 * @end_pfn:		The upper PFN of the range to be isolated.
 *			start_pfn/end_pfn must be aligned to pageblock_order.
 * @migratetype:	Migrate type to set in error recovery.
 * @flags:		The following flags are allowed (they can be combined in
 *			a bit mask):
 *			MEMORY_OFFLINE - isolate to offline (!allocate) memory
 *					 e.g., skip over PageHWPoison() pages
 *					 and PageOffline() pages.
 *			REPORT_FAILURE - report details about the failure to
 *					 isolate the range.
 * @failed_pfn:		If non-NULL, the first online PFN of the pageblock that
 *			could not be isolated is stored here on failure.
 *
 * Making the page-allocation-type of the range MIGRATE_ISOLATE means free
 * pages in the range will never be allocated. Any free pages and pages freed
 * in the future will not be allocated again. If the specified range includes
 * migrate types other than MOVABLE or CMA, this will fail with -EBUSY. To
 * finally isolate all pages in the range, the caller has to free all pages in
 * the range; test_pages_isolated() can be used to test this.
 *
 * There is no high level synchronization mechanism that prevents two threads
 * from trying to isolate overlapping ranges. If this happens, one thread
 * will notice pageblocks in the overlapping range already set to isolate.
 * This happens in set_migratetype_isolate, and set_migratetype_isolate
 * returns an error. We then clean up by restoring the migration type on
 * pageblocks we may have modified and return -EBUSY to the caller. This
 * prevents two threads from simultaneously working on overlapping ranges.
 *
 * Please note that there is no strong synchronization with the page allocator
 * either. Pages might be freed while their page blocks are marked ISOLATED.
 * A call to drain_all_pages() after isolation can flush most of them. However,
 * in some cases pages might still end up on pcp lists, and that would allow
 * them to be allocated even though they are in fact isolated already.
 * Depending on how strong of a guarantee the caller needs, further
 * drain_all_pages() calls might be needed (e.g. __offline_pages() calls it
 * again after checking whether the range is isolated, before the next retry).
 *
 * Return: 0 on success and -EBUSY if any part of range cannot be isolated.
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
                             unsigned migratetype, int flags,
                             unsigned long *failed_pfn)
{
        unsigned long pfn;
        unsigned long undo_pfn;
        struct page *page;

        BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
        BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

        for (pfn = start_pfn;
             pfn < end_pfn;
             pfn += pageblock_nr_pages) {
                page = __first_valid_page(pfn, pageblock_nr_pages);
                if (page) {
                        if (set_migratetype_isolate(page, migratetype, flags)) {
                                undo_pfn = pfn;
                                if (failed_pfn)
                                        *failed_pfn = page_to_pfn(page);
                                goto undo;
                        }
                }
        }
        return 0;
undo:
        for (pfn = start_pfn;
             pfn < undo_pfn;
             pfn += pageblock_nr_pages) {
                struct page *page = pfn_to_online_page(pfn);

                if (!page)
                        continue;
                unset_migratetype_isolate(page, migratetype);
        }

        return -EBUSY;
}
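
/*
 * Illustrative sketch of the typical lifecycle built on this API; the
 * placeholder names start/end/zone and the migration step are editorial
 * assumptions, not code from this file. Callers such as alloc_contig_range()
 * follow roughly this pattern:
 *
 *	if (start_isolate_page_range(start, end, MIGRATE_MOVABLE, 0, NULL))
 *		return -EBUSY;
 *	drain_all_pages(zone);			(flush stale pcp pages)
 *	... migrate any in-use pages out of [start, end) ...
 *	ret = test_pages_isolated(start, end, 0, NULL) ? -EBUSY : 0;
 *	undo_isolate_page_range(start, end, MIGRATE_MOVABLE);
 */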

/*
 * Make isolated pages available again.
 */
void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
                             unsigned migratetype)
{
        unsigned long pfn;
        struct page *page;

        BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
        BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

        for (pfn = start_pfn;
             pfn < end_pfn;
             pfn += pageblock_nr_pages) {
                page = __first_valid_page(pfn, pageblock_nr_pages);
                if (!page || !is_migrate_isolate_page(page))
                        continue;
                unset_migratetype_isolate(page, migratetype);
        }
}

/*
 * Test whether all pages in the range are free (i.e. isolated) or not.
 * All pages in [start_pfn...end_pfn) must be in the same zone.
 * zone->lock must be held before calling this.
 *
 * Returns the last tested pfn.
 */
static unsigned long
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
                                  int flags)
{
        struct page *page;

        while (pfn < end_pfn) {
                if (!pfn_valid_within(pfn)) {
                        pfn++;
                        continue;
                }
                page = pfn_to_page(pfn);
                if (PageBuddy(page))
                        /*
                         * If the page is on a free list, it has to be on
                         * the correct MIGRATE_ISOLATE freelist. There is no
                         * simple way to verify that as VM_BUG_ON(), though.
                         */
                        pfn += 1 << buddy_order(page);
                else if ((flags & MEMORY_OFFLINE) && PageHWPoison(page))
                        /* A HWPoisoned page cannot be also PageBuddy */
                        pfn++;
                else if ((flags & MEMORY_OFFLINE) && PageOffline(page) &&
                         !page_count(page))
                        /*
                         * The responsible driver agreed to skip PageOffline()
                         * pages when offlining memory by dropping its
                         * reference in MEM_GOING_OFFLINE.
                         */
                        pfn++;
                else
                        break;
        }

        return pfn;
}
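
/*
 * Example of how the scan above advances (editorial illustration): a free
 * order-5 buddy page moves pfn forward by 32 pages in one step; with
 * MEMORY_OFFLINE set, a HWPoison page or an unreferenced PageOffline() page
 * moves it forward by one; the first page that is neither free nor skippable
 * breaks the loop, and the caller treats pfn < end_pfn as "range not fully
 * isolated".
 */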

/* Caller should ensure that requested range is in a single zone */
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
                        int isol_flags, unsigned long *failed_pfn)
{
        unsigned long pfn, flags;
        struct page *page;
        struct zone *zone;

        /*
         * Note: pageblock_nr_pages != MAX_ORDER_NR_PAGES, so a chunk of free
         * pages is not necessarily aligned to pageblock_nr_pages. Check the
         * migratetype of each pageblock first.
         */
        for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
                page = __first_valid_page(pfn, pageblock_nr_pages);
                if (page && !is_migrate_isolate_page(page))
                        break;
        }
        page = __first_valid_page(start_pfn, end_pfn - start_pfn);
        if ((pfn < end_pfn) || !page)
                return -EBUSY;

        /* Check all pages are free or marked as ISOLATED */
        zone = page_zone(page);
        spin_lock_irqsave(&zone->lock, flags);
        pfn = __test_page_isolated_in_pageblock(start_pfn, end_pfn, isol_flags);
        spin_unlock_irqrestore(&zone->lock, flags);

        trace_test_pages_isolated(start_pfn, end_pfn, pfn);
        if (pfn < end_pfn) {
                page_pinner_failure_detect(pfn_to_page(pfn));
                if (failed_pfn)
                        *failed_pfn = pfn;
                return -EBUSY;
        }

        return 0;
}
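
/*
 * Illustrative sketch (editorial, not verbatim from mm/memory_hotplug.c):
 * a caller that needs a strong guarantee, such as memory offlining,
 * re-drains the pcp lists and retries until the whole range tests as
 * isolated:
 *
 *	do {
 *		ret = test_pages_isolated(start_pfn, end_pfn,
 *					  MEMORY_OFFLINE, NULL);
 *		if (ret)
 *			drain_all_pages(zone);
 *	} while (ret);
 */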