memory_hotplug.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * linux/mm/memory_hotplug.c
  4. *
  5. * Copyright (C)
  6. */
  7. #include <linux/stddef.h>
  8. #include <linux/mm.h>
  9. #include <linux/sched/signal.h>
  10. #include <linux/swap.h>
  11. #include <linux/interrupt.h>
  12. #include <linux/pagemap.h>
  13. #include <linux/compiler.h>
  14. #include <linux/export.h>
  15. #include <linux/pagevec.h>
  16. #include <linux/writeback.h>
  17. #include <linux/slab.h>
  18. #include <linux/sysctl.h>
  19. #include <linux/cpu.h>
  20. #include <linux/memory.h>
  21. #include <linux/memremap.h>
  22. #include <linux/memory_hotplug.h>
  23. #include <linux/highmem.h>
  24. #include <linux/vmalloc.h>
  25. #include <linux/ioport.h>
  26. #include <linux/delay.h>
  27. #include <linux/migrate.h>
  28. #include <linux/page-isolation.h>
  29. #include <linux/pfn.h>
  30. #include <linux/suspend.h>
  31. #include <linux/mm_inline.h>
  32. #include <linux/firmware-map.h>
  33. #include <linux/stop_machine.h>
  34. #include <linux/hugetlb.h>
  35. #include <linux/memblock.h>
  36. #include <linux/compaction.h>
  37. #include <linux/rmap.h>
  38. #include <asm/tlbflush.h>
  39. #include "internal.h"
  40. #include "shuffle.h"
  41. /*
  42. * online_page_callback contains a pointer to the current page onlining
  43. * function. Initially it is generic_online_page(). If required, it can be
  44. * changed by calling set_online_page_callback() to register a callback and
  45. * restore_online_page_callback() to restore the generic callback.
  46. */
  47. static online_page_callback_t online_page_callback = generic_online_page;
  48. static DEFINE_MUTEX(online_page_callback_lock);
  49. DEFINE_STATIC_PERCPU_RWSEM(mem_hotplug_lock);
  50. void get_online_mems(void)
  51. {
  52. percpu_down_read(&mem_hotplug_lock);
  53. }
  54. void put_online_mems(void)
  55. {
  56. percpu_up_read(&mem_hotplug_lock);
  57. }
  58. bool movable_node_enabled = false;
  59. #ifndef CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE
  60. int memhp_default_online_type = MMOP_OFFLINE;
  61. #else
  62. int memhp_default_online_type = MMOP_ONLINE;
  63. #endif
  64. static int __init setup_memhp_default_state(char *str)
  65. {
  66. const int online_type = memhp_online_type_from_str(str);
  67. if (online_type >= 0)
  68. memhp_default_online_type = online_type;
  69. return 1;
  70. }
  71. __setup("memhp_default_state=", setup_memhp_default_state);
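/*
 * Usage sketch for the boot parameter handled above (the accepted values
 * mirror the online types parsed by memhp_online_type_from_str(); see
 * Documentation/admin-guide/kernel-parameters.txt):
 *
 *	memhp_default_state=online
 *	memhp_default_state=offline
 *
 * With "online", memory blocks created by hot-add are onlined automatically
 * via online_memory_block(); with "offline" they stay offline until user
 * space onlines them through sysfs.
 */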
  72. void mem_hotplug_begin(void)
  73. {
  74. cpus_read_lock();
  75. percpu_down_write(&mem_hotplug_lock);
  76. }
  77. void mem_hotplug_done(void)
  78. {
  79. percpu_up_write(&mem_hotplug_lock);
  80. cpus_read_unlock();
  81. }
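/*
 * A minimal usage sketch of the locking above (illustrative only, not part
 * of this file): readers that must keep memory from being hot-(un)plugged
 * underneath them bracket their work with get_online_mems()/put_online_mems(),
 * while the hotplug paths themselves run under
 * mem_hotplug_begin()/mem_hotplug_done().
 */
static unsigned long example_count_online_pages(unsigned long start_pfn,
						unsigned long nr_pages)
{
	unsigned long pfn, nr_online = 0;

	get_online_mems();
	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++)
		if (pfn_to_online_page(pfn))
			nr_online++;
	put_online_mems();

	return nr_online;
}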
  82. u64 max_mem_size = U64_MAX;
  83. /* add this memory to iomem resource */
  84. static struct resource *register_memory_resource(u64 start, u64 size,
  85. const char *resource_name)
  86. {
  87. struct resource *res;
  88. unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
  89. if (strcmp(resource_name, "System RAM"))
  90. flags |= IORESOURCE_SYSRAM_DRIVER_MANAGED;
  91. /*
  92. * Make sure value parsed from 'mem=' only restricts memory adding
  93. * while booting, so that memory hotplug won't be impacted. Please
  94. * refer to document of 'mem=' in kernel-parameters.txt for more
  95. * details.
  96. */
  97. if (start + size > max_mem_size && system_state < SYSTEM_RUNNING)
  98. return ERR_PTR(-E2BIG);
  99. /*
  100. * Request ownership of the new memory range. This might be
  101. * a child of an existing resource that was present but
  102. * not marked as busy.
  103. */
  104. res = __request_region(&iomem_resource, start, size,
  105. resource_name, flags);
  106. if (!res) {
  107. pr_debug("Unable to reserve System RAM region: %016llx->%016llx\n",
  108. start, start + size);
  109. return ERR_PTR(-EEXIST);
  110. }
  111. return res;
  112. }
  113. static void release_memory_resource(struct resource *res)
  114. {
  115. if (!res)
  116. return;
  117. release_resource(res);
  118. kfree(res);
  119. }
  120. #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
  121. void get_page_bootmem(unsigned long info, struct page *page,
  122. unsigned long type)
  123. {
  124. page->freelist = (void *)type;
  125. SetPagePrivate(page);
  126. set_page_private(page, info);
  127. page_ref_inc(page);
  128. }
  129. void put_page_bootmem(struct page *page)
  130. {
  131. unsigned long type;
  132. type = (unsigned long) page->freelist;
  133. BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
  134. type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);
  135. if (page_ref_dec_return(page) == 1) {
  136. page->freelist = NULL;
  137. ClearPagePrivate(page);
  138. set_page_private(page, 0);
  139. INIT_LIST_HEAD(&page->lru);
  140. free_reserved_page(page);
  141. }
  142. }
  143. #ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
  144. #ifndef CONFIG_SPARSEMEM_VMEMMAP
  145. static void register_page_bootmem_info_section(unsigned long start_pfn)
  146. {
  147. unsigned long mapsize, section_nr, i;
  148. struct mem_section *ms;
  149. struct page *page, *memmap;
  150. struct mem_section_usage *usage;
  151. section_nr = pfn_to_section_nr(start_pfn);
  152. ms = __nr_to_section(section_nr);
  153. /* Get section's memmap address */
  154. memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
  155. /*
  156. * Get page for the memmap's phys address
  157. * XXX: need more consideration for sparse_vmemmap...
  158. */
  159. page = virt_to_page(memmap);
  160. mapsize = sizeof(struct page) * PAGES_PER_SECTION;
  161. mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;
  162. /* remember memmap's page */
  163. for (i = 0; i < mapsize; i++, page++)
  164. get_page_bootmem(section_nr, page, SECTION_INFO);
  165. usage = ms->usage;
  166. page = virt_to_page(usage);
  167. mapsize = PAGE_ALIGN(mem_section_usage_size()) >> PAGE_SHIFT;
  168. for (i = 0; i < mapsize; i++, page++)
  169. get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
  170. }
  171. #else /* CONFIG_SPARSEMEM_VMEMMAP */
  172. static void register_page_bootmem_info_section(unsigned long start_pfn)
  173. {
  174. unsigned long mapsize, section_nr, i;
  175. struct mem_section *ms;
  176. struct page *page, *memmap;
  177. struct mem_section_usage *usage;
  178. section_nr = pfn_to_section_nr(start_pfn);
  179. ms = __nr_to_section(section_nr);
  180. memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
  181. register_page_bootmem_memmap(section_nr, memmap, PAGES_PER_SECTION);
  182. usage = ms->usage;
  183. page = virt_to_page(usage);
  184. mapsize = PAGE_ALIGN(mem_section_usage_size()) >> PAGE_SHIFT;
  185. for (i = 0; i < mapsize; i++, page++)
  186. get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
  187. }
  188. #endif /* !CONFIG_SPARSEMEM_VMEMMAP */
  189. void __init register_page_bootmem_info_node(struct pglist_data *pgdat)
  190. {
  191. unsigned long i, pfn, end_pfn, nr_pages;
  192. int node = pgdat->node_id;
  193. struct page *page;
  194. nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
  195. page = virt_to_page(pgdat);
  196. for (i = 0; i < nr_pages; i++, page++)
  197. get_page_bootmem(node, page, NODE_INFO);
  198. pfn = pgdat->node_start_pfn;
  199. end_pfn = pgdat_end_pfn(pgdat);
  200. /* register section info */
  201. for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
  202. /*
  203. * Some platforms can assign the same pfn to multiple nodes - on
  204. * node0 as well as nodeN. To avoid registering a pfn against
  205. * multiple nodes we check that this pfn does not already
  206. * reside in some other nodes.
  207. */
  208. if (pfn_valid(pfn) && (early_pfn_to_nid(pfn) == node))
  209. register_page_bootmem_info_section(pfn);
  210. }
  211. }
  212. #endif /* CONFIG_HAVE_BOOTMEM_INFO_NODE */
  213. static int check_pfn_span(unsigned long pfn, unsigned long nr_pages,
  214. const char *reason)
  215. {
  216. /*
  217. * Disallow all operations smaller than a sub-section and only
  218. * allow operations smaller than a section for
  219. * SPARSEMEM_VMEMMAP. Note that check_hotplug_memory_range()
  220. * enforces a larger memory_block_size_bytes() granularity for
  221. * memory that will be marked online, so this check should only
  222. * fire for direct arch_{add,remove}_memory() users outside of
  223. * add_memory_resource().
  224. */
  225. unsigned long min_align;
  226. if (IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP))
  227. min_align = PAGES_PER_SUBSECTION;
  228. else
  229. min_align = PAGES_PER_SECTION;
  230. if (!IS_ALIGNED(pfn, min_align)
  231. || !IS_ALIGNED(nr_pages, min_align)) {
  232. WARN(1, "Misaligned __%s_pages start: %#lx end: #%lx\n",
  233. reason, pfn, pfn + nr_pages - 1);
  234. return -EINVAL;
  235. }
  236. return 0;
  237. }
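/*
 * A worked example of the granularity rule above, assuming the common
 * x86-64 SPARSEMEM_VMEMMAP configuration (4 KiB pages, 128 MiB sections,
 * 2 MiB subsections): PAGES_PER_SECTION is 32768 and PAGES_PER_SUBSECTION
 * is 512, so with vmemmap a range must start and end on a 2 MiB boundary,
 * and without vmemmap on a 128 MiB boundary. The helper below merely
 * restates the check for illustration.
 */
static inline bool example_pfn_range_is_well_aligned(unsigned long pfn,
						     unsigned long nr_pages)
{
	const unsigned long min_align = IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP) ?
			PAGES_PER_SUBSECTION : PAGES_PER_SECTION;

	return IS_ALIGNED(pfn, min_align) && IS_ALIGNED(nr_pages, min_align);
}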
  238. static int check_hotplug_memory_addressable(unsigned long pfn,
  239. unsigned long nr_pages)
  240. {
  241. const u64 max_addr = PFN_PHYS(pfn + nr_pages) - 1;
  242. if (max_addr >> MAX_PHYSMEM_BITS) {
  243. const u64 max_allowed = (1ull << (MAX_PHYSMEM_BITS + 1)) - 1;
  244. WARN(1,
  245. "Hotplugged memory exceeds maximum addressable address, range=%#llx-%#llx, maximum=%#llx\n",
  246. (u64)PFN_PHYS(pfn), max_addr, max_allowed);
  247. return -E2BIG;
  248. }
  249. return 0;
  250. }
  251. /*
  252. * Reasonably generic function for adding memory. It is
  253. * expected that archs that support memory hotplug will
  254. * call this function after deciding the zone to which to
  255. * add the new pages.
  256. */
  257. int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,
  258. struct mhp_params *params)
  259. {
  260. const unsigned long end_pfn = pfn + nr_pages;
  261. unsigned long cur_nr_pages;
  262. int err;
  263. struct vmem_altmap *altmap = params->altmap;
  264. if (WARN_ON_ONCE(!params->pgprot.pgprot))
  265. return -EINVAL;
  266. err = check_hotplug_memory_addressable(pfn, nr_pages);
  267. if (err)
  268. return err;
  269. if (altmap) {
  270. /*
  271. * Validate altmap is within bounds of the total request
  272. */
  273. if (altmap->base_pfn != pfn
  274. || vmem_altmap_offset(altmap) > nr_pages) {
  275. pr_warn_once("memory add fail, invalid altmap\n");
  276. return -EINVAL;
  277. }
  278. altmap->alloc = 0;
  279. }
  280. err = check_pfn_span(pfn, nr_pages, "add");
  281. if (err)
  282. return err;
  283. for (; pfn < end_pfn; pfn += cur_nr_pages) {
  284. /* Select all remaining pages up to the next section boundary */
  285. cur_nr_pages = min(end_pfn - pfn,
  286. SECTION_ALIGN_UP(pfn + 1) - pfn);
  287. err = sparse_add_section(nid, pfn, cur_nr_pages, altmap);
  288. if (err)
  289. break;
  290. cond_resched();
  291. }
  292. vmemmap_populate_print_last();
  293. return err;
  294. }
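/*
 * A minimal sketch of how an architecture might call into __add_pages(),
 * loosely modelled on the simpler arch_add_memory() implementations: the
 * range is first mapped into the kernel page tables, then the pfn range is
 * handed to __add_pages(). example_map_kernel_range() is hypothetical and
 * stands in for the arch-specific mapping step.
 */
#if 0	/* illustrative sketch, not built */
int arch_add_memory(int nid, u64 start, u64 size,
		    struct mhp_params *params)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int rc;

	rc = example_map_kernel_range(start, size, params->pgprot);
	if (rc)
		return rc;

	return __add_pages(nid, start_pfn, nr_pages, params);
}
#endif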
  295. /* find the smallest valid pfn in the range [start_pfn, end_pfn) */
  296. static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,
  297. unsigned long start_pfn,
  298. unsigned long end_pfn)
  299. {
  300. for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SUBSECTION) {
  301. if (unlikely(!pfn_to_online_page(start_pfn)))
  302. continue;
  303. if (unlikely(pfn_to_nid(start_pfn) != nid))
  304. continue;
  305. if (zone != page_zone(pfn_to_page(start_pfn)))
  306. continue;
  307. return start_pfn;
  308. }
  309. return 0;
  310. }
  311. /* find the biggest valid pfn in the range [start_pfn, end_pfn). */
  312. static unsigned long find_biggest_section_pfn(int nid, struct zone *zone,
  313. unsigned long start_pfn,
  314. unsigned long end_pfn)
  315. {
  316. unsigned long pfn;
  317. /* pfn is the end pfn of a memory section. */
  318. pfn = end_pfn - 1;
  319. for (; pfn >= start_pfn; pfn -= PAGES_PER_SUBSECTION) {
  320. if (unlikely(!pfn_to_online_page(pfn)))
  321. continue;
  322. if (unlikely(pfn_to_nid(pfn) != nid))
  323. continue;
  324. if (zone != page_zone(pfn_to_page(pfn)))
  325. continue;
  326. return pfn;
  327. }
  328. return 0;
  329. }
  330. static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
  331. unsigned long end_pfn)
  332. {
  333. unsigned long pfn;
  334. int nid = zone_to_nid(zone);
  335. zone_span_writelock(zone);
  336. if (zone->zone_start_pfn == start_pfn) {
  337. /*
  338. * If the section is the smallest section in the zone, we need to
  339. * shrink zone->zone_start_pfn and zone->spanned_pages.
  340. * In this case, find the second smallest valid mem_section
  341. * and shrink the zone to start there.
  342. */
  343. pfn = find_smallest_section_pfn(nid, zone, end_pfn,
  344. zone_end_pfn(zone));
  345. if (pfn) {
  346. zone->spanned_pages = zone_end_pfn(zone) - pfn;
  347. zone->zone_start_pfn = pfn;
  348. } else {
  349. zone->zone_start_pfn = 0;
  350. zone->spanned_pages = 0;
  351. }
  352. } else if (zone_end_pfn(zone) == end_pfn) {
  353. /*
  354. * If the section is the biggest section in the zone, we need to
  355. * shrink zone->spanned_pages.
  356. * In this case, find the second biggest valid mem_section and
  357. * shrink the zone to end there.
  358. */
  359. pfn = find_biggest_section_pfn(nid, zone, zone->zone_start_pfn,
  360. start_pfn);
  361. if (pfn)
  362. zone->spanned_pages = pfn - zone->zone_start_pfn + 1;
  363. else {
  364. zone->zone_start_pfn = 0;
  365. zone->spanned_pages = 0;
  366. }
  367. }
  368. zone_span_writeunlock(zone);
  369. }
  370. static void update_pgdat_span(struct pglist_data *pgdat)
  371. {
  372. unsigned long node_start_pfn = 0, node_end_pfn = 0;
  373. struct zone *zone;
  374. for (zone = pgdat->node_zones;
  375. zone < pgdat->node_zones + MAX_NR_ZONES; zone++) {
  376. unsigned long zone_end_pfn = zone->zone_start_pfn +
  377. zone->spanned_pages;
  378. /* No need to lock the zones, they can't change. */
  379. if (!zone->spanned_pages)
  380. continue;
  381. if (!node_end_pfn) {
  382. node_start_pfn = zone->zone_start_pfn;
  383. node_end_pfn = zone_end_pfn;
  384. continue;
  385. }
  386. if (zone_end_pfn > node_end_pfn)
  387. node_end_pfn = zone_end_pfn;
  388. if (zone->zone_start_pfn < node_start_pfn)
  389. node_start_pfn = zone->zone_start_pfn;
  390. }
  391. pgdat->node_start_pfn = node_start_pfn;
  392. pgdat->node_spanned_pages = node_end_pfn - node_start_pfn;
  393. }
  394. void __ref remove_pfn_range_from_zone(struct zone *zone,
  395. unsigned long start_pfn,
  396. unsigned long nr_pages)
  397. {
  398. const unsigned long end_pfn = start_pfn + nr_pages;
  399. struct pglist_data *pgdat = zone->zone_pgdat;
  400. unsigned long pfn, cur_nr_pages, flags;
  401. /* Poison struct pages because they are now uninitialized again. */
  402. for (pfn = start_pfn; pfn < end_pfn; pfn += cur_nr_pages) {
  403. cond_resched();
  404. /* Select all remaining pages up to the next section boundary */
  405. cur_nr_pages =
  406. min(end_pfn - pfn, SECTION_ALIGN_UP(pfn + 1) - pfn);
  407. page_init_poison(pfn_to_page(pfn),
  408. sizeof(struct page) * cur_nr_pages);
  409. }
  410. #ifdef CONFIG_ZONE_DEVICE
  411. /*
  412. * Zone shrinking code cannot properly deal with ZONE_DEVICE. So
  413. * we will not try to shrink the zones - which is okay as
  414. * set_zone_contiguous() cannot deal with ZONE_DEVICE either way.
  415. */
  416. if (zone_idx(zone) == ZONE_DEVICE)
  417. return;
  418. #endif
  419. clear_zone_contiguous(zone);
  420. pgdat_resize_lock(zone->zone_pgdat, &flags);
  421. shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
  422. update_pgdat_span(pgdat);
  423. pgdat_resize_unlock(zone->zone_pgdat, &flags);
  424. set_zone_contiguous(zone);
  425. }
  426. static void __remove_section(unsigned long pfn, unsigned long nr_pages,
  427. unsigned long map_offset,
  428. struct vmem_altmap *altmap)
  429. {
  430. struct mem_section *ms = __pfn_to_section(pfn);
  431. if (WARN_ON_ONCE(!valid_section(ms)))
  432. return;
  433. sparse_remove_section(ms, pfn, nr_pages, map_offset, altmap);
  434. }
  435. /**
  436. * __remove_pages() - remove sections of pages
  437. * @pfn: starting pageframe (must be aligned to start of a section)
  438. * @nr_pages: number of pages to remove (must be multiple of section size)
  439. * @altmap: alternative device page map or %NULL if default memmap is used
  440. *
  441. * Generic helper function to remove section mappings and sysfs entries
  442. * for the section of the memory we are removing. Caller needs to make
  443. * sure that pages are marked reserved and zones are adjusted properly by
  444. * calling offline_pages().
  445. */
  446. void __remove_pages(unsigned long pfn, unsigned long nr_pages,
  447. struct vmem_altmap *altmap)
  448. {
  449. const unsigned long end_pfn = pfn + nr_pages;
  450. unsigned long cur_nr_pages;
  451. unsigned long map_offset = 0;
  452. map_offset = vmem_altmap_offset(altmap);
  453. if (check_pfn_span(pfn, nr_pages, "remove"))
  454. return;
  455. for (; pfn < end_pfn; pfn += cur_nr_pages) {
  456. cond_resched();
  457. /* Select all remaining pages up to the next section boundary */
  458. cur_nr_pages = min(end_pfn - pfn,
  459. SECTION_ALIGN_UP(pfn + 1) - pfn);
  460. __remove_section(pfn, cur_nr_pages, map_offset, altmap);
  461. map_offset = 0;
  462. }
  463. }
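/*
 * A sketch of the call sequence the comment above expects around
 * __remove_pages(): the range is offlined first (which adjusts the zones
 * and marks the pages reserved), and only then are the sections and their
 * memmap torn down via arch_remove_memory()/__remove_pages(). The helper
 * is illustrative; the real removal path is remove_memory().
 */
static int example_offline_then_remove(int nid, unsigned long start_pfn,
				       unsigned long nr_pages)
{
	int rc;

	rc = offline_pages(start_pfn, nr_pages);
	if (rc)
		return rc;

	arch_remove_memory(nid, PFN_PHYS(start_pfn), PFN_PHYS(nr_pages), NULL);
	return 0;
}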
  464. int set_online_page_callback(online_page_callback_t callback)
  465. {
  466. int rc = -EINVAL;
  467. get_online_mems();
  468. mutex_lock(&online_page_callback_lock);
  469. if (online_page_callback == generic_online_page) {
  470. online_page_callback = callback;
  471. rc = 0;
  472. }
  473. mutex_unlock(&online_page_callback_lock);
  474. put_online_mems();
  475. return rc;
  476. }
  477. EXPORT_SYMBOL_GPL(set_online_page_callback);
  478. int restore_online_page_callback(online_page_callback_t callback)
  479. {
  480. int rc = -EINVAL;
  481. get_online_mems();
  482. mutex_lock(&online_page_callback_lock);
  483. if (online_page_callback == callback) {
  484. online_page_callback = generic_online_page;
  485. rc = 0;
  486. }
  487. mutex_unlock(&online_page_callback_lock);
  488. put_online_mems();
  489. return rc;
  490. }
  491. EXPORT_SYMBOL_GPL(restore_online_page_callback);
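/*
 * A minimal sketch of a driver using the registration pair above to
 * intercept page onlining (as ballooning drivers do). The "example_" names
 * are hypothetical; a real callback would typically hold back some pages
 * and hand the rest to generic_online_page().
 */
static void example_online_page(struct page *page, unsigned int order)
{
	/* Simply forward to the default behaviour in this sketch. */
	generic_online_page(page, order);
}

static int example_register_online_callback(void)
{
	return set_online_page_callback(&example_online_page);
}

static void example_unregister_online_callback(void)
{
	restore_online_page_callback(&example_online_page);
}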
  492. void generic_online_page(struct page *page, unsigned int order)
  493. {
  494. /*
  495. * Freeing the page with debug_pagealloc enabled will try to unmap it,
  496. * so we should map it first. This is better than introducing a special
  497. * case in page freeing fast path.
  498. */
  499. debug_pagealloc_map_pages(page, 1 << order);
  500. __free_pages_core(page, order);
  501. totalram_pages_add(1UL << order);
  502. #ifdef CONFIG_HIGHMEM
  503. if (PageHighMem(page))
  504. totalhigh_pages_add(1UL << order);
  505. #endif
  506. }
  507. EXPORT_SYMBOL_GPL(generic_online_page);
  508. static void online_pages_range(unsigned long start_pfn, unsigned long nr_pages)
  509. {
  510. const unsigned long end_pfn = start_pfn + nr_pages;
  511. unsigned long pfn;
  512. /*
  513. * Online the pages in MAX_ORDER - 1 aligned chunks. The callback might
  514. * decide to not expose all pages to the buddy (e.g., expose them
  515. * later). We account all pages as being online and belonging to this
  516. * zone ("present").
  517. */
  518. for (pfn = start_pfn; pfn < end_pfn; pfn += MAX_ORDER_NR_PAGES)
  519. (*online_page_callback)(pfn_to_page(pfn), MAX_ORDER - 1);
  520. /* mark all involved sections as online */
  521. online_mem_sections(start_pfn, end_pfn);
  522. }
  523. /* check which state of node_states will be changed when online memory */
  524. static void node_states_check_changes_online(unsigned long nr_pages,
  525. struct zone *zone, struct memory_notify *arg)
  526. {
  527. int nid = zone_to_nid(zone);
  528. arg->status_change_nid = NUMA_NO_NODE;
  529. arg->status_change_nid_normal = NUMA_NO_NODE;
  530. arg->status_change_nid_high = NUMA_NO_NODE;
  531. if (!node_state(nid, N_MEMORY))
  532. arg->status_change_nid = nid;
  533. if (zone_idx(zone) <= ZONE_NORMAL && !node_state(nid, N_NORMAL_MEMORY))
  534. arg->status_change_nid_normal = nid;
  535. #ifdef CONFIG_HIGHMEM
  536. if (zone_idx(zone) <= ZONE_HIGHMEM && !node_state(nid, N_HIGH_MEMORY))
  537. arg->status_change_nid_high = nid;
  538. #endif
  539. }
  540. static void node_states_set_node(int node, struct memory_notify *arg)
  541. {
  542. if (arg->status_change_nid_normal >= 0)
  543. node_set_state(node, N_NORMAL_MEMORY);
  544. if (arg->status_change_nid_high >= 0)
  545. node_set_state(node, N_HIGH_MEMORY);
  546. if (arg->status_change_nid >= 0)
  547. node_set_state(node, N_MEMORY);
  548. }
  549. static void __meminit resize_zone_range(struct zone *zone, unsigned long start_pfn,
  550. unsigned long nr_pages)
  551. {
  552. unsigned long old_end_pfn = zone_end_pfn(zone);
  553. if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn)
  554. zone->zone_start_pfn = start_pfn;
  555. zone->spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - zone->zone_start_pfn;
  556. }
  557. static void __meminit resize_pgdat_range(struct pglist_data *pgdat, unsigned long start_pfn,
  558. unsigned long nr_pages)
  559. {
  560. unsigned long old_end_pfn = pgdat_end_pfn(pgdat);
  561. if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)
  562. pgdat->node_start_pfn = start_pfn;
  563. pgdat->node_spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - pgdat->node_start_pfn;
  564. }
  565. /*
  566. * Associate the pfn range with the given zone, initializing the memmaps
  567. * and resizing the pgdat/zone data to span the added pages. After this
  568. * call, all affected pages are PG_reserved.
  569. *
  570. * All aligned pageblocks are initialized to the specified migratetype
  571. * (usually MIGRATE_MOVABLE). Besides setting the migratetype, no related
  572. * zone stats (e.g., nr_isolate_pageblock) are touched.
  573. */
  574. void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
  575. unsigned long nr_pages,
  576. struct vmem_altmap *altmap, int migratetype)
  577. {
  578. struct pglist_data *pgdat = zone->zone_pgdat;
  579. int nid = pgdat->node_id;
  580. unsigned long flags;
  581. clear_zone_contiguous(zone);
  582. /* TODO: pgdat is taken irqsave while the zone is not; it used to be like that before. */
  583. pgdat_resize_lock(pgdat, &flags);
  584. zone_span_writelock(zone);
  585. if (zone_is_empty(zone))
  586. init_currently_empty_zone(zone, start_pfn, nr_pages);
  587. resize_zone_range(zone, start_pfn, nr_pages);
  588. zone_span_writeunlock(zone);
  589. resize_pgdat_range(pgdat, start_pfn, nr_pages);
  590. pgdat_resize_unlock(pgdat, &flags);
  591. /*
  592. * TODO: now we have a visible range of pages which are not associated
  593. * with their zone properly. Not nice, but set_pfnblock_flags_mask()
  594. * expects the zone to span the pfn range. All the pages in the range
  595. * are reserved, so nobody should be touching them; we should be safe.
  596. */
  597. memmap_init_zone(nr_pages, nid, zone_idx(zone), start_pfn, 0,
  598. MEMINIT_HOTPLUG, altmap, migratetype);
  599. set_zone_contiguous(zone);
  600. }
  601. /*
  602. * Returns a default kernel memory zone for the given pfn range.
  603. * If no kernel zone covers this pfn range it will automatically go
  604. * to the ZONE_NORMAL.
  605. */
  606. static struct zone *default_kernel_zone_for_pfn(int nid, unsigned long start_pfn,
  607. unsigned long nr_pages)
  608. {
  609. struct pglist_data *pgdat = NODE_DATA(nid);
  610. int zid;
  611. for (zid = 0; zid <= ZONE_NORMAL; zid++) {
  612. struct zone *zone = &pgdat->node_zones[zid];
  613. if (zone_intersects(zone, start_pfn, nr_pages))
  614. return zone;
  615. }
  616. return &pgdat->node_zones[ZONE_NORMAL];
  617. }
  618. static inline struct zone *default_zone_for_pfn(int nid, unsigned long start_pfn,
  619. unsigned long nr_pages)
  620. {
  621. struct zone *kernel_zone = default_kernel_zone_for_pfn(nid, start_pfn,
  622. nr_pages);
  623. struct zone *movable_zone = &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];
  624. bool in_kernel = zone_intersects(kernel_zone, start_pfn, nr_pages);
  625. bool in_movable = zone_intersects(movable_zone, start_pfn, nr_pages);
  626. /*
  627. * We inherit the existing zone in a simple case where zones do not
  628. * overlap in the given range
  629. */
  630. if (in_kernel ^ in_movable)
  631. return (in_kernel) ? kernel_zone : movable_zone;
  632. /*
  633. * If the range doesn't belong to any zone or two zones overlap in the
  634. * given range then we use movable zone only if movable_node is
  635. * enabled because we always online to a kernel zone by default.
  636. */
  637. return movable_node_enabled ? movable_zone : kernel_zone;
  638. }
  639. struct zone *zone_for_pfn_range(int online_type, int nid,
  640. unsigned long start_pfn, unsigned long nr_pages)
  641. {
  642. if (online_type == MMOP_ONLINE_KERNEL)
  643. return default_kernel_zone_for_pfn(nid, start_pfn, nr_pages);
  644. if (online_type == MMOP_ONLINE_MOVABLE)
  645. return &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];
  646. return default_zone_for_pfn(nid, start_pfn, nr_pages);
  647. }
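/*
 * A small illustrative helper showing how the zone selection above is
 * consumed: for a requested online type, look up the target zone and report
 * whether the range would land in ZONE_MOVABLE. online_pages() below does
 * the real equivalent of this lookup; the helper itself is a sketch only.
 */
static inline bool example_range_onlines_movable(int online_type, int nid,
						 unsigned long start_pfn,
						 unsigned long nr_pages)
{
	struct zone *zone = zone_for_pfn_range(online_type, nid, start_pfn,
					       nr_pages);

	return zone_idx(zone) == ZONE_MOVABLE;
}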
  648. int __ref online_pages(unsigned long pfn, unsigned long nr_pages,
  649. int online_type, int nid)
  650. {
  651. unsigned long flags;
  652. struct zone *zone;
  653. int need_zonelists_rebuild = 0;
  654. int ret;
  655. struct memory_notify arg;
  656. /* We can only online full sections (e.g., SECTION_IS_ONLINE) */
  657. if (WARN_ON_ONCE(!nr_pages ||
  658. !IS_ALIGNED(pfn | nr_pages, PAGES_PER_SECTION)))
  659. return -EINVAL;
  660. mem_hotplug_begin();
  661. /* associate pfn range with the zone */
  662. zone = zone_for_pfn_range(online_type, nid, pfn, nr_pages);
  663. move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_ISOLATE);
  664. arg.start_pfn = pfn;
  665. arg.nr_pages = nr_pages;
  666. node_states_check_changes_online(nr_pages, zone, &arg);
  667. ret = memory_notify(MEM_GOING_ONLINE, &arg);
  668. ret = notifier_to_errno(ret);
  669. if (ret)
  670. goto failed_addition;
  671. /*
  672. * Fixup the number of isolated pageblocks before marking the sections
  673. * onlining, such that undo_isolate_page_range() works correctly.
  674. */
  675. spin_lock_irqsave(&zone->lock, flags);
  676. zone->nr_isolate_pageblock += nr_pages / pageblock_nr_pages;
  677. spin_unlock_irqrestore(&zone->lock, flags);
  678. /*
  679. * If this zone is not populated, then it is not in zonelist.
  680. * This means the page allocator ignores this zone.
  681. * So, zonelist must be updated after online.
  682. */
  683. if (!populated_zone(zone)) {
  684. need_zonelists_rebuild = 1;
  685. setup_zone_pageset(zone);
  686. }
  687. online_pages_range(pfn, nr_pages);
  688. zone->present_pages += nr_pages;
  689. pgdat_resize_lock(zone->zone_pgdat, &flags);
  690. zone->zone_pgdat->node_present_pages += nr_pages;
  691. pgdat_resize_unlock(zone->zone_pgdat, &flags);
  692. node_states_set_node(nid, &arg);
  693. if (need_zonelists_rebuild)
  694. build_all_zonelists(NULL);
  695. zone_pcp_update(zone);
  696. /* Basic onlining is complete, allow allocation of onlined pages. */
  697. undo_isolate_page_range(pfn, pfn + nr_pages, MIGRATE_MOVABLE);
  698. /*
  699. * Freshly onlined pages aren't shuffled (e.g., all pages are placed to
  700. * the tail of the freelist when undoing isolation). Shuffle the whole
  701. * zone to make sure the just onlined pages are properly distributed
  702. * across the whole freelist - to create an initial shuffle.
  703. */
  704. shuffle_zone(zone);
  705. init_per_zone_wmark_min();
  706. kswapd_run(nid);
  707. kcompactd_run(nid);
  708. writeback_set_ratelimit();
  709. memory_notify(MEM_ONLINE, &arg);
  710. mem_hotplug_done();
  711. return 0;
  712. failed_addition:
  713. pr_debug("online_pages [mem %#010llx-%#010llx] failed\n",
  714. (unsigned long long) pfn << PAGE_SHIFT,
  715. (((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1);
  716. memory_notify(MEM_CANCEL_ONLINE, &arg);
  717. remove_pfn_range_from_zone(zone, pfn, nr_pages);
  718. mem_hotplug_done();
  719. return ret;
  720. }
  721. #endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
  722. static void reset_node_present_pages(pg_data_t *pgdat)
  723. {
  724. struct zone *z;
  725. for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
  726. z->present_pages = 0;
  727. pgdat->node_present_pages = 0;
  728. }
  729. /* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
  730. static pg_data_t __ref *hotadd_new_pgdat(int nid)
  731. {
  732. struct pglist_data *pgdat;
  733. pgdat = NODE_DATA(nid);
  734. if (!pgdat) {
  735. pgdat = arch_alloc_nodedata(nid);
  736. if (!pgdat)
  737. return NULL;
  738. pgdat->per_cpu_nodestats =
  739. alloc_percpu(struct per_cpu_nodestat);
  740. arch_refresh_nodedata(nid, pgdat);
  741. } else {
  742. int cpu;
  743. /*
  744. * Reset the nr_zones, order and highest_zoneidx before reuse.
  745. * Note that kswapd will init kswapd_highest_zoneidx properly
  746. * when it starts in the near future.
  747. */
  748. pgdat->nr_zones = 0;
  749. pgdat->kswapd_order = 0;
  750. pgdat->kswapd_highest_zoneidx = 0;
  751. for_each_online_cpu(cpu) {
  752. struct per_cpu_nodestat *p;
  753. p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);
  754. memset(p, 0, sizeof(*p));
  755. }
  756. }
  757. /* we can use NODE_DATA(nid) from here */
  758. pgdat->node_id = nid;
  759. pgdat->node_start_pfn = 0;
  760. /* Init the node's zones as empty zones; we don't have any present pages. */
  761. free_area_init_core_hotplug(nid);
  762. /*
  763. * The node we allocated has no zone fallback lists. To avoid
  764. * accessing an uninitialized zonelist, build it here.
  765. */
  766. build_all_zonelists(pgdat);
  767. /*
  768. * When memory is hot-added, all the memory is in offline state. So
  769. * clear all zones' present_pages because they will be updated in
  770. * online_pages() and offline_pages().
  771. */
  772. reset_node_managed_pages(pgdat);
  773. reset_node_present_pages(pgdat);
  774. return pgdat;
  775. }
  776. static void rollback_node_hotadd(int nid)
  777. {
  778. pg_data_t *pgdat = NODE_DATA(nid);
  779. arch_refresh_nodedata(nid, NULL);
  780. free_percpu(pgdat->per_cpu_nodestats);
  781. arch_free_nodedata(pgdat);
  782. }
  783. /**
  784. * try_online_node - online a node if offlined
  785. * @nid: the node ID
  786. * @set_node_online: Whether we want to online the node
  787. * Called by cpu_up() to online a node without onlined memory.
  788. *
  789. * Returns:
  790. * 1 -> a new node has been allocated
  791. * 0 -> the node is already online
  792. * -ENOMEM -> the node could not be allocated
  793. */
  794. static int __try_online_node(int nid, bool set_node_online)
  795. {
  796. pg_data_t *pgdat;
  797. int ret = 1;
  798. if (node_online(nid))
  799. return 0;
  800. pgdat = hotadd_new_pgdat(nid);
  801. if (!pgdat) {
  802. pr_err("Cannot online node %d due to NULL pgdat\n", nid);
  803. ret = -ENOMEM;
  804. goto out;
  805. }
  806. if (set_node_online) {
  807. node_set_online(nid);
  808. ret = register_one_node(nid);
  809. BUG_ON(ret);
  810. }
  811. out:
  812. return ret;
  813. }
  814. /*
  815. * Users of this function always want to online/register the node
  816. */
  817. int try_online_node(int nid)
  818. {
  819. int ret;
  820. mem_hotplug_begin();
  821. ret = __try_online_node(nid, true);
  822. mem_hotplug_done();
  823. return ret;
  824. }
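/*
 * A minimal sketch of the intended caller noted in the comment above: CPU
 * bring-up ensures the CPU's node has a pgdat and is marked online before
 * the CPU itself is onlined. The function name is hypothetical; the real
 * caller sits in the cpu_up() path.
 */
static int example_prepare_cpu_node(unsigned int cpu)
{
	int nid = cpu_to_node(cpu);

	return try_online_node(nid);
}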
  825. static int check_hotplug_memory_range(u64 start, u64 size)
  826. {
  827. /* memory range must be block size aligned */
  828. if (!size || !IS_ALIGNED(start, memory_block_size_bytes()) ||
  829. !IS_ALIGNED(size, memory_block_size_bytes())) {
  830. pr_err("Block size [%#lx] unaligned hotplug range: start %#llx, size %#llx",
  831. memory_block_size_bytes(), start, size);
  832. return -EINVAL;
  833. }
  834. return 0;
  835. }
  836. static int online_memory_block(struct memory_block *mem, void *arg)
  837. {
  838. mem->online_type = memhp_default_online_type;
  839. return device_online(&mem->dev);
  840. }
  841. /*
  842. * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
  843. * and online/offline operations (triggered e.g. by sysfs).
  844. *
  845. * we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG
  846. */
  847. int __ref add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags)
  848. {
  849. struct mhp_params params = { .pgprot = pgprot_mhp(PAGE_KERNEL) };
  850. u64 start, size;
  851. bool new_node = false;
  852. int ret;
  853. start = res->start;
  854. size = resource_size(res);
  855. ret = check_hotplug_memory_range(start, size);
  856. if (ret)
  857. return ret;
  858. if (!node_possible(nid)) {
  859. WARN(1, "node %d was absent from the node_possible_map\n", nid);
  860. return -EINVAL;
  861. }
  862. mem_hotplug_begin();
  863. if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK))
  864. memblock_add_node(start, size, nid);
  865. ret = __try_online_node(nid, false);
  866. if (ret < 0)
  867. goto error;
  868. new_node = ret;
  869. /* call arch's memory hotadd */
  870. ret = arch_add_memory(nid, start, size, &params);
  871. if (ret < 0)
  872. goto error;
  873. /* create memory block devices after memory was added */
  874. ret = create_memory_block_devices(start, size);
  875. if (ret) {
  876. arch_remove_memory(nid, start, size, NULL);
  877. goto error;
  878. }
  879. if (new_node) {
  880. /* If the sysfs file of the new node can't be created, CPUs on the node
  881. * can't be hot-added. There is no way to roll back now.
  882. * So, check with BUG_ON() to catch it reluctantly.
  883. * We online the node here; we can't roll back from here.
  884. */
  885. node_set_online(nid);
  886. ret = __register_one_node(nid);
  887. BUG_ON(ret);
  888. }
  889. /* link memory sections under this node.*/
  890. link_mem_sections(nid, PFN_DOWN(start), PFN_UP(start + size - 1),
  891. MEMINIT_HOTPLUG);
  892. /* create new memmap entry */
  893. if (!strcmp(res->name, "System RAM"))
  894. firmware_map_add_hotplug(start, start + size, "System RAM");
  895. /* device_online() will take the lock when calling online_pages() */
  896. mem_hotplug_done();
  897. /*
  898. * In case we're allowed to merge the resource, flag it and trigger
  899. * merging now that adding succeeded.
  900. */
  901. if (mhp_flags & MEMHP_MERGE_RESOURCE)
  902. merge_system_ram_resource(res);
  903. /* online pages if requested */
  904. if (memhp_default_online_type != MMOP_OFFLINE)
  905. walk_memory_blocks(start, size, NULL, online_memory_block);
  906. return ret;
  907. error:
  908. /* rollback pgdat allocation and others */
  909. if (new_node)
  910. rollback_node_hotadd(nid);
  911. if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK))
  912. memblock_remove(start, size);
  913. mem_hotplug_done();
  914. return ret;
  915. }
  916. /* requires device_hotplug_lock, see add_memory_resource() */
  917. int __ref __add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags)
  918. {
  919. struct resource *res;
  920. int ret;
  921. res = register_memory_resource(start, size, "System RAM");
  922. if (IS_ERR(res))
  923. return PTR_ERR(res);
  924. ret = add_memory_resource(nid, res, mhp_flags);
  925. if (ret < 0)
  926. release_memory_resource(res);
  927. return ret;
  928. }
  929. int add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags)
  930. {
  931. int rc;
  932. lock_device_hotplug();
  933. rc = __add_memory(nid, start, size, mhp_flags);
  934. unlock_device_hotplug();
  935. return rc;
  936. }
  937. EXPORT_SYMBOL_GPL(add_memory);
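/*
 * A minimal sketch of a driver hot-adding a discovered memory range via the
 * exported add_memory() above. The helper name is hypothetical and the range
 * is a placeholder; memory_add_physaddr_to_nid() supplies a node when the
 * caller has no better information. Onlining of the resulting memory blocks
 * then follows memhp_default_online_type or a later sysfs request.
 */
static int example_hot_add_range(u64 start, u64 size)
{
	int nid = memory_add_physaddr_to_nid(start);

	return add_memory(nid, start, size, MHP_NONE);
}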
  938. int add_memory_subsection(int nid, u64 start, u64 size)
  939. {
  940. struct mhp_params params = { .pgprot = PAGE_KERNEL };
  941. struct resource *res;
  942. int ret;
  943. if (!IS_ALIGNED(start, SUBSECTION_SIZE) ||
  944. !IS_ALIGNED(size, SUBSECTION_SIZE)) {
  945. pr_err("%s: start 0x%llx size 0x%llx not aligned to subsection size\n",
  946. __func__, start, size);
  947. return -EINVAL;
  948. }
  949. res = register_memory_resource(start, size, "System RAM");
  950. if (IS_ERR(res))
  951. return PTR_ERR(res);
  952. mem_hotplug_begin();
  953. nid = memory_add_physaddr_to_nid(start);
  954. if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK))
  955. memblock_add_node(start, size, nid);
  956. ret = arch_add_memory(nid, start, size, &params);
  957. if (ret) {
  958. if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK))
  959. memblock_remove(start, size);
  960. pr_err("%s failed to add subsection start 0x%llx size 0x%llx\n",
  961. __func__, start, size);
  962. }
  963. mem_hotplug_done();
  964. return ret;
  965. }
  966. EXPORT_SYMBOL_GPL(add_memory_subsection);
  967. /*
  968. * Add special, driver-managed memory to the system as system RAM. Such
  969. * memory is not exposed via the raw firmware-provided memmap as system
  970. * RAM, instead, it is detected and added by a driver - during cold boot,
  971. * after a reboot, and after kexec.
  972. *
  973. * Reasons why this memory should not be used for the initial memmap of a
  974. * kexec kernel or for placing kexec images:
  975. * - The booting kernel is in charge of determining how this memory will be
  976. * used (e.g., use persistent memory as system RAM)
  977. * - Coordination with a hypervisor is required before this memory
  978. * can be used (e.g., inaccessible parts).
  979. *
  980. * For this memory, no entries in /sys/firmware/memmap ("raw firmware-provided
  981. * memory map") are created. Also, the created memory resource is flagged
  982. * with IORESOURCE_SYSRAM_DRIVER_MANAGED, so in-kernel users can special-case
  983. * this memory as well (esp., not place kexec images onto it).
  984. *
  985. * The resource_name (visible via /proc/iomem) has to have the format
  986. * "System RAM ($DRIVER)".
  987. */
  988. int add_memory_driver_managed(int nid, u64 start, u64 size,
  989. const char *resource_name, mhp_t mhp_flags)
  990. {
  991. struct resource *res;
  992. int rc;
  993. if (!resource_name ||
  994. strstr(resource_name, "System RAM (") != resource_name ||
  995. resource_name[strlen(resource_name) - 1] != ')')
  996. return -EINVAL;
  997. lock_device_hotplug();
  998. res = register_memory_resource(start, size, resource_name);
  999. if (IS_ERR(res)) {
  1000. rc = PTR_ERR(res);
  1001. goto out_unlock;
  1002. }
  1003. rc = add_memory_resource(nid, res, mhp_flags);
  1004. if (rc < 0)
  1005. release_memory_resource(res);
  1006. out_unlock:
  1007. unlock_device_hotplug();
  1008. return rc;
  1009. }
  1010. EXPORT_SYMBOL_GPL(add_memory_driver_managed);
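/*
 * A minimal sketch following the naming rule described above: a driver
 * named "example" would expose its memory as "System RAM (example)" so that
 * the resource is flagged IORESOURCE_SYSRAM_DRIVER_MANAGED and kept out of
 * the kexec initial memmap. The driver name and helper are hypothetical;
 * dax/kmem is an in-tree user of this interface.
 */
static int example_add_driver_managed_ram(int nid, u64 start, u64 size)
{
	return add_memory_driver_managed(nid, start, size,
					 "System RAM (example)",
					 MEMHP_MERGE_RESOURCE);
}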
  1011. #ifdef CONFIG_MEMORY_HOTREMOVE
  1012. /*
  1013. * Confirm all pages in a range [start, end) belong to the same zone (skipping
  1014. * memory holes). When true, return the zone.
  1015. */
  1016. struct zone *test_pages_in_a_zone(unsigned long start_pfn,
  1017. unsigned long end_pfn)
  1018. {
  1019. unsigned long pfn, sec_end_pfn;
  1020. struct zone *zone = NULL;
  1021. struct page *page;
  1022. int i;
  1023. for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn + 1);
  1024. pfn < end_pfn;
  1025. pfn = sec_end_pfn, sec_end_pfn += PAGES_PER_SECTION) {
  1026. /* Make sure the memory section is present first */
  1027. if (!present_section_nr(pfn_to_section_nr(pfn)))
  1028. continue;
  1029. for (; pfn < sec_end_pfn && pfn < end_pfn;
  1030. pfn += MAX_ORDER_NR_PAGES) {
  1031. i = 0;
  1032. /* This is just a CONFIG_HOLES_IN_ZONE check.*/
  1033. while ((i < MAX_ORDER_NR_PAGES) &&
  1034. !pfn_valid_within(pfn + i))
  1035. i++;
  1036. if (i == MAX_ORDER_NR_PAGES || pfn + i >= end_pfn)
  1037. continue;
  1038. /* Check if we got outside of the zone */
  1039. if (zone && !zone_spans_pfn(zone, pfn + i))
  1040. return NULL;
  1041. page = pfn_to_page(pfn + i);
  1042. if (zone && page_zone(page) != zone)
  1043. return NULL;
  1044. zone = page_zone(page);
  1045. }
  1046. }
  1047. return zone;
  1048. }
  1049. /*
  1050. * Scan pfn range [start,end) to find movable/migratable pages (LRU pages,
  1051. * non-lru movable pages and hugepages). Will skip over most unmovable
  1052. * pages (esp., pages that can be skipped when offlining), but bail out on
  1053. * definitely unmovable pages.
  1054. *
  1055. * Returns:
  1056. * 0 in case a movable page is found and movable_pfn was updated.
  1057. * -ENOENT in case no movable page was found.
  1058. * -EBUSY in case a definitely unmovable page was found.
  1059. */
  1060. static int scan_movable_pages(unsigned long start, unsigned long end,
  1061. unsigned long *movable_pfn)
  1062. {
  1063. unsigned long pfn;
  1064. for (pfn = start; pfn < end; pfn++) {
  1065. struct page *page, *head;
  1066. unsigned long skip;
  1067. if (!pfn_valid(pfn))
  1068. continue;
  1069. page = pfn_to_page(pfn);
  1070. if (PageLRU(page))
  1071. goto found;
  1072. if (__PageMovable(page))
  1073. goto found;
  1074. /*
  1075. * PageOffline() pages that are not marked __PageMovable() and
  1076. * have a reference count > 0 (after MEM_GOING_OFFLINE) are
  1077. * definitely unmovable. If their reference count would be 0,
  1078. * they could at least be skipped when offlining memory.
  1079. */
  1080. if (PageOffline(page) && page_count(page))
  1081. return -EBUSY;
  1082. if (!PageHuge(page))
  1083. continue;
  1084. head = compound_head(page);
  1085. if (page_huge_active(head))
  1086. goto found;
  1087. skip = compound_nr(head) - (page - head);
  1088. pfn += skip - 1;
  1089. }
  1090. return -ENOENT;
  1091. found:
  1092. *movable_pfn = pfn;
  1093. return 0;
  1094. }
  1095. static int
  1096. do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
  1097. {
  1098. unsigned long pfn;
  1099. struct page *page, *head;
  1100. int ret = 0;
  1101. LIST_HEAD(source);
  1102. static DEFINE_RATELIMIT_STATE(migrate_rs, DEFAULT_RATELIMIT_INTERVAL,
  1103. DEFAULT_RATELIMIT_BURST);
  1104. for (pfn = start_pfn; pfn < end_pfn; pfn++) {
  1105. if (!pfn_valid(pfn))
  1106. continue;
  1107. page = pfn_to_page(pfn);
  1108. head = compound_head(page);
  1109. if (PageHuge(page)) {
  1110. pfn = page_to_pfn(head) + compound_nr(head) - 1;
  1111. isolate_huge_page(head, &source);
  1112. continue;
  1113. } else if (PageTransHuge(page))
  1114. pfn = page_to_pfn(head) + thp_nr_pages(page) - 1;
  1115. /*
  1116. * HWPoison pages have elevated reference counts so the migration would
  1117. * fail on them. It also doesn't make any sense to migrate them in the
  1118. * first place. Still try to unmap such a page in case it is still mapped
  1119. * (e.g. the current hwpoison implementation doesn't unmap KSM pages but keeps
  1120. * the unmap as the catch-all safety net).
  1121. */
  1122. if (PageHWPoison(page)) {
  1123. if (WARN_ON(PageLRU(page)))
  1124. isolate_lru_page(page);
  1125. if (page_mapped(page))
  1126. try_to_unmap(page, TTU_IGNORE_MLOCK);
  1127. continue;
  1128. }
  1129. if (!get_page_unless_zero(page))
  1130. continue;
  1131. /*
  1132. * We can skip free pages. And we can deal with pages on
  1133. * LRU and non-lru movable pages.
  1134. */
  1135. if (PageLRU(page))
  1136. ret = isolate_lru_page(page);
  1137. else
  1138. ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
  1139. if (!ret) { /* Success */
  1140. list_add_tail(&page->lru, &source);
  1141. if (!__PageMovable(page))
  1142. inc_node_page_state(page, NR_ISOLATED_ANON +
  1143. page_is_file_lru(page));
  1144. } else {
  1145. if (__ratelimit(&migrate_rs)) {
  1146. pr_warn("failed to isolate pfn %lx\n", pfn);
  1147. dump_page(page, "isolation failed");
  1148. }
  1149. }
  1150. put_page(page);
  1151. }
  1152. if (!list_empty(&source)) {
  1153. nodemask_t nmask = node_states[N_MEMORY];
  1154. struct migration_target_control mtc = {
  1155. .nmask = &nmask,
  1156. .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
  1157. };
  1158. /*
  1159. * We have checked that the migration range is in a single zone, so
  1160. * we can use the nid of the first page for all the others.
  1161. */
  1162. mtc.nid = page_to_nid(list_first_entry(&source, struct page, lru));
  1163. /*
  1164. * try to allocate from a different node but reuse this node
  1165. * if there are no other online nodes to be used (e.g. we are
  1166. * offlining a part of the only existing node)
  1167. */
  1168. node_clear(mtc.nid, nmask);
  1169. if (nodes_empty(nmask))
  1170. node_set(mtc.nid, nmask);
  1171. ret = migrate_pages(&source, alloc_migration_target, NULL,
  1172. (unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
  1173. if (ret) {
  1174. list_for_each_entry(page, &source, lru) {
  1175. if (__ratelimit(&migrate_rs)) {
  1176. pr_warn("migrating pfn %lx failed ret:%d\n",
  1177. page_to_pfn(page), ret);
  1178. dump_page(page, "migration failure");
  1179. }
  1180. }
  1181. putback_movable_pages(&source);
  1182. }
  1183. }
  1184. return ret;
  1185. }
  1186. static int __init cmdline_parse_movable_node(char *p)
  1187. {
  1188. movable_node_enabled = true;
  1189. return 0;
  1190. }
  1191. early_param("movable_node", cmdline_parse_movable_node);
  1192. /* check which state of node_states will be changed when offline memory */
  1193. static void node_states_check_changes_offline(unsigned long nr_pages,
  1194. struct zone *zone, struct memory_notify *arg)
  1195. {
  1196. struct pglist_data *pgdat = zone->zone_pgdat;
  1197. unsigned long present_pages = 0;
  1198. enum zone_type zt;
  1199. arg->status_change_nid = NUMA_NO_NODE;
  1200. arg->status_change_nid_normal = NUMA_NO_NODE;
  1201. arg->status_change_nid_high = NUMA_NO_NODE;
  1202. /*
  1203. * Check whether node_states[N_NORMAL_MEMORY] will be changed.
  1204. * If the memory to be offline is within the range
  1205. * [0..ZONE_NORMAL], and it is the last present memory there,
  1206. * the zones in that range will become empty after the offlining,
  1207. * thus we can determine that we need to clear the node from
  1208. * node_states[N_NORMAL_MEMORY].
  1209. */
  1210. for (zt = 0; zt <= ZONE_NORMAL; zt++)
  1211. present_pages += pgdat->node_zones[zt].present_pages;
  1212. if (zone_idx(zone) <= ZONE_NORMAL && nr_pages >= present_pages)
  1213. arg->status_change_nid_normal = zone_to_nid(zone);
  1214. #ifdef CONFIG_HIGHMEM
  1215. /*
  1216. * node_states[N_HIGH_MEMORY] contains nodes which
  1217. * have normal memory or high memory.
  1218. * Here we add the present_pages belonging to ZONE_HIGHMEM.
  1219. * If the zone is within the range of [0..ZONE_HIGHMEM), and
  1220. * we determine that the zones in that range become empty,
  1221. * we need to clear the node for N_HIGH_MEMORY.
  1222. */
  1223. present_pages += pgdat->node_zones[ZONE_HIGHMEM].present_pages;
  1224. if (zone_idx(zone) <= ZONE_HIGHMEM && nr_pages >= present_pages)
  1225. arg->status_change_nid_high = zone_to_nid(zone);
  1226. #endif
  1227. /*
  1228. * We have accounted the pages from [0..ZONE_NORMAL), and
  1229. * in case of CONFIG_HIGHMEM the pages from ZONE_HIGHMEM
  1230. * as well.
  1231. * Here we count the possible pages from ZONE_MOVABLE.
  1232. * If after having accounted all the pages, we see that the nr_pages
  1233. * to be offlined is over or equal to the accounted pages,
  1234. * we know that the node will become empty, and so, we can clear
  1235. * it for N_MEMORY as well.
  1236. */
  1237. present_pages += pgdat->node_zones[ZONE_MOVABLE].present_pages;
  1238. if (nr_pages >= present_pages)
  1239. arg->status_change_nid = zone_to_nid(zone);
  1240. }
  1241. static void node_states_clear_node(int node, struct memory_notify *arg)
  1242. {
  1243. if (arg->status_change_nid_normal >= 0)
  1244. node_clear_state(node, N_NORMAL_MEMORY);
  1245. if (arg->status_change_nid_high >= 0)
  1246. node_clear_state(node, N_HIGH_MEMORY);
  1247. if (arg->status_change_nid >= 0)
  1248. node_clear_state(node, N_MEMORY);
  1249. }
  1250. static int count_system_ram_pages_cb(unsigned long start_pfn,
  1251. unsigned long nr_pages, void *data)
  1252. {
  1253. unsigned long *nr_system_ram_pages = data;
  1254. *nr_system_ram_pages += nr_pages;
  1255. return 0;
  1256. }
  1257. int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages)
  1258. {
  1259. const unsigned long end_pfn = start_pfn + nr_pages;
  1260. unsigned long pfn, system_ram_pages = 0;
  1261. unsigned long flags;
  1262. struct zone *zone;
  1263. struct memory_notify arg;
  1264. int ret, node;
  1265. char *reason;
  1266. /* We can only offline full sections (e.g., SECTION_IS_ONLINE) */
  1267. if (WARN_ON_ONCE(!nr_pages ||
  1268. !IS_ALIGNED(start_pfn | nr_pages, PAGES_PER_SECTION)))
  1269. return -EINVAL;
  1270. mem_hotplug_begin();
  1271. /*
  1272. * Don't allow to offline memory blocks that contain holes.
  1273. * Consequently, memory blocks with holes can never get onlined
  1274. * via the hotplug path - online_pages() - as hotplugged memory has
  1275. * no holes. This way, we e.g., don't have to worry about marking
  1276. * memory holes PG_reserved, don't need pfn_valid() checks, and can
  1277. * avoid using walk_system_ram_range() later.
  1278. */
  1279. walk_system_ram_range(start_pfn, nr_pages, &system_ram_pages,
  1280. count_system_ram_pages_cb);
  1281. if (system_ram_pages != nr_pages) {
  1282. ret = -EINVAL;
  1283. reason = "memory holes";
  1284. goto failed_removal;
  1285. }
  1286. /* This makes hotplug much easier... and readable.
  1287. * We assume this for now. */
  1288. zone = test_pages_in_a_zone(start_pfn, end_pfn);
  1289. if (!zone) {
  1290. ret = -EINVAL;
  1291. reason = "multizone range";
  1292. goto failed_removal;
  1293. }
	node = zone_to_nid(zone);

	lru_cache_disable();

	/* set above range as isolated */
	ret = start_isolate_page_range(start_pfn, end_pfn,
				       MIGRATE_MOVABLE,
				       MEMORY_OFFLINE | REPORT_FAILURE, NULL);
	if (ret) {
		reason = "failure to isolate range";
		goto failed_removal_lru_cache_disabled;
	}

	drain_all_pages(zone);

	arg.start_pfn = start_pfn;
	arg.nr_pages = nr_pages;
	node_states_check_changes_offline(nr_pages, zone, &arg);

	ret = memory_notify(MEM_GOING_OFFLINE, &arg);
	ret = notifier_to_errno(ret);
	if (ret) {
		reason = "notifier failure";
		goto failed_removal_isolated;
	}

	do {
		pfn = start_pfn;
		do {
			if (signal_pending(current)) {
				ret = -EINTR;
				reason = "signal backoff";
				goto failed_removal_isolated;
			}

			cond_resched();

			ret = scan_movable_pages(pfn, end_pfn, &pfn);
			if (!ret) {
				/*
				 * TODO: fatal migration failures should bail
				 * out
				 */
				do_migrate_range(pfn, end_pfn);
			}
		} while (!ret);

		if (ret != -ENOENT) {
			reason = "unmovable page";
			goto failed_removal_isolated;
		}
		/*
		 * Dissolve free hugepages in the memory block before actually
		 * offlining, in order to keep hugetlbfs's object counting
		 * consistent.
		 */
		ret = dissolve_free_huge_pages(start_pfn, end_pfn);
		if (ret) {
			reason = "failure to dissolve huge pages";
			goto failed_removal_isolated;
		}

		/*
		 * per-cpu pages are drained after start_isolate_page_range,
		 * but if there are still pages that are not free, make sure
		 * that we drain again, because when we isolated the range we
		 * might have raced with another thread that was adding pages
		 * to the pcp list.
		 *
		 * Forward progress should still be guaranteed because pages
		 * on the pcp list can only belong to MOVABLE_ZONE, since
		 * has_unmovable_pages explicitly checks for PageBuddy on
		 * freed pages on other zones.
		 */
		ret = test_pages_isolated(start_pfn, end_pfn, MEMORY_OFFLINE, NULL);
		if (ret)
			drain_all_pages(zone);
	} while (ret);
	/* Mark all sections offline and remove free pages from the buddy. */
	__offline_isolated_pages(start_pfn, end_pfn);
	pr_info("Offlined Pages %ld\n", nr_pages);

	/*
	 * The memory sections are marked offline, and the pageblock flags
	 * effectively stale; nobody should be touching them. Fixup the number
	 * of isolated pageblocks, memory onlining will properly revert this.
	 */
	spin_lock_irqsave(&zone->lock, flags);
	zone->nr_isolate_pageblock -= nr_pages / pageblock_nr_pages;
	spin_unlock_irqrestore(&zone->lock, flags);

	lru_cache_enable();

	/* removal success */
	adjust_managed_page_count(pfn_to_page(start_pfn), -nr_pages);
	zone->present_pages -= nr_pages;

	pgdat_resize_lock(zone->zone_pgdat, &flags);
	zone->zone_pgdat->node_present_pages -= nr_pages;
	pgdat_resize_unlock(zone->zone_pgdat, &flags);

	init_per_zone_wmark_min();

	if (!populated_zone(zone)) {
		zone_pcp_reset(zone);
		build_all_zonelists(NULL);
	} else
		zone_pcp_update(zone);

	node_states_clear_node(node, &arg);
	if (arg.status_change_nid >= 0) {
		kswapd_stop(node);
		kcompactd_stop(node);
	}

	writeback_set_ratelimit();

	memory_notify(MEM_OFFLINE, &arg);
	remove_pfn_range_from_zone(zone, start_pfn, nr_pages);
	mem_hotplug_done();
	return 0;

failed_removal_isolated:
	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
	memory_notify(MEM_CANCEL_OFFLINE, &arg);
failed_removal_lru_cache_disabled:
	lru_cache_enable();
failed_removal:
	pr_debug("memory offlining [mem %#010llx-%#010llx] failed due to %s\n",
		 (unsigned long long) start_pfn << PAGE_SHIFT,
		 ((unsigned long long) end_pfn << PAGE_SHIFT) - 1,
		 reason);
	/* pushback to free area */
	mem_hotplug_done();
	return ret;
}
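
/*
 * Hypothetical caller sketch (not part of the original file): offline_pages()
 * only accepts a fully section-aligned, hole-free range, so a caller such as
 * the memory block device layer would typically offline one whole memory
 * block at a time. The helper name and the assumption that @block_start is
 * memory-block aligned are made up for illustration only.
 */
static int __maybe_unused example_offline_one_block(u64 block_start)
{
	const unsigned long start_pfn = PFN_DOWN(block_start);
	const unsigned long nr_pages = PFN_DOWN(memory_block_size_bytes());

	/* Fails with -EINVAL if the range is misaligned or contains holes. */
	return offline_pages(start_pfn, nr_pages);
}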
static int check_memblock_offlined_cb(struct memory_block *mem, void *arg)
{
	int ret = !is_memblock_offlined(mem);

	if (unlikely(ret)) {
		phys_addr_t beginpa, endpa;

		beginpa = PFN_PHYS(section_nr_to_pfn(mem->start_section_nr));
		endpa = beginpa + memory_block_size_bytes() - 1;
		pr_warn("removing memory fails, because memory [%pa-%pa] is onlined\n",
			&beginpa, &endpa);

		return -EBUSY;
	}
	return 0;
}

static int check_cpu_on_node(pg_data_t *pgdat)
{
	int cpu;

	for_each_present_cpu(cpu) {
		if (cpu_to_node(cpu) == pgdat->node_id)
			/*
			 * the cpu on this node isn't removed, and we can't
			 * offline this node.
			 */
			return -EBUSY;
	}

	return 0;
}

static int check_no_memblock_for_node_cb(struct memory_block *mem, void *arg)
{
	int nid = *(int *)arg;

	/*
	 * If a memory block belongs to multiple nodes, the stored nid is not
	 * reliable. However, such blocks are always online (e.g., cannot get
	 * offlined) and, therefore, are still spanned by the node.
	 */
	return mem->nid == nid ? -EEXIST : 0;
}
/**
 * try_offline_node
 * @nid: the node ID
 *
 * Offline a node if all memory sections and cpus of the node are removed.
 *
 * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
 * and online/offline operations before this call.
 */
void try_offline_node(int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);
	int rc;

	/*
	 * If the node still spans pages (especially ZONE_DEVICE), don't
	 * offline it. A node spans memory after move_pfn_range_to_zone(),
	 * e.g., after the memory block was onlined.
	 */
	if (pgdat->node_spanned_pages)
		return;

	/*
	 * Especially offline memory blocks might not be spanned by the
	 * node. They will get spanned by the node once they get onlined.
	 * However, they link to the node in sysfs and can get onlined later.
	 */
	rc = for_each_memory_block(&nid, check_no_memblock_for_node_cb);
	if (rc)
		return;

	if (check_cpu_on_node(pgdat))
		return;

	/*
	 * all memory/cpu of this node are removed, we can offline this
	 * node now.
	 */
	node_set_offline(nid);
	unregister_one_node(nid);
}
EXPORT_SYMBOL(try_offline_node);
static int __ref try_remove_memory(int nid, u64 start, u64 size)
{
	int rc = 0;

	BUG_ON(check_hotplug_memory_range(start, size));

	/*
	 * All memory blocks must be offlined before removing memory. Check
	 * whether all memory blocks in question are offline and return error
	 * if this is not the case.
	 */
	rc = walk_memory_blocks(start, size, NULL, check_memblock_offlined_cb);
	if (rc)
		return rc;

	/* remove memmap entry */
	firmware_map_remove(start, start + size, "System RAM");

	/*
	 * Memory block device removal under the device_hotplug_lock is
	 * a barrier against racing online attempts.
	 */
	remove_memory_block_devices(start, size);

	mem_hotplug_begin();

	arch_remove_memory(nid, start, size, NULL);

	if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK)) {
		memblock_free(start, size);
		memblock_remove(start, size);
	}

	release_mem_region_adjustable(start, size);

	try_offline_node(nid);

	mem_hotplug_done();
	return 0;
}
/**
 * __remove_memory
 * @nid: the node ID
 * @start: physical address of the region to remove
 * @size: size of the region to remove
 *
 * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
 * and online/offline operations before this call, as required by
 * try_offline_node().
 */
void __remove_memory(int nid, u64 start, u64 size)
{
	/*
	 * trigger BUG() if some memory is not offlined prior to calling this
	 * function
	 */
	if (try_remove_memory(nid, start, size))
		BUG();
}
/*
 * Remove memory if every memory block is offline; otherwise return -EBUSY
 * because some memory is still online.
 */
int remove_memory(int nid, u64 start, u64 size)
{
	int rc;

	lock_device_hotplug();
	rc = try_remove_memory(nid, start, size);
	unlock_device_hotplug();

	return rc;
}
EXPORT_SYMBOL_GPL(remove_memory);
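
/*
 * Hypothetical caller sketch (not part of the original file): remove_memory()
 * takes device_hotplug_lock internally, while __remove_memory() relies on the
 * caller already holding it (and BUG()s if part of the range is still
 * online). A caller that batches several hotplug operations under one lock
 * might therefore look like this; the helper name is made up.
 */
static void __maybe_unused example_remove_while_locked(int nid, u64 start, u64 size)
{
	lock_device_hotplug();
	/* ... other device hotplug work under the same lock ... */
	__remove_memory(nid, start, size);
	unlock_device_hotplug();
}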
int remove_memory_subsection(int nid, u64 start, u64 size)
{
	if (!IS_ALIGNED(start, SUBSECTION_SIZE) ||
	    !IS_ALIGNED(size, SUBSECTION_SIZE)) {
		pr_err("%s: start 0x%llx size 0x%llx not aligned to subsection size\n",
		       __func__, start, size);
		return -EINVAL;
	}

	mem_hotplug_begin();

	arch_remove_memory(nid, start, size, NULL);

	if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK))
		memblock_remove(start, size);

	release_mem_region_adjustable(start, size);

	mem_hotplug_done();

	return 0;
}
EXPORT_SYMBOL_GPL(remove_memory_subsection);
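
/*
 * Hypothetical caller sketch (not part of the original file): removing a
 * single sub-section-sized range that was previously hot-added. Both the
 * start address and the size must be SUBSECTION_SIZE aligned, otherwise
 * remove_memory_subsection() returns -EINVAL. The helper name and the
 * choice of exactly one subsection are for illustration only.
 */
static int __maybe_unused example_remove_one_subsection(int nid, u64 start)
{
	return remove_memory_subsection(nid, start, SUBSECTION_SIZE);
}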
/*
 * Try to offline and remove a memory block. Might take a long time to
 * finish in case memory is still in use. Primarily useful for memory devices
 * that logically unplugged all memory (so it's no longer in use) and want to
 * offline + remove the memory block.
 */
int offline_and_remove_memory(int nid, u64 start, u64 size)
{
	struct memory_block *mem;
	int rc = -EINVAL;

	if (!IS_ALIGNED(start, memory_block_size_bytes()) ||
	    size != memory_block_size_bytes())
		return rc;

	lock_device_hotplug();
	mem = find_memory_block(__pfn_to_section(PFN_DOWN(start)));
	if (mem)
		rc = device_offline(&mem->dev);
	/* Ignore if the device is already offline. */
	if (rc > 0)
		rc = 0;

	/*
	 * If we succeeded in offlining the memory block, remove it. This
	 * cannot fail, as the block cannot get onlined again in the meantime.
	 */
	if (!rc) {
		rc = try_remove_memory(nid, start, size);
		WARN_ON_ONCE(rc);
	}
	unlock_device_hotplug();

	return rc;
}
EXPORT_SYMBOL_GPL(offline_and_remove_memory);
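
/*
 * Hypothetical caller sketch (not part of the original file): a
 * paravirtualized memory driver that has logically unplugged a block could
 * try to offline and remove it in one call. The range must cover exactly one
 * aligned memory block, and failures are expected whenever the memory is
 * still in use. The helper name is made up.
 */
static int __maybe_unused example_unplug_one_block(int nid, u64 block_start)
{
	return offline_and_remove_memory(nid, block_start,
					 memory_block_size_bytes());
}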
#endif /* CONFIG_MEMORY_HOTREMOVE */