sparse.c

// SPDX-License-Identifier: GPL-2.0
/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"
#include <asm/dma.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section	- memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section **mem_section;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
        ____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);
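
/*
 * Added commentary (not from the original source): with
 * CONFIG_SPARSEMEM_EXTREME the table above is a dynamically allocated
 * array of root pointers, each root covering SECTIONS_PER_ROOT sections,
 * so a lookup resolves roughly as
 *
 *	mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK]
 *
 * (see __nr_to_section()), while the !EXTREME variant is the same
 * two-level indexing over a statically sized array.
 */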
#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(const struct page *page)
{
        return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

static void set_section_nid(unsigned long section_nr, int nid)
{
        section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
static noinline struct mem_section __ref *sparse_index_alloc(int nid)
{
        struct mem_section *section = NULL;
        unsigned long array_size = SECTIONS_PER_ROOT *
                                   sizeof(struct mem_section);

        if (slab_is_available()) {
                section = kzalloc_node(array_size, GFP_KERNEL, nid);
        } else {
                section = memblock_alloc_node(array_size, SMP_CACHE_BYTES,
                                              nid);
                if (!section)
                        panic("%s: Failed to allocate %lu bytes nid=%d\n",
                              __func__, array_size, nid);
        }

        return section;
}

static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
        unsigned long root = SECTION_NR_TO_ROOT(section_nr);
        struct mem_section *section;

        /*
         * An existing section is possible in the sub-section hotplug
         * case. First hot-add instantiates, follow-on hot-add reuses
         * the existing section.
         *
         * The mem_hotplug_lock resolves the apparent race below.
         */
        if (mem_section[root])
                return 0;

        section = sparse_index_alloc(nid);
        if (!section)
                return -ENOMEM;

        mem_section[root] = section;

        return 0;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
        return 0;
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
unsigned long __section_nr(struct mem_section *ms)
{
        unsigned long root_nr;
        struct mem_section *root = NULL;

        for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
                root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
                if (!root)
                        continue;

                if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
                        break;
        }

        VM_BUG_ON(!root);

        return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}
#else
unsigned long __section_nr(struct mem_section *ms)
{
        return (unsigned long)(ms - mem_section[0]);
}
#endif

/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node. This keeps us from having to use another data structure. The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
        return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
        return (section->section_mem_map >> SECTION_NID_SHIFT);
}
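
/*
 * Added illustration (assumed example values): for nid == 2 during early
 * boot, sparse_encode_early_nid() stores 2 << SECTION_NID_SHIFT in
 * section_mem_map, the low bits stay free for flags such as
 * SECTION_MARKED_PRESENT, and sparse_early_nid() recovers 2 by shifting
 * the flag bits back out. The encoding is replaced by the real mem_map
 * in sparse_init_one_section().
 */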
/* Validate the physical addressing limitations of the model */
void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
                                                unsigned long *end_pfn)
{
        unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);

        /*
         * Sanity checks - do not allow an architecture to pass
         * in larger pfns than the maximum scope of sparsemem:
         */
        if (*start_pfn > max_sparsemem_pfn) {
                mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
                        "Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
                        *start_pfn, *end_pfn, max_sparsemem_pfn);
                WARN_ON_ONCE(1);
                *start_pfn = max_sparsemem_pfn;
                *end_pfn = max_sparsemem_pfn;
        } else if (*end_pfn > max_sparsemem_pfn) {
                mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
                        "End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
                        *start_pfn, *end_pfn, max_sparsemem_pfn);
                WARN_ON_ONCE(1);
                *end_pfn = max_sparsemem_pfn;
        }
}

/*
 * There are a number of times that we loop over NR_MEM_SECTIONS,
 * looking for section_present() on each. But, when we have very
 * large physical address spaces, NR_MEM_SECTIONS can also be
 * very large which makes the loops quite long.
 *
 * Keeping track of this gives us an easy way to break out of
 * those loops early.
 */
unsigned long __highest_present_section_nr;
static void section_mark_present(struct mem_section *ms)
{
        unsigned long section_nr = __section_nr(ms);

        if (section_nr > __highest_present_section_nr)
                __highest_present_section_nr = section_nr;

        ms->section_mem_map |= SECTION_MARKED_PRESENT;
}

#define for_each_present_section_nr(start, section_nr)		\
        for (section_nr = next_present_section_nr(start-1);	\
             ((section_nr != -1) &&				\
              (section_nr <= __highest_present_section_nr));	\
             section_nr = next_present_section_nr(section_nr))

static inline unsigned long first_present_section_nr(void)
{
        return next_present_section_nr(-1);
}
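
/*
 * Added note: next_present_section_nr() returns -1 once it runs past
 * __highest_present_section_nr, so the iterator above never has to scan
 * the full NR_MEM_SECTIONS range. An illustrative walk over every
 * present section would be:
 *
 *	for_each_present_section_nr(0, pnum)
 *		do_something(__nr_to_section(pnum));
 */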
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static void subsection_mask_set(unsigned long *map, unsigned long pfn,
                unsigned long nr_pages)
{
        int idx = subsection_map_index(pfn);
        int end = subsection_map_index(pfn + nr_pages - 1);

        bitmap_set(map, idx, end - idx + 1);
}

void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)
{
        int end_sec = pfn_to_section_nr(pfn + nr_pages - 1);
        unsigned long nr, start_sec = pfn_to_section_nr(pfn);

        if (!nr_pages)
                return;

        for (nr = start_sec; nr <= end_sec; nr++) {
                struct mem_section *ms;
                unsigned long pfns;

                pfns = min(nr_pages, PAGES_PER_SECTION
                                - (pfn & ~PAGE_SECTION_MASK));
                ms = __nr_to_section(nr);
                subsection_mask_set(ms->usage->subsection_map, pfn, pfns);

                pr_debug("%s: sec: %lu pfns: %lu set(%d, %d)\n", __func__, nr,
                                pfns, subsection_map_index(pfn),
                                subsection_map_index(pfn + pfns - 1));

                pfn += pfns;
                nr_pages -= pfns;
        }
}
#else
void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)
{
}
#endif
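
/*
 * Added worked example (x86-64 defaults assumed: 128MB sections, 2MB
 * subsections, so SUBSECTIONS_PER_SECTION == 64): marking the first 2MB
 * of a section present sets only bit 0 of ms->usage->subsection_map; the
 * remaining bits are set later if and when the corresponding ranges show
 * up via subsection_map_init() or fill_subsection_map().
 */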
/* Record a memory area against a node. */
static void __init memory_present(int nid, unsigned long start, unsigned long end)
{
        unsigned long pfn;

#ifdef CONFIG_SPARSEMEM_EXTREME
        if (unlikely(!mem_section)) {
                unsigned long size, align;

                size = sizeof(struct mem_section*) * NR_SECTION_ROOTS;
                align = 1 << (INTERNODE_CACHE_SHIFT);
                mem_section = memblock_alloc(size, align);
                if (!mem_section)
                        panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
                              __func__, size, align);
        }
#endif

        start &= PAGE_SECTION_MASK;
        mminit_validate_memmodel_limits(&start, &end);
        for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
                unsigned long section = pfn_to_section_nr(pfn);
                struct mem_section *ms;

                sparse_index_init(section, nid);
                set_section_nid(section, nid);

                ms = __nr_to_section(section);
                if (!ms->section_mem_map) {
                        ms->section_mem_map = sparse_encode_early_nid(nid) |
                                                        SECTION_IS_ONLINE;
                        section_mark_present(ms);
                }
        }
}

/*
 * Mark all memblocks as present using memory_present().
 * This is a convenience function that is useful to mark all of the
 * system's memory as present during initialization.
 */
static void __init memblocks_present(void)
{
        unsigned long start, end;
        int i, nid;

        for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid)
                memory_present(nid, start, end);
}

/*
 * Subtle, we encode the real pfn into the mem_map such that
 * the identity pfn - section_mem_map will return the actual
 * physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
        unsigned long coded_mem_map =
                (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
        BUILD_BUG_ON(SECTION_MAP_LAST_BIT > (1UL<<PFN_SECTION_SHIFT));
        BUG_ON(coded_mem_map & ~SECTION_MAP_MASK);
        return coded_mem_map;
}

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Decode mem_map from the coded memmap
 */
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
        /* mask off the extra low bits of information */
        coded_mem_map &= SECTION_MAP_MASK;
        return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}
#endif /* CONFIG_MEMORY_HOTPLUG */
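
/*
 * Added commentary: the stored value is effectively "virtual address of
 * the section's first struct page minus the section's first pfn", which
 * lets the sparsemem pfn_to_page() implementation compute
 *
 *	__section_mem_map_addr(ms) + pfn
 *
 * directly, without subtracting the section start pfn on every lookup.
 */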

static void __meminit sparse_init_one_section(struct mem_section *ms,
                unsigned long pnum, struct page *mem_map,
                struct mem_section_usage *usage, unsigned long flags)
{
        ms->section_mem_map &= ~SECTION_MAP_MASK;
        ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum)
                | SECTION_HAS_MEM_MAP | flags;
        ms->usage = usage;
}

static unsigned long usemap_size(void)
{
        return BITS_TO_LONGS(SECTION_BLOCKFLAGS_BITS) * sizeof(unsigned long);
}

size_t mem_section_usage_size(void)
{
        return sizeof(struct mem_section_usage) + usemap_size();
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static struct mem_section_usage * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
                                         unsigned long size)
{
        struct mem_section_usage *usage;
        unsigned long goal, limit;
        int nid;
        /*
         * A page may contain usemaps for other sections preventing the
         * page being freed and making a section unremovable while
         * other sections referencing the usemap remain active. Similarly,
         * a pgdat can prevent a section being removed. If section A
         * contains a pgdat and section B contains the usemap, both
         * sections become inter-dependent. This allocates usemaps
         * from the same section as the pgdat where possible to avoid
         * this problem.
         */
        goal = __pa(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
        limit = goal + (1UL << PA_SECTION_SHIFT);
        nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
again:
        usage = memblock_alloc_try_nid(size, SMP_CACHE_BYTES, goal, limit, nid);
        if (!usage && limit) {
                limit = 0;
                goto again;
        }
        return usage;
}
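
/*
 * Added note: the retry above first confines the usemap to the single
 * physical section that holds the pgdat ([goal, goal + 1 section)); only
 * if that fails does it clear the limit and fall back to any address
 * memblock can satisfy on that node.
 */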

static void __init check_usemap_section_nr(int nid,
                struct mem_section_usage *usage)
{
        unsigned long usemap_snr, pgdat_snr;
        static unsigned long old_usemap_snr;
        static unsigned long old_pgdat_snr;
        struct pglist_data *pgdat = NODE_DATA(nid);
        int usemap_nid;

        /* First call */
        if (!old_usemap_snr) {
                old_usemap_snr = NR_MEM_SECTIONS;
                old_pgdat_snr = NR_MEM_SECTIONS;
        }

        usemap_snr = pfn_to_section_nr(__pa(usage) >> PAGE_SHIFT);
        pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
        if (usemap_snr == pgdat_snr)
                return;

        if (old_usemap_snr == usemap_snr && old_pgdat_snr == pgdat_snr)
                /* skip redundant message */
                return;

        old_usemap_snr = usemap_snr;
        old_pgdat_snr = pgdat_snr;

        usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr));
        if (usemap_nid != nid) {
                pr_info("node %d must be removed before remove section %ld\n",
                        nid, usemap_snr);
                return;
        }
        /*
         * There is a circular dependency.
         * Some platforms allow un-removable sections because they will just
         * gather other removable sections for dynamic partitioning.
         * Just report the un-removable section's number here.
         */
        pr_info("Section %ld and %ld (node %d) have a circular dependency on usemap and pgdat allocations\n",
                usemap_snr, pgdat_snr, nid);
}
#else
static struct mem_section_usage * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
                                         unsigned long size)
{
        return memblock_alloc_node(size, SMP_CACHE_BYTES, pgdat->node_id);
}

static void __init check_usemap_section_nr(int nid,
                struct mem_section_usage *usage)
{
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static unsigned long __init section_map_size(void)
{
        return ALIGN(sizeof(struct page) * PAGES_PER_SECTION, PMD_SIZE);
}

#else
static unsigned long __init section_map_size(void)
{
        return PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
}

struct page __init *__populate_section_memmap(unsigned long pfn,
                unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
{
        unsigned long size = section_map_size();
        struct page *map = sparse_buffer_alloc(size);
        phys_addr_t addr = __pa(MAX_DMA_ADDRESS);

        if (map)
                return map;

        map = memblock_alloc_try_nid_raw(size, size, addr,
                                          MEMBLOCK_ALLOC_ACCESSIBLE, nid);
        if (!map)
                panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa\n",
                      __func__, size, PAGE_SIZE, nid, &addr);

        return map;
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

static void *sparsemap_buf __meminitdata;
static void *sparsemap_buf_end __meminitdata;

static inline void __meminit sparse_buffer_free(unsigned long size)
{
        WARN_ON(!sparsemap_buf || size == 0);
        memblock_free_early(__pa(sparsemap_buf), size);
}

static void __init sparse_buffer_init(unsigned long size, int nid)
{
        phys_addr_t addr = __pa(MAX_DMA_ADDRESS);

        WARN_ON(sparsemap_buf); /* forgot to call sparse_buffer_fini()? */
        /*
         * Pre-allocated buffer is mainly used by __populate_section_memmap
         * and we want it to be properly aligned to the section size - this is
         * especially the case for VMEMMAP which maps memmap to PMDs
         */
        sparsemap_buf = memblock_alloc_exact_nid_raw(size, section_map_size(),
                addr, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
        sparsemap_buf_end = sparsemap_buf + size;
}

static void __init sparse_buffer_fini(void)
{
        unsigned long size = sparsemap_buf_end - sparsemap_buf;

        if (sparsemap_buf && size > 0)
                sparse_buffer_free(size);
        sparsemap_buf = NULL;
}

void * __meminit sparse_buffer_alloc(unsigned long size)
{
        void *ptr = NULL;

        if (sparsemap_buf) {
                ptr = (void *) roundup((unsigned long)sparsemap_buf, size);
                if (ptr + size > sparsemap_buf_end)
                        ptr = NULL;
                else {
                        /* Free redundant aligned space */
                        if ((unsigned long)(ptr - sparsemap_buf) > 0)
                                sparse_buffer_free((unsigned long)(ptr - sparsemap_buf));
                        sparsemap_buf = ptr + size;
                }
        }
        return ptr;
}
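
/*
 * Added usage sketch (mirrors sparse_init_nid() below): the boot-time
 * pattern is
 *
 *	sparse_buffer_init(map_count * section_map_size(), nid);
 *	map = sparse_buffer_alloc(section_map_size());	(once per section)
 *	sparse_buffer_fini();
 *
 * i.e. one large memblock allocation carved up by this bump allocator,
 * with alignment padding and any unused tail returned to memblock via
 * sparse_buffer_free().
 */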

void __weak __meminit vmemmap_populate_print_last(void)
{
}

/*
 * Initialize sparse on a specific node. The node spans [pnum_begin, pnum_end),
 * and the number of present sections in this node is map_count.
 */
static void __init sparse_init_nid(int nid, unsigned long pnum_begin,
                                   unsigned long pnum_end,
                                   unsigned long map_count)
{
        struct mem_section_usage *usage;
        unsigned long pnum;
        struct page *map;

        usage = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nid),
                        mem_section_usage_size() * map_count);
        if (!usage) {
                pr_err("%s: node[%d] usemap allocation failed", __func__, nid);
                goto failed;
        }
        sparse_buffer_init(map_count * section_map_size(), nid);
        for_each_present_section_nr(pnum_begin, pnum) {
                unsigned long pfn = section_nr_to_pfn(pnum);

                if (pnum >= pnum_end)
                        break;

                map = __populate_section_memmap(pfn, PAGES_PER_SECTION,
                                nid, NULL);
                if (!map) {
                        pr_err("%s: node[%d] memory map backing failed. Some memory will not be available.",
                               __func__, nid);
                        pnum_begin = pnum;
                        sparse_buffer_fini();
                        goto failed;
                }
                check_usemap_section_nr(nid, usage);
                sparse_init_one_section(__nr_to_section(pnum), pnum, map, usage,
                                SECTION_IS_EARLY);
                usage = (void *) usage + mem_section_usage_size();
        }
        sparse_buffer_fini();
        return;
failed:
        /* We failed to allocate, mark all the following pnums as not present */
        for_each_present_section_nr(pnum_begin, pnum) {
                struct mem_section *ms;

                if (pnum >= pnum_end)
                        break;
                ms = __nr_to_section(pnum);
                ms->section_mem_map = 0;
        }
}

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
        unsigned long pnum_end, pnum_begin, map_count = 1;
        int nid_begin;

        memblocks_present();

        pnum_begin = first_present_section_nr();
        nid_begin = sparse_early_nid(__nr_to_section(pnum_begin));

        /* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
        set_pageblock_order();

        for_each_present_section_nr(pnum_begin + 1, pnum_end) {
                int nid = sparse_early_nid(__nr_to_section(pnum_end));

                if (nid == nid_begin) {
                        map_count++;
                        continue;
                }
                /* Init node with sections in range [pnum_begin, pnum_end) */
                sparse_init_nid(nid_begin, pnum_begin, pnum_end, map_count);
                nid_begin = nid;
                pnum_begin = pnum_end;
                map_count = 1;
        }
        /* cover the last node */
        sparse_init_nid(nid_begin, pnum_begin, pnum_end, map_count);
        vmemmap_populate_print_last();
}
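
/*
 * Added walk-through (illustrative numbers): if sections 0-3 are present
 * on node 0 and sections 4-7 on node 1, the loop above calls
 * sparse_init_nid(0, 0, 4, 4) when it first sees node 1, and the trailing
 * sparse_init_nid(1, 4, pnum_end, 4) call covers node 1 once the iterator
 * is exhausted (pnum_end is then -1, so that last call is bounded only by
 * the present-section walk).
 */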

#ifdef CONFIG_MEMORY_HOTPLUG

/* Mark all memory sections within the pfn range as online */
void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long pfn;

        for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
                unsigned long section_nr = pfn_to_section_nr(pfn);
                struct mem_section *ms;

                /* onlining code should never touch invalid ranges */
                if (WARN_ON(!valid_section_nr(section_nr)))
                        continue;

                ms = __nr_to_section(section_nr);
                ms->section_mem_map |= SECTION_IS_ONLINE;
        }
}

#ifdef CONFIG_MEMORY_HOTREMOVE
/* Mark all memory sections within the pfn range as offline */
void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long pfn;

        for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
                unsigned long section_nr = pfn_to_section_nr(pfn);
                struct mem_section *ms;

                /*
                 * TODO this needs some double checking. Offlining code makes
                 * sure to check pfn_valid but those checks might be just bogus
                 */
                if (WARN_ON(!valid_section_nr(section_nr)))
                        continue;

                ms = __nr_to_section(section_nr);
                ms->section_mem_map &= ~SECTION_IS_ONLINE;
        }
}
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static struct page * __meminit populate_section_memmap(unsigned long pfn,
                unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
{
        return __populate_section_memmap(pfn, nr_pages, nid, altmap);
}

static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
                struct vmem_altmap *altmap)
{
        unsigned long start = (unsigned long) pfn_to_page(pfn);
        unsigned long end = start + nr_pages * sizeof(struct page);

        vmemmap_free(start, end, altmap);
}

static void free_map_bootmem(struct page *memmap)
{
        unsigned long start = (unsigned long)memmap;
        unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);

        vmemmap_free(start, end, NULL);
}

static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
        DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
        DECLARE_BITMAP(tmp, SUBSECTIONS_PER_SECTION) = { 0 };
        struct mem_section *ms = __pfn_to_section(pfn);
        unsigned long *subsection_map = ms->usage
                ? &ms->usage->subsection_map[0] : NULL;

        subsection_mask_set(map, pfn, nr_pages);
        if (subsection_map)
                bitmap_and(tmp, map, subsection_map, SUBSECTIONS_PER_SECTION);

        if (WARN(!subsection_map || !bitmap_equal(tmp, map, SUBSECTIONS_PER_SECTION),
                                "section already deactivated (%#lx + %ld)\n",
                                pfn, nr_pages))
                return -EINVAL;

        bitmap_xor(subsection_map, map, subsection_map, SUBSECTIONS_PER_SECTION);

        return 0;
}

static bool is_subsection_map_empty(struct mem_section *ms)
{
        return bitmap_empty(&ms->usage->subsection_map[0],
                            SUBSECTIONS_PER_SECTION);
}

static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
        struct mem_section *ms = __pfn_to_section(pfn);
        DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
        unsigned long *subsection_map;
        int rc = 0;

        subsection_mask_set(map, pfn, nr_pages);

        subsection_map = &ms->usage->subsection_map[0];

        if (bitmap_empty(map, SUBSECTIONS_PER_SECTION))
                rc = -EINVAL;
        else if (bitmap_intersects(map, subsection_map, SUBSECTIONS_PER_SECTION))
                rc = -EEXIST;
        else
                bitmap_or(subsection_map, map, subsection_map,
                                SUBSECTIONS_PER_SECTION);

        return rc;
}
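
/*
 * Added commentary: fill_subsection_map() and clear_subsection_map() are
 * the hotplug-time counterparts of subsection_map_init() above - e.g.
 * hot-adding one 2MB subsection (typical x86-64 size, assumed here) into
 * an otherwise empty section sets a single bit, removing it clears that
 * bit again, and is_subsection_map_empty() then tells section_deactivate()
 * whether the whole section can be torn down.
 */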
#else
struct page * __meminit populate_section_memmap(unsigned long pfn,
                unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
{
        return kvmalloc_node(array_size(sizeof(struct page),
                                        PAGES_PER_SECTION), GFP_KERNEL, nid);
}

static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
                struct vmem_altmap *altmap)
{
        kvfree(pfn_to_page(pfn));
}

static void free_map_bootmem(struct page *memmap)
{
        unsigned long maps_section_nr, removing_section_nr, i;
        unsigned long magic, nr_pages;
        struct page *page = virt_to_page(memmap);

        nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
                >> PAGE_SHIFT;

        for (i = 0; i < nr_pages; i++, page++) {
                magic = (unsigned long) page->freelist;

                BUG_ON(magic == NODE_INFO);

                maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
                removing_section_nr = page_private(page);

                /*
                 * When this function is called, the removing section is
                 * in a logically offlined state. This means all pages are
                 * isolated from the page allocator. If the removing section's
                 * memmap is placed in the same section, it must not be freed.
                 * If it were freed, the page allocator could allocate it, and
                 * it will be removed physically soon afterwards.
                 */
                if (maps_section_nr != removing_section_nr)
                        put_page_bootmem(page);
        }
}

static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
        return 0;
}

static bool is_subsection_map_empty(struct mem_section *ms)
{
        return true;
}

static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
        return 0;
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

/*
 * To deactivate a memory region, there are 3 cases to handle across
 * two configurations (SPARSEMEM_VMEMMAP={y,n}):
 *
 * 1. deactivation of a partial hot-added section (only possible in
 *    the SPARSEMEM_VMEMMAP=y case).
 *      a) section was present at memory init.
 *      b) section was hot-added post memory init.
 * 2. deactivation of a complete hot-added section.
 * 3. deactivation of a complete section from memory init.
 *
 * For 1, when the subsection_map is not empty we will not be freeing the
 * usage map, but we still need to free the vmemmap range.
 *
 * For 2 and 3, the SPARSEMEM_VMEMMAP={y,n} cases are unified.
 */
static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
                struct vmem_altmap *altmap)
{
        struct mem_section *ms = __pfn_to_section(pfn);
        bool section_is_early = early_section(ms);
        struct page *memmap = NULL;
        bool empty;

        if (clear_subsection_map(pfn, nr_pages))
                return;

        empty = is_subsection_map_empty(ms);
        if (empty) {
                unsigned long section_nr = pfn_to_section_nr(pfn);

                /*
                 * When removing an early section, the usage map is kept (as the
                 * usage maps of other sections fall into the same page). It
                 * will be re-used when re-adding the section - which is then no
                 * longer an early section. If the usage map is PageReserved, it
                 * was allocated during boot.
                 */
                if (!PageReserved(virt_to_page(ms->usage))) {
                        kfree(ms->usage);
                        ms->usage = NULL;
                }
                memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
                /*
                 * Mark the section invalid so that valid_section()
                 * returns false. This prevents code from dereferencing
                 * the ms->usage array.
                 */
                ms->section_mem_map &= ~SECTION_HAS_MEM_MAP;
        }

        /*
         * The memmap of early sections is always fully populated. See
         * section_activate() and pfn_valid().
         */
        if (!section_is_early)
                depopulate_section_memmap(pfn, nr_pages, altmap);
        else if (memmap)
                free_map_bootmem(memmap);

        if (empty)
                ms->section_mem_map = (unsigned long)NULL;
}

static struct page * __meminit section_activate(int nid, unsigned long pfn,
                unsigned long nr_pages, struct vmem_altmap *altmap)
{
        struct mem_section *ms = __pfn_to_section(pfn);
        struct mem_section_usage *usage = NULL;
        struct page *memmap;
        int rc = 0;

        if (!ms->usage) {
                usage = kzalloc(mem_section_usage_size(), GFP_KERNEL);
                if (!usage)
                        return ERR_PTR(-ENOMEM);
                ms->usage = usage;
        }

        rc = fill_subsection_map(pfn, nr_pages);
        if (rc) {
                if (usage)
                        ms->usage = NULL;
                kfree(usage);
                return ERR_PTR(rc);
        }

        /*
         * The early init code does not consider partially populated
         * initial sections; it simply assumes that memory will never be
         * referenced. If we hot-add memory into such a section then we
         * do not need to populate the memmap and can simply reuse what
         * is already there.
         */
        if (nr_pages < PAGES_PER_SECTION && early_section(ms))
                return pfn_to_page(pfn);

        memmap = populate_section_memmap(pfn, nr_pages, nid, altmap);
        if (!memmap) {
                section_deactivate(pfn, nr_pages, altmap);
                return ERR_PTR(-ENOMEM);
        }

        return memmap;
}
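
/*
 * Added notes: the error path above resets ms->usage only when this call
 * allocated it, so a pre-existing usage map survives a failed sub-section
 * add; and the early-section shortcut returns the already populated
 * memmap via pfn_to_page() instead of allocating a fresh one.
 */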

/**
 * sparse_add_section - add a memory section, or populate an existing one
 * @nid: The node to add section on
 * @start_pfn: start pfn of the memory range
 * @nr_pages: number of pfns to add in the section
 * @altmap: device page map
 *
 * This is only intended for hotplug.
 *
 * Note that only VMEMMAP supports sub-section aligned hotplug;
 * the proper alignment and size are gated by check_pfn_span().
 *
 * Return:
 * * 0		- On success.
 * * -EEXIST	- Section already present.
 * * -ENOMEM	- Out of memory.
 */
int __meminit sparse_add_section(int nid, unsigned long start_pfn,
                unsigned long nr_pages, struct vmem_altmap *altmap)
{
        unsigned long section_nr = pfn_to_section_nr(start_pfn);
        struct mem_section *ms;
        struct page *memmap;
        int ret;

        ret = sparse_index_init(section_nr, nid);
        if (ret < 0)
                return ret;

        memmap = section_activate(nid, start_pfn, nr_pages, altmap);
        if (IS_ERR(memmap))
                return PTR_ERR(memmap);

        /*
         * Poison uninitialized struct pages in order to catch invalid flags
         * combinations.
         */
        page_init_poison(memmap, sizeof(struct page) * nr_pages);

        ms = __nr_to_section(section_nr);
        set_section_nid(section_nr, nid);
        section_mark_present(ms);

        /* Align memmap to section boundary in the subsection case */
        if (section_nr_to_pfn(section_nr) != start_pfn)
                memmap = pfn_to_page(section_nr_to_pfn(section_nr));
        sparse_init_one_section(ms, section_nr, memmap, ms->usage, 0);

        return 0;
}
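
/*
 * Added usage sketch (assumed calling context, see mm/memory_hotplug.c):
 * the hotplug core typically invokes this once per section-aligned chunk,
 * roughly
 *
 *	for each (sub)section chunk of [start_pfn, start_pfn + nr_pages)
 *		err = sparse_add_section(nid, pfn, pfns, altmap);
 *
 * after check_pfn_span() has rejected spans with improper alignment.
 */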

#ifdef CONFIG_MEMORY_FAILURE
static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
        int i;

        /*
         * A further optimization is to have per section refcounted
         * num_poisoned_pages.  But that would need more space per memmap, so
         * for now just do a quick global check to speed up this routine in the
         * absence of bad pages.
         */
        if (atomic_long_read(&num_poisoned_pages) == 0)
                return;

        for (i = 0; i < nr_pages; i++) {
                if (PageHWPoison(&memmap[i])) {
                        num_poisoned_pages_dec();
                        ClearPageHWPoison(&memmap[i]);
                }
        }
}
#else
static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
}
#endif

void sparse_remove_section(struct mem_section *ms, unsigned long pfn,
                unsigned long nr_pages, unsigned long map_offset,
                struct vmem_altmap *altmap)
{
        clear_hwpoisoned_pages(pfn_to_page(pfn) + map_offset,
                        nr_pages - map_offset);
        section_deactivate(pfn, nr_pages, altmap);
}
#endif /* CONFIG_MEMORY_HOTPLUG */