// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm_init.c - Memory initialisation verification and debugging
 *
 * Copyright 2008 IBM Corporation, 2008
 * Author Mel Gorman <mel@csn.ul.ie>
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/kobject.h>
#include <linux/export.h>
#include <linux/memory.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/mman.h>
#include "internal.h"

#ifdef CONFIG_DEBUG_MEMORY_INIT
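/*
 * Verbosity of the verification below: 0 disables it, while higher
 * values (MMINIT_VERIFY, MMINIT_TRACE; see the mminit_level enum in
 * mm/internal.h) progressively enable the reports in this file.
 * Set at boot via the "mminit_loglevel=" early parameter parsed below.
 */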
int __meminitdata mminit_loglevel;

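/*
 * !CONFIG_SPARSEMEM builds may not supply a SECTIONS_SHIFT; fall back
 * to 0 so the shift report below still compiles.
 */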
#ifndef SECTIONS_SHIFT
#define SECTIONS_SHIFT 0
#endif

/* The zonelists are simply reported, validation is manual. */
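/*
 * Each populated zone is reported as one line of node:zone pairs in
 * fallback order, e.g. "mminit::zonelist general 0:Normal = 0:Normal
 * 0:DMA32 0:DMA"; the exact zones depend on the configuration.
 */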
void __init mminit_verify_zonelist(void)
{
        int nid;

        if (mminit_loglevel < MMINIT_VERIFY)
                return;

        for_each_online_node(nid) {
                pg_data_t *pgdat = NODE_DATA(nid);
                struct zone *zone;
                struct zoneref *z;
                struct zonelist *zonelist;
                int i, listid, zoneid;

                BUILD_BUG_ON(MAX_ZONELISTS > 2);
                for (i = 0; i < MAX_ZONELISTS * MAX_NR_ZONES; i++) {
                        /* Identify the zone and nodelist */
                        zoneid = i % MAX_NR_ZONES;
                        listid = i / MAX_NR_ZONES;
                        zonelist = &pgdat->node_zonelists[listid];
                        zone = &pgdat->node_zones[zoneid];
                        if (!populated_zone(zone))
                                continue;

                        /* Print information about the zonelist */
                        printk(KERN_DEBUG "mminit::zonelist %s %d:%s = ",
                                listid > 0 ? "thisnode" : "general", nid,
                                zone->name);

                        /* Iterate the zonelist */
                        for_each_zone_zonelist(zone, z, zonelist, zoneid)
                                pr_cont("%d:%s ", zone_to_nid(zone), zone->name);
                        pr_cont("\n");
                }
        }
}
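
/*
 * Report how page->flags is carved up into section, node, zone,
 * last_cpupid and KASAN tag fields, then BUG() if the field shifts
 * walked down from the top of the word disagree with the *_PGSHIFT
 * macros or if any two field masks overlap.
 */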
void __init mminit_verify_pageflags_layout(void)
{
        int shift, width;
        unsigned long or_mask, add_mask;

        shift = 8 * sizeof(unsigned long);
        width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH
                - LAST_CPUPID_SHIFT - KASAN_TAG_WIDTH;
        mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths",
                "Section %d Node %d Zone %d Lastcpupid %d Kasantag %d Flags %d\n",
                SECTIONS_WIDTH,
                NODES_WIDTH,
                ZONES_WIDTH,
                LAST_CPUPID_WIDTH,
                KASAN_TAG_WIDTH,
                NR_PAGEFLAGS);
        mminit_dprintk(MMINIT_TRACE, "pageflags_layout_shifts",
                "Section %d Node %d Zone %d Lastcpupid %d Kasantag %d\n",
                SECTIONS_SHIFT,
                NODES_SHIFT,
                ZONES_SHIFT,
                LAST_CPUPID_SHIFT,
                KASAN_TAG_WIDTH);
        mminit_dprintk(MMINIT_TRACE, "pageflags_layout_pgshifts",
                "Section %lu Node %lu Zone %lu Lastcpupid %lu Kasantag %lu\n",
                (unsigned long)SECTIONS_PGSHIFT,
                (unsigned long)NODES_PGSHIFT,
                (unsigned long)ZONES_PGSHIFT,
                (unsigned long)LAST_CPUPID_PGSHIFT,
                (unsigned long)KASAN_TAG_PGSHIFT);
        mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodezoneid",
                "Node/Zone ID: %lu -> %lu\n",
                (unsigned long)(ZONEID_PGOFF + ZONEID_SHIFT),
                (unsigned long)ZONEID_PGOFF);
        mminit_dprintk(MMINIT_TRACE, "pageflags_layout_usage",
                "location: %d -> %d layout %d -> %d unused %d -> %d page-flags\n",
                shift, width, width, NR_PAGEFLAGS, NR_PAGEFLAGS, 0);
#ifdef NODE_NOT_IN_PAGE_FLAGS
        mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
                "Node not in page flags");
#endif
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
        mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
                "Last cpupid not in page flags");
#endif
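
        /*
         * Peel each field width off the top of page->flags in layout
         * order; after each step the running shift must match that
         * field's *_PGSHIFT offset.
         */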
        if (SECTIONS_WIDTH) {
                shift -= SECTIONS_WIDTH;
                BUG_ON(shift != SECTIONS_PGSHIFT);
        }
        if (NODES_WIDTH) {
                shift -= NODES_WIDTH;
                BUG_ON(shift != NODES_PGSHIFT);
        }
        if (ZONES_WIDTH) {
                shift -= ZONES_WIDTH;
                BUG_ON(shift != ZONES_PGSHIFT);
        }

        /*
         * Check for bitmask overlaps: OR and ADD of the shifted masks
         * only give the same result when no two masks share a bit.
         */
        or_mask = (ZONES_MASK << ZONES_PGSHIFT) |
                (NODES_MASK << NODES_PGSHIFT) |
                (SECTIONS_MASK << SECTIONS_PGSHIFT);
        add_mask = (ZONES_MASK << ZONES_PGSHIFT) +
                (NODES_MASK << NODES_PGSHIFT) +
                (SECTIONS_MASK << SECTIONS_PGSHIFT);
        BUG_ON(or_mask != add_mask);
}
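
/*
 * Parse the "mminit_loglevel=" kernel parameter, e.g. booting with
 * mminit_loglevel=2 selects the MMINIT_TRACE output above.
 */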
static __init int set_mminit_loglevel(char *str)
{
        get_option(&str, &mminit_loglevel);
        return 0;
}
early_param("mminit_loglevel", set_mminit_loglevel);
#endif /* CONFIG_DEBUG_MEMORY_INIT */

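/* Parent kobject for the /sys/kernel/mm hierarchy, created in mm_sysfs_init(). */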
struct kobject *mm_kobj;
EXPORT_SYMBOL_GPL(mm_kobj);

#ifdef CONFIG_SMP
s32 vm_committed_as_batch = 32;

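/*
 * Pick the per-CPU batch size of the vm_committed_as percpu counter
 * from the overcommit policy and the memory/CPU ratio; called at boot
 * and again from the hotplug notifier below whenever memory is
 * onlined or offlined.
 */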
void mm_compute_batch(int overcommit_policy)
{
        u64 memsized_batch;
        s32 nr = num_present_cpus();
        s32 batch = max_t(s32, nr*2, 32);
        unsigned long ram_pages = totalram_pages();

        /*
         * For policy OVERCOMMIT_NEVER, set batch size to 0.4% of
         * (total memory/#cpus), and lift it to 25% for other policies
         * to ease the possible lock contention for percpu_counter
         * vm_committed_as, while the max limit is INT_MAX. For
         * example, 16M pages (64GB) on 16 CPUs under OVERCOMMIT_NEVER
         * gives 16M/16/256 = 4096 pages per batch.
         */
        if (overcommit_policy == OVERCOMMIT_NEVER)
                memsized_batch = min_t(u64, ram_pages/nr/256, INT_MAX);
        else
                memsized_batch = min_t(u64, ram_pages/nr/4, INT_MAX);

        vm_committed_as_batch = max_t(s32, memsized_batch, batch);
}
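
/*
 * totalram_pages() changes across memory hot[un]plug, so recompute the
 * batch on MEM_ONLINE/MEM_OFFLINE; all other actions fall through to
 * the default and are ignored.
 */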
static int __meminit mm_compute_batch_notifier(struct notifier_block *self,
                                unsigned long action, void *arg)
{
        switch (action) {
        case MEM_ONLINE:
        case MEM_OFFLINE:
                mm_compute_batch(sysctl_overcommit_memory);
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block compute_batch_nb __meminitdata = {
        .notifier_call = mm_compute_batch_notifier,
        .priority = IPC_CALLBACK_PRI, /* use lowest priority */
};

static int __init mm_compute_batch_init(void)
{
        mm_compute_batch(sysctl_overcommit_memory);
        register_hotmemory_notifier(&compute_batch_nb);

        return 0;
}

__initcall(mm_compute_batch_init);

#endif

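/* Create /sys/kernel/mm as the anchor for mm-related sysfs attributes. */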
static int __init mm_sysfs_init(void)
{
        mm_kobj = kobject_create_and_add("mm", kernel_kobj);
        if (!mm_kobj)
                return -ENOMEM;

        return 0;
}
postcore_initcall(mm_sysfs_init);