mmzone.h

  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. #ifndef _LINUX_MMZONE_H
  3. #define _LINUX_MMZONE_H
  4. #ifndef __ASSEMBLY__
  5. #ifndef __GENERATING_BOUNDS_H
  6. #include <linux/spinlock.h>
  7. #include <linux/list.h>
  8. #include <linux/wait.h>
  9. #include <linux/bitops.h>
  10. #include <linux/cache.h>
  11. #include <linux/threads.h>
  12. #include <linux/numa.h>
  13. #include <linux/init.h>
  14. #include <linux/seqlock.h>
  15. #include <linux/nodemask.h>
  16. #include <linux/pageblock-flags.h>
  17. #include <linux/page-flags-layout.h>
  18. #include <linux/atomic.h>
  19. #include <linux/mm_types.h>
  20. #include <linux/page-flags.h>
  21. #include <linux/android_kabi.h>
  22. #include <asm/page.h>
  23. /* Free memory management - zoned buddy allocator. */
  24. #ifndef CONFIG_FORCE_MAX_ZONEORDER
  25. #define MAX_ORDER 11
  26. #else
  27. #define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
  28. #endif
  29. #define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))
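/*
 * Illustrative sketch, not part of this header: MAX_ORDER bounds the buddy
 * allocator, so the largest block it can hand out is order MAX_ORDER - 1,
 * i.e. MAX_ORDER_NR_PAGES pages. With the default MAX_ORDER of 11 and 4 KiB
 * pages that is 1024 pages, or 4 MiB. The hypothetical helper below spells
 * out the size computation; PAGE_SHIFT comes from <asm/page.h> above.
 */
static inline unsigned long example_max_buddy_alloc_bytes(void)
{
	return (unsigned long)MAX_ORDER_NR_PAGES << PAGE_SHIFT;
}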
  30. /*
  31. * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
  32. * costly to service. That is between allocation orders which should
  33. * coalesce naturally under reasonable reclaim pressure and those which
  34. * will not.
  35. */
  36. #define PAGE_ALLOC_COSTLY_ORDER 3
  37. #define MAX_KSWAPD_THREADS 16
  38. enum migratetype {
  39. MIGRATE_UNMOVABLE,
  40. MIGRATE_MOVABLE,
  41. MIGRATE_RECLAIMABLE,
  42. #ifdef CONFIG_CMA
  43. /*
  44. * MIGRATE_CMA migration type is designed to mimic the way
  45. * ZONE_MOVABLE works. Only movable pages can be allocated
   46. * from MIGRATE_CMA pageblocks and the page allocator never
   47. * implicitly changes the migration type of a MIGRATE_CMA pageblock.
  48. *
  49. * The way to use it is to change migratetype of a range of
  50. * pageblocks to MIGRATE_CMA which can be done by
  51. * __free_pageblock_cma() function. What is important though
  52. * is that a range of pageblocks must be aligned to
   53. * MAX_ORDER_NR_PAGES should the biggest page be bigger than
  54. * a single pageblock.
  55. */
  56. MIGRATE_CMA,
  57. #endif
  58. MIGRATE_PCPTYPES, /* the number of types on the pcp lists */
  59. MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
  60. #ifdef CONFIG_MEMORY_ISOLATION
  61. MIGRATE_ISOLATE, /* can't allocate from here */
  62. #endif
  63. MIGRATE_TYPES
  64. };
  65. /* In mm/page_alloc.c; keep in sync also with show_migration_types() there */
  66. extern const char * const migratetype_names[MIGRATE_TYPES];
  67. #ifdef CONFIG_CMA
  68. # define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
  69. # define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
  70. # define get_cma_migrate_type() MIGRATE_CMA
  71. #else
  72. # define is_migrate_cma(migratetype) false
  73. # define is_migrate_cma_page(_page) false
  74. # define get_cma_migrate_type() MIGRATE_MOVABLE
  75. #endif
  76. static inline bool is_migrate_movable(int mt)
  77. {
  78. return is_migrate_cma(mt) || mt == MIGRATE_MOVABLE;
  79. }
  80. #define for_each_migratetype_order(order, type) \
  81. for (order = 0; order < MAX_ORDER; order++) \
  82. for (type = 0; type < MIGRATE_TYPES; type++)
  83. extern int page_group_by_mobility_disabled;
  84. #define MIGRATETYPE_MASK ((1UL << PB_migratetype_bits) - 1)
  85. #define get_pageblock_migratetype(page) \
  86. get_pfnblock_flags_mask(page, page_to_pfn(page), MIGRATETYPE_MASK)
  87. struct free_area {
  88. struct list_head free_list[MIGRATE_TYPES];
  89. unsigned long nr_free;
  90. };
  91. static inline struct page *get_page_from_free_area(struct free_area *area,
  92. int migratetype)
  93. {
  94. return list_first_entry_or_null(&area->free_list[migratetype],
  95. struct page, lru);
  96. }
  97. static inline bool free_area_empty(struct free_area *area, int migratetype)
  98. {
  99. return list_empty(&area->free_list[migratetype]);
  100. }
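/*
 * Illustrative sketch, not part of this header: the buddy free lists are
 * organised per order and per migratetype, and for_each_migratetype_order()
 * above walks both dimensions. The hypothetical helper below counts how many
 * (order, migratetype) lists are currently non-empty; @areas is assumed to
 * point at an array of MAX_ORDER entries, such as a zone's free_area[].
 */
static inline unsigned int example_count_populated_free_lists(struct free_area *areas)
{
	unsigned int order, type, populated = 0;

	for_each_migratetype_order(order, type)
		if (!free_area_empty(&areas[order], type))
			populated++;
	return populated;
}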
  101. struct pglist_data;
  102. /*
  103. * zone->lock and the zone lru_lock are two of the hottest locks in the kernel.
  104. * So add a wild amount of padding here to ensure that they fall into separate
  105. * cachelines. There are very few zone structures in the machine, so space
  106. * consumption is not a concern here.
  107. */
  108. #if defined(CONFIG_SMP)
  109. struct zone_padding {
  110. char x[0];
  111. } ____cacheline_internodealigned_in_smp;
  112. #define ZONE_PADDING(name) struct zone_padding name;
  113. #else
  114. #define ZONE_PADDING(name)
  115. #endif
  116. #ifdef CONFIG_NUMA
  117. enum numa_stat_item {
  118. NUMA_HIT, /* allocated in intended node */
  119. NUMA_MISS, /* allocated in non intended node */
  120. NUMA_FOREIGN, /* was intended here, hit elsewhere */
  121. NUMA_INTERLEAVE_HIT, /* interleaver preferred this zone */
  122. NUMA_LOCAL, /* allocation from local node */
  123. NUMA_OTHER, /* allocation from other node */
  124. NR_VM_NUMA_STAT_ITEMS
  125. };
  126. #else
  127. #define NR_VM_NUMA_STAT_ITEMS 0
  128. #endif
  129. enum zone_stat_item {
  130. /* First 128 byte cacheline (assuming 64 bit words) */
  131. NR_FREE_PAGES,
  132. NR_ZONE_LRU_BASE, /* Used only for compaction and reclaim retry */
  133. NR_ZONE_INACTIVE_ANON = NR_ZONE_LRU_BASE,
  134. NR_ZONE_ACTIVE_ANON,
  135. NR_ZONE_INACTIVE_FILE,
  136. NR_ZONE_ACTIVE_FILE,
  137. NR_ZONE_UNEVICTABLE,
  138. NR_ZONE_WRITE_PENDING, /* Count of dirty, writeback and unstable pages */
  139. NR_MLOCK, /* mlock()ed pages found and moved off LRU */
  140. NR_PAGETABLE, /* used for pagetables */
  141. /* Second 128 byte cacheline */
  142. NR_BOUNCE,
  143. NR_ZSPAGES, /* allocated in zsmalloc */
  144. NR_FREE_CMA_PAGES,
  145. NR_VM_ZONE_STAT_ITEMS };
  146. enum node_stat_item {
  147. NR_LRU_BASE,
  148. NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */
  149. NR_ACTIVE_ANON, /* " " " " " */
  150. NR_INACTIVE_FILE, /* " " " " " */
  151. NR_ACTIVE_FILE, /* " " " " " */
  152. NR_UNEVICTABLE, /* " " " " " */
  153. NR_SLAB_RECLAIMABLE_B,
  154. NR_SLAB_UNRECLAIMABLE_B,
  155. NR_ISOLATED_ANON, /* Temporary isolated pages from anon lru */
  156. NR_ISOLATED_FILE, /* Temporary isolated pages from file lru */
  157. WORKINGSET_NODES,
  158. WORKINGSET_REFAULT_BASE,
  159. WORKINGSET_REFAULT_ANON = WORKINGSET_REFAULT_BASE,
  160. WORKINGSET_REFAULT_FILE,
  161. WORKINGSET_ACTIVATE_BASE,
  162. WORKINGSET_ACTIVATE_ANON = WORKINGSET_ACTIVATE_BASE,
  163. WORKINGSET_ACTIVATE_FILE,
  164. WORKINGSET_RESTORE_BASE,
  165. WORKINGSET_RESTORE_ANON = WORKINGSET_RESTORE_BASE,
  166. WORKINGSET_RESTORE_FILE,
  167. WORKINGSET_NODERECLAIM,
  168. NR_ANON_MAPPED, /* Mapped anonymous pages */
  169. NR_FILE_MAPPED, /* pagecache pages mapped into pagetables.
  170. only modified from process context */
  171. NR_FILE_PAGES,
  172. NR_FILE_DIRTY,
  173. NR_WRITEBACK,
  174. NR_WRITEBACK_TEMP, /* Writeback using temporary buffers */
   175. NR_SHMEM, /* shmem pages (includes tmpfs/GEM pages) */
  176. NR_SHMEM_THPS,
  177. NR_SHMEM_PMDMAPPED,
  178. NR_FILE_THPS,
  179. NR_FILE_PMDMAPPED,
  180. NR_ANON_THPS,
  181. NR_VMSCAN_WRITE,
  182. NR_VMSCAN_IMMEDIATE, /* Prioritise for reclaim when writeback ends */
  183. NR_DIRTIED, /* page dirtyings since bootup */
  184. NR_WRITTEN, /* page writings since bootup */
  185. NR_KERNEL_MISC_RECLAIMABLE, /* reclaimable non-slab kernel pages */
  186. NR_FOLL_PIN_ACQUIRED, /* via: pin_user_page(), gup flag: FOLL_PIN */
  187. NR_FOLL_PIN_RELEASED, /* pages returned via unpin_user_page() */
  188. NR_KERNEL_STACK_KB, /* measured in KiB */
  189. #if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
  190. NR_KERNEL_SCS_KB, /* measured in KiB */
  191. #endif
  192. NR_VM_NODE_STAT_ITEMS
  193. };
  194. /*
  195. * Returns true if the value is measured in bytes (most vmstat values are
  196. * measured in pages). This defines the API part, the internal representation
  197. * might be different.
  198. */
  199. static __always_inline bool vmstat_item_in_bytes(int idx)
  200. {
  201. /*
  202. * Global and per-node slab counters track slab pages.
  203. * It's expected that changes are multiples of PAGE_SIZE.
  204. * Internally values are stored in pages.
  205. *
  206. * Per-memcg and per-lruvec counters track memory, consumed
  207. * by individual slab objects. These counters are actually
  208. * byte-precise.
  209. */
  210. return (idx == NR_SLAB_RECLAIMABLE_B ||
  211. idx == NR_SLAB_UNRECLAIMABLE_B);
  212. }
  213. /*
  214. * We do arithmetic on the LRU lists in various places in the code,
  215. * so it is important to keep the active lists LRU_ACTIVE higher in
  216. * the array than the corresponding inactive lists, and to keep
  217. * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists.
  218. *
  219. * This has to be kept in sync with the statistics in zone_stat_item
  220. * above and the descriptions in vmstat_text in mm/vmstat.c
  221. */
  222. #define LRU_BASE 0
  223. #define LRU_ACTIVE 1
  224. #define LRU_FILE 2
  225. enum lru_list {
  226. LRU_INACTIVE_ANON = LRU_BASE,
  227. LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
  228. LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
  229. LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
  230. LRU_UNEVICTABLE,
  231. NR_LRU_LISTS
  232. };
  233. #define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)
  234. #define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)
  235. static inline bool is_file_lru(enum lru_list lru)
  236. {
  237. return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
  238. }
  239. static inline bool is_active_lru(enum lru_list lru)
  240. {
  241. return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
  242. }
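/*
 * Illustrative sketch, not part of this header: because of the ordering
 * rules documented above, an lru_list index is plain arithmetic over
 * LRU_BASE, LRU_ACTIVE and LRU_FILE. The hypothetical helper below builds
 * the index from two flags; e.g. (true, false) yields LRU_ACTIVE_ANON.
 */
static inline enum lru_list example_lru_index(bool active, bool file)
{
	return LRU_BASE + (file ? LRU_FILE : 0) + (active ? LRU_ACTIVE : 0);
}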
  243. #define ANON_AND_FILE 2
  244. enum lruvec_flags {
  245. LRUVEC_CONGESTED, /* lruvec has many dirty pages
  246. * backed by a congested BDI
  247. */
  248. };
  249. struct lruvec {
  250. struct list_head lists[NR_LRU_LISTS];
  251. /*
  252. * These track the cost of reclaiming one LRU - file or anon -
  253. * over the other. As the observed cost of reclaiming one LRU
  254. * increases, the reclaim scan balance tips toward the other.
  255. */
  256. unsigned long anon_cost;
  257. unsigned long file_cost;
  258. /* Non-resident age, driven by LRU movement */
  259. atomic_long_t nonresident_age;
  260. /* Refaults at the time of last reclaim cycle */
  261. unsigned long refaults[ANON_AND_FILE];
  262. /* Various lruvec state flags (enum lruvec_flags) */
  263. unsigned long flags;
  264. #ifdef CONFIG_MEMCG
  265. struct pglist_data *pgdat;
  266. #endif
  267. };
  268. /* Isolate unmapped pages */
  269. #define ISOLATE_UNMAPPED ((__force isolate_mode_t)0x2)
  270. /* Isolate for asynchronous migration */
  271. #define ISOLATE_ASYNC_MIGRATE ((__force isolate_mode_t)0x4)
  272. /* Isolate unevictable pages */
  273. #define ISOLATE_UNEVICTABLE ((__force isolate_mode_t)0x8)
  274. /* LRU Isolation modes. */
  275. typedef unsigned __bitwise isolate_mode_t;
  276. enum zone_watermarks {
  277. WMARK_MIN,
  278. WMARK_LOW,
  279. WMARK_HIGH,
  280. NR_WMARK
  281. };
  282. #define min_wmark_pages(z) (z->_watermark[WMARK_MIN] + z->watermark_boost)
  283. #define low_wmark_pages(z) (z->_watermark[WMARK_LOW] + z->watermark_boost)
  284. #define high_wmark_pages(z) (z->_watermark[WMARK_HIGH] + z->watermark_boost)
  285. #define wmark_pages(z, i) (z->_watermark[i] + z->watermark_boost)
  286. struct per_cpu_pages {
  287. int count; /* number of pages in the list */
  288. int high; /* high watermark, emptying needed */
  289. int batch; /* chunk size for buddy add/remove */
  290. /* Lists of pages, one per migrate type stored on the pcp-lists */
  291. struct list_head lists[MIGRATE_PCPTYPES];
  292. };
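/*
 * Illustrative sketch, not part of this header: ->count, ->high and ->batch
 * describe the per-cpu page cache. mm/page_alloc.c refills and trims the
 * lists in units of ->batch, and pages are returned to the buddy lists once
 * ->count rises above ->high. The hypothetical helper below expresses that
 * trim trigger.
 */
static inline bool example_pcp_needs_drain(const struct per_cpu_pages *pcp)
{
	return pcp->count >= pcp->high;
}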
  293. struct per_cpu_pageset {
  294. struct per_cpu_pages pcp;
  295. #ifdef CONFIG_NUMA
  296. s8 expire;
  297. u16 vm_numa_stat_diff[NR_VM_NUMA_STAT_ITEMS];
  298. #endif
  299. #ifdef CONFIG_SMP
  300. s8 stat_threshold;
  301. s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
  302. #endif
  303. };
  304. struct per_cpu_nodestat {
  305. s8 stat_threshold;
  306. s8 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS];
  307. };
   308. #endif /* !__GENERATING_BOUNDS_H */
  309. enum zone_type {
  310. /*
  311. * ZONE_DMA and ZONE_DMA32 are used when there are peripherals not able
  312. * to DMA to all of the addressable memory (ZONE_NORMAL).
  313. * On architectures where this area covers the whole 32 bit address
  314. * space ZONE_DMA32 is used. ZONE_DMA is left for the ones with smaller
  315. * DMA addressing constraints. This distinction is important as a 32bit
  316. * DMA mask is assumed when ZONE_DMA32 is defined. Some 64-bit
  317. * platforms may need both zones as they support peripherals with
  318. * different DMA addressing limitations.
  319. */
  320. #ifdef CONFIG_ZONE_DMA
  321. ZONE_DMA,
  322. #endif
  323. #ifdef CONFIG_ZONE_DMA32
  324. ZONE_DMA32,
  325. #endif
  326. /*
  327. * Normal addressable memory is in ZONE_NORMAL. DMA operations can be
  328. * performed on pages in ZONE_NORMAL if the DMA devices support
  329. * transfers to all addressable memory.
  330. */
  331. ZONE_NORMAL,
  332. #ifdef CONFIG_HIGHMEM
  333. /*
  334. * A memory area that is only addressable by the kernel through
  335. * mapping portions into its own address space. This is for example
  336. * used by i386 to allow the kernel to address the memory beyond
  337. * 900MB. The kernel will set up special mappings (page
  338. * table entries on i386) for each page that the kernel needs to
  339. * access.
  340. */
  341. ZONE_HIGHMEM,
  342. #endif
  343. /*
  344. * ZONE_MOVABLE is similar to ZONE_NORMAL, except that it contains
  345. * movable pages with few exceptional cases described below. Main use
  346. * cases for ZONE_MOVABLE are to make memory offlining/unplug more
  347. * likely to succeed, and to locally limit unmovable allocations - e.g.,
  348. * to increase the number of THP/huge pages. Notable special cases are:
  349. *
  350. * 1. Pinned pages: (long-term) pinning of movable pages might
  351. * essentially turn such pages unmovable. Memory offlining might
  352. * retry a long time.
  353. * 2. memblock allocations: kernelcore/movablecore setups might create
  354. * situations where ZONE_MOVABLE contains unmovable allocations
  355. * after boot. Memory offlining and allocations fail early.
  356. * 3. Memory holes: kernelcore/movablecore setups might create very rare
  357. * situations where ZONE_MOVABLE contains memory holes after boot,
  358. * for example, if we have sections that are only partially
  359. * populated. Memory offlining and allocations fail early.
  360. * 4. PG_hwpoison pages: while poisoned pages can be skipped during
  361. * memory offlining, such pages cannot be allocated.
  362. * 5. Unmovable PG_offline pages: in paravirtualized environments,
  363. * hotplugged memory blocks might only partially be managed by the
  364. * buddy (e.g., via XEN-balloon, Hyper-V balloon, virtio-mem). The
   365. * parts not managed by the buddy are unmovable PG_offline pages. In
  366. * some cases (virtio-mem), such pages can be skipped during
  367. * memory offlining, however, cannot be moved/allocated. These
  368. * techniques might use alloc_contig_range() to hide previously
  369. * exposed pages from the buddy again (e.g., to implement some sort
  370. * of memory unplug in virtio-mem).
  371. *
  372. * In general, no unmovable allocations that degrade memory offlining
  373. * should end up in ZONE_MOVABLE. Allocators (like alloc_contig_range())
  374. * have to expect that migrating pages in ZONE_MOVABLE can fail (even
  375. * if has_unmovable_pages() states that there are no unmovable pages,
  376. * there can be false negatives).
  377. */
  378. ZONE_MOVABLE,
  379. #ifdef CONFIG_ZONE_DEVICE
  380. ZONE_DEVICE,
  381. #endif
  382. __MAX_NR_ZONES
  383. };
  384. #ifndef __GENERATING_BOUNDS_H
  385. #define ASYNC_AND_SYNC 2
  386. struct zone {
  387. /* Read-mostly fields */
  388. /* zone watermarks, access with *_wmark_pages(zone) macros */
  389. unsigned long _watermark[NR_WMARK];
  390. unsigned long watermark_boost;
  391. unsigned long nr_reserved_highatomic;
  392. /*
  393. * We don't know if the memory that we're going to allocate will be
   394. * freeable and/or it will be released eventually, so to avoid totally
  395. * wasting several GB of ram we must reserve some of the lower zone
   396. * memory (otherwise we risk running OOM on the lower zones despite
  397. * there being tons of freeable ram on the higher zones). This array is
  398. * recalculated at runtime if the sysctl_lowmem_reserve_ratio sysctl
  399. * changes.
  400. */
  401. long lowmem_reserve[MAX_NR_ZONES];
  402. #ifdef CONFIG_NEED_MULTIPLE_NODES
  403. int node;
  404. #endif
  405. struct pglist_data *zone_pgdat;
  406. struct per_cpu_pageset __percpu *pageset;
  407. #ifndef CONFIG_SPARSEMEM
  408. /*
  409. * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
  410. * In SPARSEMEM, this map is stored in struct mem_section
  411. */
  412. unsigned long *pageblock_flags;
  413. #endif /* CONFIG_SPARSEMEM */
  414. /* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
  415. unsigned long zone_start_pfn;
  416. /*
  417. * spanned_pages is the total pages spanned by the zone, including
  418. * holes, which is calculated as:
  419. * spanned_pages = zone_end_pfn - zone_start_pfn;
  420. *
  421. * present_pages is physical pages existing within the zone, which
  422. * is calculated as:
  423. * present_pages = spanned_pages - absent_pages(pages in holes);
  424. *
  425. * managed_pages is present pages managed by the buddy system, which
  426. * is calculated as (reserved_pages includes pages allocated by the
  427. * bootmem allocator):
  428. * managed_pages = present_pages - reserved_pages;
  429. *
  430. * cma pages is present pages that are assigned for CMA use
  431. * (MIGRATE_CMA).
  432. *
  433. * So present_pages may be used by memory hotplug or memory power
  434. * management logic to figure out unmanaged pages by checking
  435. * (present_pages - managed_pages). And managed_pages should be used
  436. * by page allocator and vm scanner to calculate all kinds of watermarks
  437. * and thresholds.
  438. *
  439. * Locking rules:
  440. *
  441. * zone_start_pfn and spanned_pages are protected by span_seqlock.
  442. * It is a seqlock because it has to be read outside of zone->lock,
  443. * and it is done in the main allocator path. But, it is written
  444. * quite infrequently.
  445. *
  446. * The span_seq lock is declared along with zone->lock because it is
  447. * frequently read in proximity to zone->lock. It's good to
  448. * give them a chance of being in the same cacheline.
  449. *
  450. * Write access to present_pages at runtime should be protected by
   451. * mem_hotplug_begin/end(). Any reader who can't tolerate drift of
   452. * present_pages should use get_online_mems() to get a stable value.
  453. */
  454. atomic_long_t managed_pages;
  455. unsigned long spanned_pages;
  456. unsigned long present_pages;
  457. #ifdef CONFIG_CMA
  458. unsigned long cma_pages;
  459. #endif
  460. const char *name;
  461. #ifdef CONFIG_MEMORY_ISOLATION
  462. /*
   463. * Number of isolated pageblocks. It is used to solve incorrect
  464. * freepage counting problem due to racy retrieving migratetype
  465. * of pageblock. Protected by zone->lock.
  466. */
  467. unsigned long nr_isolate_pageblock;
  468. #endif
  469. #ifdef CONFIG_MEMORY_HOTPLUG
  470. /* see spanned/present_pages for more description */
  471. seqlock_t span_seqlock;
  472. #endif
  473. int initialized;
  474. /* Write-intensive fields used from the page allocator */
  475. ZONE_PADDING(_pad1_)
  476. /* free areas of different sizes */
  477. struct free_area free_area[MAX_ORDER];
  478. /* zone flags, see below */
  479. unsigned long flags;
  480. /* Primarily protects free_area */
  481. spinlock_t lock;
  482. /* Write-intensive fields used by compaction and vmstats. */
  483. ZONE_PADDING(_pad2_)
  484. /*
  485. * When free pages are below this point, additional steps are taken
  486. * when reading the number of free pages to avoid per-cpu counter
  487. * drift allowing watermarks to be breached
  488. */
  489. unsigned long percpu_drift_mark;
  490. #if defined CONFIG_COMPACTION || defined CONFIG_CMA
  491. /* pfn where compaction free scanner should start */
  492. unsigned long compact_cached_free_pfn;
  493. /* pfn where compaction migration scanner should start */
  494. unsigned long compact_cached_migrate_pfn[ASYNC_AND_SYNC];
  495. unsigned long compact_init_migrate_pfn;
  496. unsigned long compact_init_free_pfn;
  497. #endif
  498. #ifdef CONFIG_COMPACTION
  499. /*
  500. * On compaction failure, 1<<compact_defer_shift compactions
  501. * are skipped before trying again. The number attempted since
  502. * last failure is tracked with compact_considered.
  503. * compact_order_failed is the minimum compaction failed order.
  504. */
  505. unsigned int compact_considered;
  506. unsigned int compact_defer_shift;
  507. int compact_order_failed;
  508. #endif
  509. #if defined CONFIG_COMPACTION || defined CONFIG_CMA
  510. /* Set to true when the PG_migrate_skip bits should be cleared */
  511. bool compact_blockskip_flush;
  512. #endif
  513. bool contiguous;
  514. ZONE_PADDING(_pad3_)
  515. /* Zone statistics */
  516. atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
  517. atomic_long_t vm_numa_stat[NR_VM_NUMA_STAT_ITEMS];
  518. ANDROID_KABI_RESERVE(1);
  519. ANDROID_KABI_RESERVE(2);
  520. ANDROID_KABI_RESERVE(3);
  521. ANDROID_KABI_RESERVE(4);
  522. } ____cacheline_internodealigned_in_smp;
  523. enum pgdat_flags {
  524. PGDAT_DIRTY, /* reclaim scanning has recently found
  525. * many dirty file pages at the tail
  526. * of the LRU.
  527. */
  528. PGDAT_WRITEBACK, /* reclaim scanning has recently found
  529. * many pages under writeback
  530. */
  531. PGDAT_RECLAIM_LOCKED, /* prevents concurrent reclaim */
  532. };
  533. enum zone_flags {
  534. ZONE_BOOSTED_WATERMARK, /* zone recently boosted watermarks.
  535. * Cleared when kswapd is woken.
  536. */
  537. };
  538. static inline unsigned long zone_managed_pages(struct zone *zone)
  539. {
  540. return (unsigned long)atomic_long_read(&zone->managed_pages);
  541. }
  542. static inline unsigned long zone_cma_pages(struct zone *zone)
  543. {
  544. #ifdef CONFIG_CMA
  545. return zone->cma_pages;
  546. #else
  547. return 0;
  548. #endif
  549. }
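/*
 * Illustrative sketch, not part of this header: the *_wmark_pages() macros
 * defined earlier always include zone->watermark_boost, so watermark checks
 * should go through them rather than reading _watermark[] directly. The
 * hypothetical helper below compares a caller-supplied free page count
 * against the low watermark; the real check, which also accounts for
 * allocation reserves, is zone_watermark_ok(), declared later in this header.
 */
static inline bool example_zone_below_low_wmark(struct zone *z,
						unsigned long free_pages)
{
	return free_pages < low_wmark_pages(z);
}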
  550. static inline unsigned long zone_end_pfn(const struct zone *zone)
  551. {
  552. return zone->zone_start_pfn + zone->spanned_pages;
  553. }
  554. static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
  555. {
  556. return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
  557. }
  558. static inline bool zone_is_initialized(struct zone *zone)
  559. {
  560. return zone->initialized;
  561. }
  562. static inline bool zone_is_empty(struct zone *zone)
  563. {
  564. return zone->spanned_pages == 0;
  565. }
  566. /*
  567. * Return true if [start_pfn, start_pfn + nr_pages) range has a non-empty
  568. * intersection with the given zone
  569. */
  570. static inline bool zone_intersects(struct zone *zone,
  571. unsigned long start_pfn, unsigned long nr_pages)
  572. {
  573. if (zone_is_empty(zone))
  574. return false;
  575. if (start_pfn >= zone_end_pfn(zone) ||
  576. start_pfn + nr_pages <= zone->zone_start_pfn)
  577. return false;
  578. return true;
  579. }
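/*
 * Illustrative sketch, not part of this header: zone_spans_pfn() and
 * zone_intersects() above operate on the spanned range, which may contain
 * holes; they say nothing about whether a page is present or managed. The
 * hypothetical helper below checks that an entire pfn range falls inside a
 * zone's span.
 */
static inline bool example_range_within_zone_span(struct zone *zone,
						  unsigned long start_pfn,
						  unsigned long nr_pages)
{
	if (!nr_pages)
		return true;
	return zone_spans_pfn(zone, start_pfn) &&
	       zone_spans_pfn(zone, start_pfn + nr_pages - 1);
}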
  580. /*
  581. * The "priority" of VM scanning is how much of the queues we will scan in one
  582. * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
  583. * queues ("queue_length >> 12") during an aging round.
  584. */
  585. #define DEF_PRIORITY 12
  586. /* Maximum number of zones on a zonelist */
  587. #define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)
  588. enum {
  589. ZONELIST_FALLBACK, /* zonelist with fallback */
  590. #ifdef CONFIG_NUMA
  591. /*
  592. * The NUMA zonelists are doubled because we need zonelists that
  593. * restrict the allocations to a single node for __GFP_THISNODE.
  594. */
  595. ZONELIST_NOFALLBACK, /* zonelist without fallback (__GFP_THISNODE) */
  596. #endif
  597. MAX_ZONELISTS
  598. };
  599. /*
  600. * This struct contains information about a zone in a zonelist. It is stored
  601. * here to avoid dereferences into large structures and lookups of tables
  602. */
  603. struct zoneref {
  604. struct zone *zone; /* Pointer to actual zone */
  605. int zone_idx; /* zone_idx(zoneref->zone) */
  606. };
  607. /*
  608. * One allocation request operates on a zonelist. A zonelist
  609. * is a list of zones, the first one is the 'goal' of the
  610. * allocation, the other zones are fallback zones, in decreasing
  611. * priority.
  612. *
  613. * To speed the reading of the zonelist, the zonerefs contain the zone index
  614. * of the entry being read. Helper functions to access information given
  615. * a struct zoneref are
  616. *
  617. * zonelist_zone() - Return the struct zone * for an entry in _zonerefs
  618. * zonelist_zone_idx() - Return the index of the zone for an entry
  619. * zonelist_node_idx() - Return the index of the node for an entry
  620. */
  621. struct zonelist {
  622. struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
  623. };
  624. #ifndef CONFIG_DISCONTIGMEM
  625. /* The array of struct pages - for discontigmem use pgdat->lmem_map */
  626. extern struct page *mem_map;
  627. #endif
  628. #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  629. struct deferred_split {
  630. spinlock_t split_queue_lock;
  631. struct list_head split_queue;
  632. unsigned long split_queue_len;
  633. };
  634. #endif
  635. /*
  636. * On NUMA machines, each NUMA node would have a pg_data_t to describe
   637. * its memory layout. On UMA machines there is a single pglist_data which
  638. * describes the whole memory.
  639. *
  640. * Memory statistics and page replacement data structures are maintained on a
  641. * per-zone basis.
  642. */
  643. typedef struct pglist_data {
  644. /*
  645. * node_zones contains just the zones for THIS node. Not all of the
  646. * zones may be populated, but it is the full list. It is referenced by
  647. * this node's node_zonelists as well as other node's node_zonelists.
  648. */
  649. struct zone node_zones[MAX_NR_ZONES];
  650. /*
  651. * node_zonelists contains references to all zones in all nodes.
  652. * Generally the first zones will be references to this node's
  653. * node_zones.
  654. */
  655. struct zonelist node_zonelists[MAX_ZONELISTS];
  656. int nr_zones; /* number of populated zones in this node */
  657. #ifdef CONFIG_FLAT_NODE_MEM_MAP /* means !SPARSEMEM */
  658. struct page *node_mem_map;
  659. #ifdef CONFIG_PAGE_EXTENSION
  660. struct page_ext *node_page_ext;
  661. #endif
  662. #endif
  663. #if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
  664. /*
  665. * Must be held any time you expect node_start_pfn,
  666. * node_present_pages, node_spanned_pages or nr_zones to stay constant.
  667. * Also synchronizes pgdat->first_deferred_pfn during deferred page
  668. * init.
  669. *
  670. * pgdat_resize_lock() and pgdat_resize_unlock() are provided to
  671. * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG
  672. * or CONFIG_DEFERRED_STRUCT_PAGE_INIT.
  673. *
  674. * Nests above zone->lock and zone->span_seqlock
  675. */
  676. spinlock_t node_size_lock;
  677. #endif
  678. unsigned long node_start_pfn;
  679. unsigned long node_present_pages; /* total number of physical pages */
  680. unsigned long node_spanned_pages; /* total size of physical page
  681. range, including holes */
  682. int node_id;
  683. wait_queue_head_t kswapd_wait;
  684. wait_queue_head_t pfmemalloc_wait;
  685. struct task_struct *kswapd; /* Protected by
  686. mem_hotplug_begin/end() */
  687. struct task_struct *mkswapd[MAX_KSWAPD_THREADS];
  688. int kswapd_order;
  689. enum zone_type kswapd_highest_zoneidx;
  690. int kswapd_failures; /* Number of 'reclaimed == 0' runs */
  691. ANDROID_OEM_DATA(1);
  692. #ifdef CONFIG_COMPACTION
  693. int kcompactd_max_order;
  694. enum zone_type kcompactd_highest_zoneidx;
  695. wait_queue_head_t kcompactd_wait;
  696. struct task_struct *kcompactd;
  697. bool proactive_compact_trigger;
  698. #endif
  699. /*
  700. * This is a per-node reserve of pages that are not available
  701. * to userspace allocations.
  702. */
  703. unsigned long totalreserve_pages;
  704. #ifdef CONFIG_NUMA
  705. /*
  706. * node reclaim becomes active if more unmapped pages exist.
  707. */
  708. unsigned long min_unmapped_pages;
  709. unsigned long min_slab_pages;
  710. #endif /* CONFIG_NUMA */
  711. /* Write-intensive fields used by page reclaim */
  712. ZONE_PADDING(_pad1_)
  713. spinlock_t lru_lock;
  714. #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
  715. /*
  716. * If memory initialisation on large machines is deferred then this
  717. * is the first PFN that needs to be initialised.
  718. */
  719. unsigned long first_deferred_pfn;
  720. #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
  721. #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  722. struct deferred_split deferred_split_queue;
  723. #endif
  724. /* Fields commonly accessed by the page reclaim scanner */
  725. /*
  726. * NOTE: THIS IS UNUSED IF MEMCG IS ENABLED.
  727. *
  728. * Use mem_cgroup_lruvec() to look up lruvecs.
  729. */
  730. struct lruvec __lruvec;
  731. unsigned long flags;
  732. ZONE_PADDING(_pad2_)
  733. /* Per-node vmstats */
  734. struct per_cpu_nodestat __percpu *per_cpu_nodestats;
  735. atomic_long_t vm_stat[NR_VM_NODE_STAT_ITEMS];
  736. } pg_data_t;
  737. #define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages)
  738. #define node_spanned_pages(nid) (NODE_DATA(nid)->node_spanned_pages)
  739. #ifdef CONFIG_FLAT_NODE_MEM_MAP
  740. #define pgdat_page_nr(pgdat, pagenr) ((pgdat)->node_mem_map + (pagenr))
  741. #else
  742. #define pgdat_page_nr(pgdat, pagenr) pfn_to_page((pgdat)->node_start_pfn + (pagenr))
  743. #endif
  744. #define nid_page_nr(nid, pagenr) pgdat_page_nr(NODE_DATA(nid),(pagenr))
  745. #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
  746. #define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid))
  747. static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
  748. {
  749. return pgdat->node_start_pfn + pgdat->node_spanned_pages;
  750. }
  751. static inline bool pgdat_is_empty(pg_data_t *pgdat)
  752. {
  753. return !pgdat->node_start_pfn && !pgdat->node_spanned_pages;
  754. }
  755. #include <linux/memory_hotplug.h>
  756. void build_all_zonelists(pg_data_t *pgdat);
  757. void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order,
  758. enum zone_type highest_zoneidx);
  759. bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
  760. int highest_zoneidx, unsigned int alloc_flags,
  761. long free_pages);
  762. bool zone_watermark_ok(struct zone *z, unsigned int order,
  763. unsigned long mark, int highest_zoneidx,
  764. unsigned int alloc_flags);
  765. bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
  766. unsigned long mark, int highest_zoneidx);
  767. /*
   768. * Memory initialization context, used to differentiate memory added by
  769. * the platform statically or via memory hotplug interface.
  770. */
  771. enum meminit_context {
  772. MEMINIT_EARLY,
  773. MEMINIT_HOTPLUG,
  774. };
  775. extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
  776. unsigned long size);
  777. extern void lruvec_init(struct lruvec *lruvec);
  778. static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec)
  779. {
  780. #ifdef CONFIG_MEMCG
  781. return lruvec->pgdat;
  782. #else
  783. return container_of(lruvec, struct pglist_data, __lruvec);
  784. #endif
  785. }
  786. extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx);
  787. #ifdef CONFIG_HAVE_MEMORYLESS_NODES
  788. int local_memory_node(int node_id);
  789. #else
  790. static inline int local_memory_node(int node_id) { return node_id; };
  791. #endif
  792. /*
  793. * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
  794. */
  795. #define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones)
  796. /*
  797. * Returns true if a zone has pages managed by the buddy allocator.
  798. * All the reclaim decisions have to use this function rather than
  799. * populated_zone(). If the whole zone is reserved then we can easily
  800. * end up with populated_zone() && !managed_zone().
  801. */
  802. static inline bool managed_zone(struct zone *zone)
  803. {
  804. return zone_managed_pages(zone);
  805. }
  806. /* Returns true if a zone has memory */
  807. static inline bool populated_zone(struct zone *zone)
  808. {
  809. return zone->present_pages;
  810. }
  811. #ifdef CONFIG_NEED_MULTIPLE_NODES
  812. static inline int zone_to_nid(struct zone *zone)
  813. {
  814. return zone->node;
  815. }
  816. static inline void zone_set_nid(struct zone *zone, int nid)
  817. {
  818. zone->node = nid;
  819. }
  820. #else
  821. static inline int zone_to_nid(struct zone *zone)
  822. {
  823. return 0;
  824. }
  825. static inline void zone_set_nid(struct zone *zone, int nid) {}
  826. #endif
  827. extern int movable_zone;
  828. #ifdef CONFIG_HIGHMEM
  829. static inline int zone_movable_is_highmem(void)
  830. {
  831. #ifdef CONFIG_NEED_MULTIPLE_NODES
  832. return movable_zone == ZONE_HIGHMEM;
  833. #else
  834. return (ZONE_MOVABLE - 1) == ZONE_HIGHMEM;
  835. #endif
  836. }
  837. #endif
  838. static inline int is_highmem_idx(enum zone_type idx)
  839. {
  840. #ifdef CONFIG_HIGHMEM
  841. return (idx == ZONE_HIGHMEM ||
  842. (idx == ZONE_MOVABLE && zone_movable_is_highmem()));
  843. #else
  844. return 0;
  845. #endif
  846. }
  847. #ifdef CONFIG_ZONE_DMA
  848. bool has_managed_dma(void);
  849. #else
  850. static inline bool has_managed_dma(void)
  851. {
  852. return false;
  853. }
  854. #endif
  855. /**
  856. * is_highmem - helper function to quickly check if a struct zone is a
  857. * highmem zone or not. This is an attempt to keep references
  858. * to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
  859. * @zone - pointer to struct zone variable
  860. */
  861. static inline int is_highmem(struct zone *zone)
  862. {
  863. #ifdef CONFIG_HIGHMEM
  864. return is_highmem_idx(zone_idx(zone));
  865. #else
  866. return 0;
  867. #endif
  868. }
  869. /* These two functions are used to setup the per zone pages min values */
  870. struct ctl_table;
  871. int min_free_kbytes_sysctl_handler(struct ctl_table *, int, void *, size_t *,
  872. loff_t *);
  873. int watermark_scale_factor_sysctl_handler(struct ctl_table *, int, void *,
  874. size_t *, loff_t *);
  875. extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES];
  876. int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, void *,
  877. size_t *, loff_t *);
  878. int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int,
  879. void *, size_t *, loff_t *);
  880. int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
  881. void *, size_t *, loff_t *);
  882. int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
  883. void *, size_t *, loff_t *);
  884. int numa_zonelist_order_handler(struct ctl_table *, int,
  885. void *, size_t *, loff_t *);
  886. extern int percpu_pagelist_fraction;
  887. extern char numa_zonelist_order[];
  888. #define NUMA_ZONELIST_ORDER_LEN 16
  889. #ifndef CONFIG_NEED_MULTIPLE_NODES
  890. extern struct pglist_data contig_page_data;
  891. #define NODE_DATA(nid) (&contig_page_data)
  892. #define NODE_MEM_MAP(nid) mem_map
  893. #else /* CONFIG_NEED_MULTIPLE_NODES */
  894. #include <asm/mmzone.h>
  895. #endif /* !CONFIG_NEED_MULTIPLE_NODES */
  896. extern struct pglist_data *first_online_pgdat(void);
  897. extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
  898. extern struct zone *next_zone(struct zone *zone);
  899. extern int isolate_anon_lru_page(struct page *page);
  900. /**
  901. * for_each_online_pgdat - helper macro to iterate over all online nodes
  902. * @pgdat - pointer to a pg_data_t variable
  903. */
  904. #define for_each_online_pgdat(pgdat) \
  905. for (pgdat = first_online_pgdat(); \
  906. pgdat; \
  907. pgdat = next_online_pgdat(pgdat))
  908. /**
  909. * for_each_zone - helper macro to iterate over all memory zones
  910. * @zone - pointer to struct zone variable
  911. *
  912. * The user only needs to declare the zone variable, for_each_zone
  913. * fills it in.
  914. */
  915. #define for_each_zone(zone) \
  916. for (zone = (first_online_pgdat())->node_zones; \
  917. zone; \
  918. zone = next_zone(zone))
  919. #define for_each_populated_zone(zone) \
  920. for (zone = (first_online_pgdat())->node_zones; \
  921. zone; \
  922. zone = next_zone(zone)) \
  923. if (!populated_zone(zone)) \
  924. ; /* do nothing */ \
  925. else
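/*
 * Illustrative sketch, not part of this header: for_each_populated_zone()
 * above visits every zone in the system that has present pages. The
 * hypothetical helper below totals the buddy-managed pages over all of
 * them, which is how system-wide figures are derived from per-zone state.
 */
static inline unsigned long example_total_managed_pages(void)
{
	struct zone *zone;
	unsigned long total = 0;

	for_each_populated_zone(zone)
		total += zone_managed_pages(zone);
	return total;
}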
  926. static inline struct zone *zonelist_zone(struct zoneref *zoneref)
  927. {
  928. return zoneref->zone;
  929. }
  930. static inline int zonelist_zone_idx(struct zoneref *zoneref)
  931. {
  932. return zoneref->zone_idx;
  933. }
  934. static inline int zonelist_node_idx(struct zoneref *zoneref)
  935. {
  936. return zone_to_nid(zoneref->zone);
  937. }
  938. struct zoneref *__next_zones_zonelist(struct zoneref *z,
  939. enum zone_type highest_zoneidx,
  940. nodemask_t *nodes);
  941. /**
  942. * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
  943. * @z - The cursor used as a starting point for the search
  944. * @highest_zoneidx - The zone index of the highest zone to return
  945. * @nodes - An optional nodemask to filter the zonelist with
  946. *
  947. * This function returns the next zone at or below a given zone index that is
  948. * within the allowed nodemask using a cursor as the starting point for the
  949. * search. The zoneref returned is a cursor that represents the current zone
  950. * being examined. It should be advanced by one before calling
  951. * next_zones_zonelist again.
  952. */
  953. static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
  954. enum zone_type highest_zoneidx,
  955. nodemask_t *nodes)
  956. {
  957. if (likely(!nodes && zonelist_zone_idx(z) <= highest_zoneidx))
  958. return z;
  959. return __next_zones_zonelist(z, highest_zoneidx, nodes);
  960. }
  961. /**
  962. * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
  963. * @zonelist - The zonelist to search for a suitable zone
  964. * @highest_zoneidx - The zone index of the highest zone to return
  965. * @nodes - An optional nodemask to filter the zonelist with
  966. * @return - Zoneref pointer for the first suitable zone found (see below)
  967. *
  968. * This function returns the first zone at or below a given zone index that is
  969. * within the allowed nodemask. The zoneref returned is a cursor that can be
  970. * used to iterate the zonelist with next_zones_zonelist by advancing it by
  971. * one before calling.
  972. *
  973. * When no eligible zone is found, zoneref->zone is NULL (zoneref itself is
   974. * never NULL). This may happen either genuinely, or due to a concurrent
   975. * nodemask update caused by cpuset modification.
  976. */
  977. static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
  978. enum zone_type highest_zoneidx,
  979. nodemask_t *nodes)
  980. {
  981. return next_zones_zonelist(zonelist->_zonerefs,
  982. highest_zoneidx, nodes);
  983. }
  984. /**
  985. * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
  986. * @zone - The current zone in the iterator
  987. * @z - The current pointer within zonelist->_zonerefs being iterated
  988. * @zlist - The zonelist being iterated
  989. * @highidx - The zone index of the highest zone to return
  990. * @nodemask - Nodemask allowed by the allocator
  991. *
   992. * This iterator iterates through all zones at or below a given zone index and
  993. * within a given nodemask
  994. */
  995. #define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
  996. for (z = first_zones_zonelist(zlist, highidx, nodemask), zone = zonelist_zone(z); \
  997. zone; \
  998. z = next_zones_zonelist(++z, highidx, nodemask), \
  999. zone = zonelist_zone(z))
  1000. #define for_next_zone_zonelist_nodemask(zone, z, highidx, nodemask) \
  1001. for (zone = z->zone; \
  1002. zone; \
  1003. z = next_zones_zonelist(++z, highidx, nodemask), \
  1004. zone = zonelist_zone(z))
  1005. /**
  1006. * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
  1007. * @zone - The current zone in the iterator
  1008. * @z - The current pointer within zonelist->zones being iterated
  1009. * @zlist - The zonelist being iterated
  1010. * @highidx - The zone index of the highest zone to return
  1011. *
   1012. * This iterator iterates through all zones at or below a given zone index.
  1013. */
  1014. #define for_each_zone_zonelist(zone, z, zlist, highidx) \
  1015. for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
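/*
 * Illustrative sketch, not part of this header: an allocation walks the
 * zonerefs of a zonelist with the iterators above. The hypothetical helper
 * below counts the zones of a node's fallback zonelist that are eligible at
 * or below @highest_zoneidx; real allocation paths obtain the zonelist via
 * node_zonelist() in <linux/gfp.h> rather than indexing node_zonelists[]
 * directly.
 */
static inline unsigned int example_count_eligible_zones(int nid,
					enum zone_type highest_zoneidx)
{
	struct zonelist *zonelist =
		&NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
	struct zoneref *z;
	struct zone *zone;
	unsigned int nr = 0;

	for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx)
		nr++;
	return nr;
}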
  1016. #ifdef CONFIG_SPARSEMEM
  1017. #include <asm/sparsemem.h>
  1018. #endif
  1019. #ifdef CONFIG_FLATMEM
  1020. #define pfn_to_nid(pfn) (0)
  1021. #endif
  1022. #ifdef CONFIG_SPARSEMEM
  1023. /*
  1024. * SECTION_SHIFT #bits space required to store a section #
  1025. *
  1026. * PA_SECTION_SHIFT physical address to/from section number
  1027. * PFN_SECTION_SHIFT pfn to/from section number
  1028. */
  1029. #define PA_SECTION_SHIFT (SECTION_SIZE_BITS)
  1030. #define PFN_SECTION_SHIFT (SECTION_SIZE_BITS - PAGE_SHIFT)
  1031. #define NR_MEM_SECTIONS (1UL << SECTIONS_SHIFT)
  1032. #define PAGES_PER_SECTION (1UL << PFN_SECTION_SHIFT)
  1033. #define PAGE_SECTION_MASK (~(PAGES_PER_SECTION-1))
  1034. #define SECTION_BLOCKFLAGS_BITS \
  1035. ((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)
  1036. #if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
  1037. #error Allocator MAX_ORDER exceeds SECTION_SIZE
  1038. #endif
  1039. static inline unsigned long pfn_to_section_nr(unsigned long pfn)
  1040. {
  1041. return pfn >> PFN_SECTION_SHIFT;
  1042. }
  1043. static inline unsigned long section_nr_to_pfn(unsigned long sec)
  1044. {
  1045. return sec << PFN_SECTION_SHIFT;
  1046. }
  1047. #define SECTION_ALIGN_UP(pfn) (((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
  1048. #define SECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SECTION_MASK)
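/*
 * Illustrative sketch, not part of this header: pfn_to_section_nr() and
 * section_nr_to_pfn() above convert between a pfn and its section number by
 * shifting with PFN_SECTION_SHIFT. The hypothetical helper below derives the
 * first pfn of the section containing @pfn, which is what
 * SECTION_ALIGN_DOWN() computes with a mask.
 */
static inline unsigned long example_section_start_pfn(unsigned long pfn)
{
	return section_nr_to_pfn(pfn_to_section_nr(pfn));
}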
  1049. #define SUBSECTION_SHIFT 21
  1050. #define SUBSECTION_SIZE (1UL << SUBSECTION_SHIFT)
  1051. #define PFN_SUBSECTION_SHIFT (SUBSECTION_SHIFT - PAGE_SHIFT)
  1052. #define PAGES_PER_SUBSECTION (1UL << PFN_SUBSECTION_SHIFT)
  1053. #define PAGE_SUBSECTION_MASK (~(PAGES_PER_SUBSECTION-1))
  1054. #if SUBSECTION_SHIFT > SECTION_SIZE_BITS
  1055. #error Subsection size exceeds section size
  1056. #else
  1057. #define SUBSECTIONS_PER_SECTION (1UL << (SECTION_SIZE_BITS - SUBSECTION_SHIFT))
  1058. #endif
  1059. #define SUBSECTION_ALIGN_UP(pfn) ALIGN((pfn), PAGES_PER_SUBSECTION)
  1060. #define SUBSECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SUBSECTION_MASK)
  1061. struct mem_section_usage {
  1062. #ifdef CONFIG_SPARSEMEM_VMEMMAP
  1063. DECLARE_BITMAP(subsection_map, SUBSECTIONS_PER_SECTION);
  1064. #endif
  1065. /* See declaration of similar field in struct zone */
  1066. unsigned long pageblock_flags[0];
  1067. };
  1068. void subsection_map_init(unsigned long pfn, unsigned long nr_pages);
  1069. struct page;
  1070. struct page_ext;
  1071. struct mem_section {
  1072. /*
  1073. * This is, logically, a pointer to an array of struct
  1074. * pages. However, it is stored with some other magic.
  1075. * (see sparse.c::sparse_init_one_section())
  1076. *
  1077. * Additionally during early boot we encode node id of
  1078. * the location of the section here to guide allocation.
  1079. * (see sparse.c::memory_present())
  1080. *
  1081. * Making it a UL at least makes someone do a cast
  1082. * before using it wrong.
  1083. */
  1084. unsigned long section_mem_map;
  1085. struct mem_section_usage *usage;
  1086. #ifdef CONFIG_PAGE_EXTENSION
  1087. /*
  1088. * If SPARSEMEM, pgdat doesn't have page_ext pointer. We use
  1089. * section. (see page_ext.h about this.)
  1090. */
  1091. struct page_ext *page_ext;
  1092. unsigned long pad;
  1093. #endif
  1094. /*
  1095. * WARNING: mem_section must be a power-of-2 in size for the
  1096. * calculation and use of SECTION_ROOT_MASK to make sense.
  1097. */
  1098. };
  1099. #ifdef CONFIG_SPARSEMEM_EXTREME
  1100. #define SECTIONS_PER_ROOT (PAGE_SIZE / sizeof (struct mem_section))
  1101. #else
  1102. #define SECTIONS_PER_ROOT 1
  1103. #endif
  1104. #define SECTION_NR_TO_ROOT(sec) ((sec) / SECTIONS_PER_ROOT)
  1105. #define NR_SECTION_ROOTS DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT)
  1106. #define SECTION_ROOT_MASK (SECTIONS_PER_ROOT - 1)
  1107. #ifdef CONFIG_SPARSEMEM_EXTREME
  1108. extern struct mem_section **mem_section;
  1109. #else
  1110. extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
  1111. #endif
  1112. static inline unsigned long *section_to_usemap(struct mem_section *ms)
  1113. {
  1114. return ms->usage->pageblock_flags;
  1115. }
  1116. static inline struct mem_section *__nr_to_section(unsigned long nr)
  1117. {
  1118. unsigned long root = SECTION_NR_TO_ROOT(nr);
  1119. if (unlikely(root >= NR_SECTION_ROOTS))
  1120. return NULL;
  1121. #ifdef CONFIG_SPARSEMEM_EXTREME
  1122. if (!mem_section || !mem_section[root])
  1123. return NULL;
  1124. #endif
  1125. return &mem_section[root][nr & SECTION_ROOT_MASK];
  1126. }
  1127. extern unsigned long __section_nr(struct mem_section *ms);
  1128. extern size_t mem_section_usage_size(void);
  1129. /*
  1130. * We use the lower bits of the mem_map pointer to store
  1131. * a little bit of information. The pointer is calculated
  1132. * as mem_map - section_nr_to_pfn(pnum). The result is
  1133. * aligned to the minimum alignment of the two values:
  1134. * 1. All mem_map arrays are page-aligned.
  1135. * 2. section_nr_to_pfn() always clears PFN_SECTION_SHIFT
  1136. * lowest bits. PFN_SECTION_SHIFT is arch-specific
  1137. * (equal SECTION_SIZE_BITS - PAGE_SHIFT), and the
  1138. * worst combination is powerpc with 256k pages,
  1139. * which results in PFN_SECTION_SHIFT equal 6.
  1140. * To sum it up, at least 6 bits are available.
  1141. */
  1142. #define SECTION_MARKED_PRESENT (1UL<<0)
  1143. #define SECTION_HAS_MEM_MAP (1UL<<1)
  1144. #define SECTION_IS_ONLINE (1UL<<2)
  1145. #define SECTION_IS_EARLY (1UL<<3)
  1146. #define SECTION_MAP_LAST_BIT (1UL<<4)
  1147. #define SECTION_MAP_MASK (~(SECTION_MAP_LAST_BIT-1))
  1148. #define SECTION_NID_SHIFT 3
  1149. static inline struct page *__section_mem_map_addr(struct mem_section *section)
  1150. {
  1151. unsigned long map = section->section_mem_map;
  1152. map &= SECTION_MAP_MASK;
  1153. return (struct page *)map;
  1154. }
  1155. static inline int present_section(struct mem_section *section)
  1156. {
  1157. return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
  1158. }
  1159. static inline int present_section_nr(unsigned long nr)
  1160. {
  1161. return present_section(__nr_to_section(nr));
  1162. }
  1163. static inline int valid_section(struct mem_section *section)
  1164. {
  1165. return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
  1166. }
  1167. static inline int early_section(struct mem_section *section)
  1168. {
  1169. return (section && (section->section_mem_map & SECTION_IS_EARLY));
  1170. }
  1171. static inline int valid_section_nr(unsigned long nr)
  1172. {
  1173. return valid_section(__nr_to_section(nr));
  1174. }
  1175. static inline int online_section(struct mem_section *section)
  1176. {
  1177. return (section && (section->section_mem_map & SECTION_IS_ONLINE));
  1178. }
  1179. static inline int online_section_nr(unsigned long nr)
  1180. {
  1181. return online_section(__nr_to_section(nr));
  1182. }
  1183. #ifdef CONFIG_MEMORY_HOTPLUG
  1184. void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
  1185. #ifdef CONFIG_MEMORY_HOTREMOVE
  1186. void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
  1187. #endif
  1188. #endif
  1189. static inline struct mem_section *__pfn_to_section(unsigned long pfn)
  1190. {
  1191. return __nr_to_section(pfn_to_section_nr(pfn));
  1192. }
  1193. extern unsigned long __highest_present_section_nr;
  1194. static inline int subsection_map_index(unsigned long pfn)
  1195. {
  1196. return (pfn & ~(PAGE_SECTION_MASK)) / PAGES_PER_SUBSECTION;
  1197. }
  1198. #ifdef CONFIG_SPARSEMEM_VMEMMAP
  1199. static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
  1200. {
  1201. int idx = subsection_map_index(pfn);
  1202. return test_bit(idx, ms->usage->subsection_map);
  1203. }
  1204. #else
  1205. static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
  1206. {
  1207. return 1;
  1208. }
  1209. #endif
  1210. #ifndef CONFIG_HAVE_ARCH_PFN_VALID
  1211. static inline int pfn_valid(unsigned long pfn)
  1212. {
  1213. struct mem_section *ms;
  1214. if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
  1215. return 0;
  1216. ms = __nr_to_section(pfn_to_section_nr(pfn));
  1217. if (!valid_section(ms))
  1218. return 0;
  1219. /*
  1220. * Traditionally early sections always returned pfn_valid() for
  1221. * the entire section-sized span.
  1222. */
  1223. return early_section(ms) || pfn_section_valid(ms, pfn);
  1224. }
  1225. #endif
  1226. static inline int pfn_in_present_section(unsigned long pfn)
  1227. {
  1228. if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
  1229. return 0;
  1230. return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
  1231. }
  1232. static inline unsigned long next_present_section_nr(unsigned long section_nr)
  1233. {
  1234. while (++section_nr <= __highest_present_section_nr) {
  1235. if (present_section_nr(section_nr))
  1236. return section_nr;
  1237. }
  1238. return -1;
  1239. }
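/*
 * Illustrative sketch, not part of this header: init code walks the present
 * sections with next_present_section_nr(), which returns -1 (as an unsigned
 * long) once it runs past __highest_present_section_nr. The hypothetical
 * helper below counts the present sections that way; section 0 has to be
 * tested directly because the walk starts at the successor of its argument.
 */
static inline unsigned long example_count_present_sections(void)
{
	unsigned long nr, count = 0;

	if (present_section_nr(0))
		count++;
	for (nr = next_present_section_nr(0); nr != (unsigned long)-1;
	     nr = next_present_section_nr(nr))
		count++;
	return count;
}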
  1240. /*
  1241. * These are _only_ used during initialisation, therefore they
  1242. * can use __initdata ... They could have names to indicate
  1243. * this restriction.
  1244. */
  1245. #ifdef CONFIG_NUMA
  1246. #define pfn_to_nid(pfn) \
  1247. ({ \
  1248. unsigned long __pfn_to_nid_pfn = (pfn); \
  1249. page_to_nid(pfn_to_page(__pfn_to_nid_pfn)); \
  1250. })
  1251. #else
  1252. #define pfn_to_nid(pfn) (0)
  1253. #endif
  1254. void sparse_init(void);
  1255. #else
  1256. #define sparse_init() do {} while (0)
  1257. #define sparse_index_init(_sec, _nid) do {} while (0)
  1258. #define pfn_in_present_section pfn_valid
  1259. #define subsection_map_init(_pfn, _nr_pages) do {} while (0)
  1260. #endif /* CONFIG_SPARSEMEM */
  1261. /*
  1262. * During memory init memblocks map pfns to nids. The search is expensive and
  1263. * this caches recent lookups. The implementation of __early_pfn_to_nid
  1264. * may treat start/end as pfns or sections.
  1265. */
  1266. struct mminit_pfnnid_cache {
  1267. unsigned long last_start;
  1268. unsigned long last_end;
  1269. int last_nid;
  1270. };
  1271. /*
  1272. * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we
  1273. * need to check pfn validity within that MAX_ORDER_NR_PAGES block.
  1274. * pfn_valid_within() should be used in this case; we optimise this away
  1275. * when we have no holes within a MAX_ORDER_NR_PAGES block.
  1276. */
  1277. #ifdef CONFIG_HOLES_IN_ZONE
  1278. #define pfn_valid_within(pfn) pfn_valid(pfn)
  1279. #else
  1280. #define pfn_valid_within(pfn) (1)
  1281. #endif
  1282. #ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
  1283. /*
  1284. * pfn_valid() is meant to be able to tell if a given PFN has valid memmap
  1285. * associated with it or not. This means that a struct page exists for this
  1286. * pfn. The caller cannot assume the page is fully initialized in general.
   1287. * Hotpluggable pages might not have been onlined yet. pfn_to_online_page()
  1288. * will ensure the struct page is fully online and initialized. Special pages
  1289. * (e.g. ZONE_DEVICE) are never onlined and should be treated accordingly.
  1290. *
  1291. * In FLATMEM, it is expected that holes always have valid memmap as long as
   1292. * there are valid PFNs on either side of the hole. In SPARSEMEM, it is assumed
  1293. * that a valid section has a memmap for the entire section.
  1294. *
   1295. * However, ARM, and maybe other embedded architectures in the future,
   1296. * free the memmap backing holes to save memory on the assumption the memmap is
  1297. * never used. The page_zone linkages are then broken even though pfn_valid()
  1298. * returns true. A walker of the full memmap must then do this additional
  1299. * check to ensure the memmap they are looking at is sane by making sure
  1300. * the zone and PFN linkages are still valid. This is expensive, but walkers
  1301. * of the full memmap are extremely rare.
  1302. */
  1303. bool memmap_valid_within(unsigned long pfn,
  1304. struct page *page, struct zone *zone);
  1305. #else
  1306. static inline bool memmap_valid_within(unsigned long pfn,
  1307. struct page *page, struct zone *zone)
  1308. {
  1309. return true;
  1310. }
  1311. #endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
   1312. #endif /* !__GENERATING_BOUNDS_H */
  1313. #endif /* !__ASSEMBLY__ */
  1314. #endif /* _LINUX_MMZONE_H */