swap.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/swap.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem. Fine-tuning documentation can be found in
 * Documentation/admin-guide/sysctl/vm.rst.
 * Started 18.12.91
 * Swap aging added 23.2.95, Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mm_inline.h>
#include <linux/percpu_counter.h>
#include <linux/memremap.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
#include <linux/memcontrol.h>
#include <linux/gfp.h>
#include <linux/uio.h>
#include <linux/hugetlb.h>
#include <linux/page_idle.h>
#include <linux/local_lock.h>
#include <linux/buffer_head.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/pagemap.h>

/* How many pages do we try to swap or page in/out together? */
int page_cluster;

/* Protecting only lru_rotate.pvec which requires disabling interrupts */
struct lru_rotate {
	local_lock_t lock;
	struct pagevec pvec;
};
static DEFINE_PER_CPU(struct lru_rotate, lru_rotate) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

/*
 * The following struct pagevec are grouped together because they are protected
 * by disabling preemption (and interrupts remain enabled).
 */
struct lru_pvecs {
	local_lock_t lock;
	struct pagevec lru_add;
	struct pagevec lru_deactivate_file;
	struct pagevec lru_deactivate;
	struct pagevec lru_lazyfree;
	struct pagevec lru_lazyfree_movetail;
#ifdef CONFIG_SMP
	struct pagevec activate_page;
#endif
};
static DEFINE_PER_CPU(struct lru_pvecs, lru_pvecs) = {
	.lock = INIT_LOCAL_LOCK(lock),
};
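
/*
 * Rough sketch of how these per-CPU caches are used (illustrative only): a
 * page normally enters the LRU through this CPU's lru_add pagevec and is
 * only batch-inserted once the pagevec fills up or a drain is requested:
 *
 *	lru_cache_add(page);		// queue on this CPU's lru_add pagevec
 *	...				// up to PAGEVEC_SIZE pages accumulate
 *	lru_add_drain();		// flush this CPU's pagevecs
 *	lru_add_drain_all();		// schedule a drain on every CPU
 *
 * Batching amortizes the pgdat->lru_lock acquisition over many pages.
 */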

/*
 * This path almost never happens for VM activity - pages are normally
 * freed via pagevecs. But it gets used by networking.
 */
static void __page_cache_release(struct page *page)
{
	if (PageLRU(page)) {
		pg_data_t *pgdat = page_pgdat(page);
		struct lruvec *lruvec;
		unsigned long flags;

		spin_lock_irqsave(&pgdat->lru_lock, flags);
		lruvec = mem_cgroup_page_lruvec(page, pgdat);
		VM_BUG_ON_PAGE(!PageLRU(page), page);
		__ClearPageLRU(page);
		del_page_from_lru_list(page, lruvec, page_off_lru(page));
		spin_unlock_irqrestore(&pgdat->lru_lock, flags);
	}
	__ClearPageWaiters(page);
}

static void __put_single_page(struct page *page)
{
	__page_cache_release(page);
	mem_cgroup_uncharge(page);
	free_unref_page(page);
}

static void __put_compound_page(struct page *page)
{
	/*
	 * __page_cache_release() is supposed to be called for thp, not for
	 * hugetlb. This is because a hugetlb page never has PageLRU set
	 * (it is never put on any LRU list) and no memcg routines should
	 * be called for hugetlb (it has a separate hugetlb_cgroup.)
	 */
	if (!PageHuge(page))
		__page_cache_release(page);
	destroy_compound_page(page);
}

void __put_page(struct page *page)
{
	if (is_zone_device_page(page)) {
		put_dev_pagemap(page->pgmap);

		/*
		 * The page belongs to the device that created pgmap. Do
		 * not return it to the page allocator.
		 */
		return;
	}

	if (unlikely(PageCompound(page)))
		__put_compound_page(page);
	else
		__put_single_page(page);
}
EXPORT_SYMBOL(__put_page);

/**
 * put_pages_list() - release a list of pages
 * @pages: list of pages threaded on page->lru
 *
 * Release a list of pages which are strung together on page->lru. Currently
 * used by read_cache_pages() and related error recovery code.
 */
void put_pages_list(struct list_head *pages)
{
	while (!list_empty(pages)) {
		struct page *victim;

		victim = lru_to_page(pages);
		list_del(&victim->lru);
		put_page(victim);
	}
}
EXPORT_SYMBOL(put_pages_list);

/*
 * get_kernel_pages() - pin kernel pages in memory
 * @kiov:	An array of struct kvec structures
 * @nr_segs:	number of segments to pin
 * @write:	pinning for read/write, currently ignored
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_segs long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_segs is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with.
 */
int get_kernel_pages(const struct kvec *kiov, int nr_segs, int write,
		struct page **pages)
{
	int seg;

	for (seg = 0; seg < nr_segs; seg++) {
		if (WARN_ON(kiov[seg].iov_len != PAGE_SIZE))
			return seg;

		pages[seg] = kmap_to_page(kiov[seg].iov_base);
		get_page(pages[seg]);
	}

	return seg;
}
EXPORT_SYMBOL_GPL(get_kernel_pages);

/*
 * get_kernel_page() - pin a kernel page in memory
 * @start:	starting kernel address
 * @write:	pinning for read/write, currently ignored
 * @pages:	array that receives pointer to the page pinned.
 *		Must have room for one page pointer.
 *
 * Returns 1 if the page is pinned. If the page was not pinned, returns
 * -errno. The page returned must be released with a put_page() call
 * when it is finished with.
 */
int get_kernel_page(unsigned long start, int write, struct page **pages)
{
	const struct kvec kiov = {
		.iov_base = (void *)start,
		.iov_len = PAGE_SIZE
	};

	return get_kernel_pages(&kiov, 1, write, pages);
}
EXPORT_SYMBOL_GPL(get_kernel_page);
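
/*
 * Example (illustrative sketch only): pinning the page backing a
 * kernel-mapped buffer "buf" and releasing it afterwards:
 *
 *	struct page *page;
 *
 *	if (get_kernel_page((unsigned long)buf, 0, &page) == 1) {
 *		... use page ...
 *		put_page(page);
 *	}
 */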

static void pagevec_lru_move_fn(struct pagevec *pvec,
	void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg),
	void *arg)
{
	int i;
	struct pglist_data *pgdat = NULL;
	struct lruvec *lruvec;
	unsigned long flags = 0;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct pglist_data *pagepgdat = page_pgdat(page);

		if (pagepgdat != pgdat) {
			if (pgdat)
				spin_unlock_irqrestore(&pgdat->lru_lock, flags);
			pgdat = pagepgdat;
			spin_lock_irqsave(&pgdat->lru_lock, flags);
		}

		lruvec = mem_cgroup_page_lruvec(page, pgdat);
		(*move_fn)(page, lruvec, arg);
	}
	if (pgdat)
		spin_unlock_irqrestore(&pgdat->lru_lock, flags);
	release_pages(pvec->pages, pvec->nr);
	pagevec_reinit(pvec);
}

static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
				 void *arg)
{
	int *pgmoved = arg;

	if (PageLRU(page) && !PageUnevictable(page)) {
		del_page_from_lru_list(page, lruvec, page_lru(page));
		ClearPageActive(page);
		add_page_to_lru_list_tail(page, lruvec, page_lru(page));
		(*pgmoved) += thp_nr_pages(page);
	}
}

/*
 * pagevec_move_tail() must be called with IRQ disabled.
 * Otherwise this may cause nasty races.
 */
static void pagevec_move_tail(struct pagevec *pvec)
{
	int pgmoved = 0;

	pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
	__count_vm_events(PGROTATED, pgmoved);
}

/* return true if pagevec needs to drain */
static bool pagevec_add_and_need_flush(struct pagevec *pvec, struct page *page)
{
	bool ret = false;

	if (!pagevec_add(pvec, page) || PageCompound(page) ||
			lru_cache_disabled())
		ret = true;

	return ret;
}

/*
 * Writeback is about to end against a page which has been marked for immediate
 * reclaim. If it still appears to be reclaimable, move it to the tail of the
 * inactive list.
 */
void rotate_reclaimable_page(struct page *page)
{
	if (!PageLocked(page) && !PageDirty(page) &&
	    !PageUnevictable(page) && PageLRU(page)) {
		struct pagevec *pvec;
		unsigned long flags;

		get_page(page);
		local_lock_irqsave(&lru_rotate.lock, flags);
		pvec = this_cpu_ptr(&lru_rotate.pvec);
		if (pagevec_add_and_need_flush(pvec, page))
			pagevec_move_tail(pvec);
		local_unlock_irqrestore(&lru_rotate.lock, flags);
	}
}
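
/*
 * A typical caller is end_page_writeback(): when writeback completes on a
 * page that still has PG_reclaim set, it clears the flag and calls
 * rotate_reclaimable_page() so the now-clean page can be reclaimed quickly.
 */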

void lru_note_cost(struct lruvec *lruvec, bool file, unsigned int nr_pages)
{
	do {
		unsigned long lrusize;

		/* Record cost event */
		if (file)
			lruvec->file_cost += nr_pages;
		else
			lruvec->anon_cost += nr_pages;

		/*
		 * Decay previous events
		 *
		 * Because workloads change over time (and to avoid
		 * overflow) we keep these statistics as a floating
		 * average, which ends up weighing recent refaults
		 * more than old ones.
		 */
		lrusize = lruvec_page_state(lruvec, NR_INACTIVE_ANON) +
			  lruvec_page_state(lruvec, NR_ACTIVE_ANON) +
			  lruvec_page_state(lruvec, NR_INACTIVE_FILE) +
			  lruvec_page_state(lruvec, NR_ACTIVE_FILE);

		if (lruvec->file_cost + lruvec->anon_cost > lrusize / 4) {
			lruvec->file_cost /= 2;
			lruvec->anon_cost /= 2;
		}
	} while ((lruvec = parent_lruvec(lruvec)));
}
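
/*
 * Worked example of the decay above: with an LRU of 4000 pages, once
 * file_cost + anon_cost exceeds 1000 (lrusize / 4) both counters are
 * halved, so older cost events lose half their weight each time the
 * threshold is crossed and the averages track recent behaviour.
 */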

void lru_note_cost_page(struct page *page)
{
	lru_note_cost(mem_cgroup_page_lruvec(page, page_pgdat(page)),
		      page_is_file_lru(page), thp_nr_pages(page));
}

static void __activate_page(struct page *page, struct lruvec *lruvec,
			    void *arg)
{
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		int lru = page_lru_base_type(page);
		int nr_pages = thp_nr_pages(page);

		del_page_from_lru_list(page, lruvec, lru);
		SetPageActive(page);
		lru += LRU_ACTIVE;
		add_page_to_lru_list(page, lruvec, lru);
		trace_mm_lru_activate(page);

		__count_vm_events(PGACTIVATE, nr_pages);
		__count_memcg_events(lruvec_memcg(lruvec), PGACTIVATE,
				     nr_pages);
	}
}

#ifdef CONFIG_SMP
static void activate_page_drain(int cpu)
{
	struct pagevec *pvec = &per_cpu(lru_pvecs.activate_page, cpu);

	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, __activate_page, NULL);
}

static bool need_activate_page_drain(int cpu)
{
	return pagevec_count(&per_cpu(lru_pvecs.activate_page, cpu)) != 0;
}

static void activate_page(struct page *page)
{
	page = compound_head(page);
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		struct pagevec *pvec;

		local_lock(&lru_pvecs.lock);
		pvec = this_cpu_ptr(&lru_pvecs.activate_page);
		get_page(page);
		if (pagevec_add_and_need_flush(pvec, page))
			pagevec_lru_move_fn(pvec, __activate_page, NULL);
		local_unlock(&lru_pvecs.lock);
	}
}

#else
static inline void activate_page_drain(int cpu)
{
}

static void activate_page(struct page *page)
{
	pg_data_t *pgdat = page_pgdat(page);

	page = compound_head(page);
	spin_lock_irq(&pgdat->lru_lock);
	__activate_page(page, mem_cgroup_page_lruvec(page, pgdat), NULL);
	spin_unlock_irq(&pgdat->lru_lock);
}
#endif

static void __lru_cache_activate_page(struct page *page)
{
	struct pagevec *pvec;
	int i;

	local_lock(&lru_pvecs.lock);
	pvec = this_cpu_ptr(&lru_pvecs.lru_add);

	/*
	 * Search backwards on the optimistic assumption that the page being
	 * activated has just been added to this pagevec. Note that only
	 * the local pagevec is examined as a !PageLRU page could be in the
	 * process of being released, reclaimed, migrated or on a remote
	 * pagevec that is currently being drained. Furthermore, marking
	 * a remote pagevec's page PageActive potentially hits a race where
	 * a page is marked PageActive just after it is added to the inactive
	 * list causing accounting errors and BUG_ON checks to trigger.
	 */
	for (i = pagevec_count(pvec) - 1; i >= 0; i--) {
		struct page *pagevec_page = pvec->pages[i];

		if (pagevec_page == page) {
			SetPageActive(page);
			break;
		}
	}

	local_unlock(&lru_pvecs.lock);
}

/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced	->	inactive,referenced
 * inactive,referenced		->	active,unreferenced
 * active,unreferenced		->	active,referenced
 *
 * When a newly allocated page is not yet visible, so safe for non-atomic ops,
 * __SetPageReferenced(page) may be substituted for mark_page_accessed(page).
 */
void mark_page_accessed(struct page *page)
{
	page = compound_head(page);

	if (!PageReferenced(page)) {
		SetPageReferenced(page);
	} else if (PageUnevictable(page)) {
		/*
		 * Unevictable pages are on the "LRU_UNEVICTABLE" list. But,
		 * this list is never rotated or maintained, so marking an
		 * unevictable page accessed has no effect.
		 */
	} else if (!PageActive(page)) {
		/*
		 * If the page is on the LRU, queue it for activation via
		 * lru_pvecs.activate_page. Otherwise, assume the page is on a
		 * pagevec, mark it active and it'll be moved to the active
		 * LRU on the next drain.
		 */
		if (PageLRU(page))
			activate_page(page);
		else
			__lru_cache_activate_page(page);
		ClearPageReferenced(page);
		workingset_activation(page);
	}
	if (page_is_idle(page))
		clear_page_idle(page);
}
EXPORT_SYMBOL(mark_page_accessed);
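
/*
 * Callers are spread throughout the kernel; for example, the buffered read
 * path marks pagecache pages accessed after copying their data to user
 * space, which is what eventually promotes repeatedly-read pages to the
 * active list via the state machine documented above.
 */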

/**
 * lru_cache_add - add a page to a page list
 * @page: the page to be added to the LRU.
 *
 * Queue the page for addition to the LRU via pagevec. The decision on whether
 * to add the page to the [in]active [file|anon] list is deferred until the
 * pagevec is drained. This gives a chance for the caller of lru_cache_add()
 * to have the page added to the active list using mark_page_accessed().
 */
void lru_cache_add(struct page *page)
{
	struct pagevec *pvec;

	VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
	VM_BUG_ON_PAGE(PageLRU(page), page);

	get_page(page);
	local_lock(&lru_pvecs.lock);
	pvec = this_cpu_ptr(&lru_pvecs.lru_add);
	if (pagevec_add_and_need_flush(pvec, page))
		__pagevec_lru_add(pvec);
	local_unlock(&lru_pvecs.lock);
}
EXPORT_SYMBOL(lru_cache_add);
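
/*
 * Sketch of a typical call site (illustrative only): a freshly allocated
 * pagecache page is first inserted into the mapping and then handed to the
 * LRU, roughly:
 *
 *	page = __page_cache_alloc(gfp);
 *	if (add_to_page_cache_lru(page, mapping, index, gfp))
 *		goto err;	// add_to_page_cache_lru() calls lru_cache_add()
 */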

/**
 * __lru_cache_add_inactive_or_unevictable
 * @page: the page to be added to the LRU
 * @vma_flags: flags of the VMA in which the page is mapped, used to
 *             determine its evictability
 *
 * Place @page on the inactive or unevictable LRU list, depending on its
 * evictability.
 */
void __lru_cache_add_inactive_or_unevictable(struct page *page,
					     unsigned long vma_flags)
{
	bool unevictable;

	VM_BUG_ON_PAGE(PageLRU(page), page);

	unevictable = (vma_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED;
	if (unlikely(unevictable) && !TestSetPageMlocked(page)) {
		int nr_pages = thp_nr_pages(page);
		/*
		 * We use the irq-unsafe __mod_zone_page_stat because this
		 * counter is not modified from interrupt context, and the pte
		 * lock is held (a spinlock), which implies preemption is
		 * disabled.
		 */
		__mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);
		count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
	}
	lru_cache_add(page);
}

/*
 * If the page can not be invalidated, it is moved to the
 * inactive list to speed up its reclaim. It is moved to the
 * head of the list, rather than the tail, to give the flusher
 * threads some time to write it out, as this is much more
 * effective than the single-page writeout from reclaim.
 *
 * If the page is not mapped and is dirty/under writeback, it can be
 * reclaimed ASAP by using PG_reclaim.
 *
 * 1. active, mapped page -> none
 * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
 * 3. inactive, mapped page -> none
 * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
 * 5. inactive, clean -> inactive, tail
 * 6. Others -> none
 *
 * In case 4, the page is moved to the head of the inactive list because
 * the VM expects it to be written out by the flusher threads, which is
 * much more effective than the single-page writeout from reclaim.
 */
static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
				   void *arg)
{
	int lru;
	bool active;
	int nr_pages = thp_nr_pages(page);

	if (!PageLRU(page))
		return;

	if (PageUnevictable(page))
		return;

	/* Some processes are using the page */
	if (page_mapped(page))
		return;

	active = PageActive(page);
	lru = page_lru_base_type(page);

	del_page_from_lru_list(page, lruvec, lru + active);
	ClearPageActive(page);
	ClearPageReferenced(page);

	if (PageWriteback(page) || PageDirty(page)) {
		/*
		 * Setting PG_reclaim could race with end_page_writeback(),
		 * which can confuse readahead. But the race window is
		 * _really_ small and it's a non-critical problem.
		 */
		add_page_to_lru_list(page, lruvec, lru);
		SetPageReclaim(page);
	} else {
		/*
		 * The page's writeback ended while it was on the pagevec;
		 * move the page to the tail of the inactive list.
		 */
		add_page_to_lru_list_tail(page, lruvec, lru);
		__count_vm_events(PGROTATED, nr_pages);
	}

	if (active) {
		__count_vm_events(PGDEACTIVATE, nr_pages);
		__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
				     nr_pages);
	}
}

static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
			      void *arg)
{
	if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
		int lru = page_lru_base_type(page);
		int nr_pages = thp_nr_pages(page);

		del_page_from_lru_list(page, lruvec, lru + LRU_ACTIVE);
		ClearPageActive(page);
		ClearPageReferenced(page);
		add_page_to_lru_list(page, lruvec, lru);

		__count_vm_events(PGDEACTIVATE, nr_pages);
		__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
				     nr_pages);
	}
}

static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
			    void *arg)
{
	if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
	    !PageSwapCache(page) && !PageUnevictable(page)) {
		bool active = PageActive(page);
		int nr_pages = thp_nr_pages(page);

		del_page_from_lru_list(page, lruvec,
				       LRU_INACTIVE_ANON + active);
		ClearPageActive(page);
		ClearPageReferenced(page);
		/*
		 * Lazyfree pages are clean anonymous pages. They have
		 * PG_swapbacked flag cleared, to distinguish them from normal
		 * anonymous pages
		 */
		ClearPageSwapBacked(page);
		add_page_to_lru_list(page, lruvec, LRU_INACTIVE_FILE);

		__count_vm_events(PGLAZYFREE, nr_pages);
		__count_memcg_events(lruvec_memcg(lruvec), PGLAZYFREE,
				     nr_pages);
	}
}

static void lru_lazyfree_movetail_fn(struct page *page, struct lruvec *lruvec,
				     void *arg)
{
	bool *add_to_tail = (bool *)arg;

	if (PageLRU(page) && !PageUnevictable(page) && PageSwapBacked(page) &&
	    !PageSwapCache(page)) {
		bool active = PageActive(page);

		del_page_from_lru_list(page, lruvec,
				       LRU_INACTIVE_ANON + active);
		ClearPageActive(page);
		ClearPageReferenced(page);
		if (add_to_tail && *add_to_tail)
			add_page_to_lru_list_tail(page, lruvec, LRU_INACTIVE_FILE);
		else
			add_page_to_lru_list(page, lruvec, LRU_INACTIVE_FILE);
	}
}

/*
 * Drain pages out of the cpu's pagevecs.
 * Either "cpu" is the current CPU, and preemption has already been
 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 */
void lru_add_drain_cpu(int cpu)
{
	struct pagevec *pvec = &per_cpu(lru_pvecs.lru_add, cpu);

	if (pagevec_count(pvec))
		__pagevec_lru_add(pvec);

	pvec = &per_cpu(lru_rotate.pvec, cpu);
	/* Disabling interrupts below acts as a compiler barrier. */
	if (data_race(pagevec_count(pvec))) {
		unsigned long flags;

		/* No harm done if a racing interrupt already did this */
		local_lock_irqsave(&lru_rotate.lock, flags);
		pagevec_move_tail(pvec);
		local_unlock_irqrestore(&lru_rotate.lock, flags);
	}

	pvec = &per_cpu(lru_pvecs.lru_deactivate_file, cpu);
	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);

	pvec = &per_cpu(lru_pvecs.lru_deactivate, cpu);
	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);

	pvec = &per_cpu(lru_pvecs.lru_lazyfree, cpu);
	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);

	pvec = &per_cpu(lru_pvecs.lru_lazyfree_movetail, cpu);
	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, lru_lazyfree_movetail_fn, NULL);

	activate_page_drain(cpu);
}

/**
 * deactivate_file_page - forcefully deactivate a file page
 * @page: page to deactivate
 *
 * This function hints the VM that @page is a good reclaim candidate,
 * for example if its invalidation fails due to the page being dirty
 * or under writeback.
 */
void deactivate_file_page(struct page *page)
{
	/*
	 * In a workload with many unevictable pages (e.g. heavy use of
	 * mlock/mprotect), deactivating unevictable pages to accelerate
	 * reclaim is pointless, so skip them.
	 */
	if (PageUnevictable(page))
		return;

	if (likely(get_page_unless_zero(page))) {
		struct pagevec *pvec;

		local_lock(&lru_pvecs.lock);
		pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate_file);

		if (pagevec_add_and_need_flush(pvec, page))
			pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
		local_unlock(&lru_pvecs.lock);
	}
}

/*
 * deactivate_page - deactivate a page
 * @page: page to deactivate
 *
 * deactivate_page() moves @page to the inactive list if @page was on the active
 * list and was not an unevictable page. This is done to accelerate the reclaim
 * of @page.
 */
void deactivate_page(struct page *page)
{
	if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
		struct pagevec *pvec;

		local_lock(&lru_pvecs.lock);
		pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate);
		get_page(page);
		if (pagevec_add_and_need_flush(pvec, page))
			pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
		local_unlock(&lru_pvecs.lock);
	}
}

/**
 * mark_page_lazyfree - make an anon page lazyfree
 * @page: page to deactivate
 *
 * mark_page_lazyfree() moves @page to the inactive file list.
 * This is done to accelerate the reclaim of @page.
 */
void mark_page_lazyfree(struct page *page)
{
	if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
	    !PageSwapCache(page) && !PageUnevictable(page)) {
		struct pagevec *pvec;

		local_lock(&lru_pvecs.lock);
		pvec = this_cpu_ptr(&lru_pvecs.lru_lazyfree);
		get_page(page);
		if (pagevec_add_and_need_flush(pvec, page))
			pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
		local_unlock(&lru_pvecs.lock);
	}
}
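
/*
 * mark_page_lazyfree() is driven by madvise(MADV_FREE): clean anonymous
 * pages in the advised range are moved to the inactive file list, from
 * where reclaim can discard them without swap I/O unless they are
 * redirtied first.
 */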

/**
 * mark_page_lazyfree_movetail - make a swapbacked page lazyfree
 * @page: page to deactivate
 * @tail: if true, add @page to the tail of the inactive file list
 *
 * mark_page_lazyfree_movetail() moves @page to the inactive file list
 * (to its tail if @tail is true). This is done to accelerate the reclaim
 * of @page.
 */
void mark_page_lazyfree_movetail(struct page *page, bool tail)
{
	if (PageLRU(page) && !PageUnevictable(page) && PageSwapBacked(page) &&
	    !PageSwapCache(page)) {
		struct pagevec *pvec;

		local_lock(&lru_pvecs.lock);
		pvec = this_cpu_ptr(&lru_pvecs.lru_lazyfree_movetail);
		get_page(page);
		if (pagevec_add_and_need_flush(pvec, page))
			pagevec_lru_move_fn(pvec,
					    lru_lazyfree_movetail_fn, &tail);
		local_unlock(&lru_pvecs.lock);
	}
}

void lru_add_drain(void)
{
	local_lock(&lru_pvecs.lock);
	lru_add_drain_cpu(smp_processor_id());
	local_unlock(&lru_pvecs.lock);
}

/*
 * In the SMP case this is called from per-cpu workqueue context, so
 * lru_add_drain_cpu() and invalidate_bh_lrus_cpu() run on the same CPU.
 * In the !SMP case this is not a problem either, since there is only one
 * CPU and the locks disable preemption.
 */
static void lru_add_and_bh_lrus_drain(void)
{
	local_lock(&lru_pvecs.lock);
	lru_add_drain_cpu(smp_processor_id());
	local_unlock(&lru_pvecs.lock);
	invalidate_bh_lrus_cpu();
}

void lru_add_drain_cpu_zone(struct zone *zone)
{
	local_lock(&lru_pvecs.lock);
	lru_add_drain_cpu(smp_processor_id());
	drain_local_pages(zone);
	local_unlock(&lru_pvecs.lock);
}

#ifdef CONFIG_SMP

static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);

static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
	lru_add_and_bh_lrus_drain();
}

/*
 * Doesn't need any cpu hotplug locking because we rely on per-cpu
 * kworkers being shut down before our page_alloc_cpu_dead callback is
 * executed on the offlined cpu.
 * Calling this function with cpu hotplug locks held can actually lead
 * to obscure indirect dependencies via WQ context.
 */
inline void __lru_add_drain_all(bool force_all_cpus)
{
	/*
	 * lru_drain_gen - Global pages generation number
	 *
	 * (A) Definition: global lru_drain_gen = x implies that all generations
	 *     0 < n <= x are already *scheduled* for draining.
	 *
	 * This is an optimization for the highly-contended use case where a
	 * user space workload keeps constantly generating a flow of pages for
	 * each CPU.
	 */
	static unsigned int lru_drain_gen;
	static struct cpumask has_work;
	static DEFINE_MUTEX(lock);
	unsigned cpu, this_gen;

	/*
	 * Make sure nobody triggers this path before mm_percpu_wq is fully
	 * initialized.
	 */
	if (WARN_ON(!mm_percpu_wq))
		return;

	/*
	 * Guarantee pagevec counter stores visible by this CPU are visible to
	 * other CPUs before loading the current drain generation.
	 */
	smp_mb();

	/*
	 * (B) Locally cache global LRU draining generation number
	 *
	 * The read barrier ensures that the counter is loaded before the mutex
	 * is taken. It pairs with smp_mb() inside the mutex critical section
	 * at (D).
	 */
	this_gen = smp_load_acquire(&lru_drain_gen);

	mutex_lock(&lock);

	/*
	 * (C) Exit the draining operation if a newer generation, from another
	 * lru_add_drain_all(), was already scheduled for draining. Check (A).
	 */
	if (unlikely(this_gen != lru_drain_gen && !force_all_cpus))
		goto done;

	/*
	 * (D) Increment global generation number
	 *
	 * Pairs with smp_load_acquire() at (B), outside of the critical
	 * section. Use a full memory barrier to guarantee that the new global
	 * drain generation number is stored before loading pagevec counters.
	 *
	 * This pairing must be done here, before the for_each_online_cpu loop
	 * below which drains the page vectors.
	 *
	 * Let x, y, and z represent some system CPU numbers, where x < y < z.
	 * Assume CPU #z is in the middle of the for_each_online_cpu loop
	 * below and has already reached CPU #y's per-cpu data. CPU #x comes
	 * along, adds some pages to its per-cpu vectors, then calls
	 * lru_add_drain_all().
	 *
	 * If the paired barrier is done at any later step, e.g. after the
	 * loop, CPU #x will just exit at (C) and miss flushing out all of its
	 * added pages.
	 */
	WRITE_ONCE(lru_drain_gen, lru_drain_gen + 1);
	smp_mb();

	cpumask_clear(&has_work);
	for_each_online_cpu(cpu) {
		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);

		if (force_all_cpus ||
		    pagevec_count(&per_cpu(lru_pvecs.lru_add, cpu)) ||
		    data_race(pagevec_count(&per_cpu(lru_rotate.pvec, cpu))) ||
		    pagevec_count(&per_cpu(lru_pvecs.lru_deactivate_file, cpu)) ||
		    pagevec_count(&per_cpu(lru_pvecs.lru_deactivate, cpu)) ||
		    pagevec_count(&per_cpu(lru_pvecs.lru_lazyfree, cpu)) ||
		    pagevec_count(&per_cpu(lru_pvecs.lru_lazyfree_movetail, cpu)) ||
		    need_activate_page_drain(cpu) ||
		    has_bh_in_lru(cpu, NULL)) {
			INIT_WORK(work, lru_add_drain_per_cpu);
			queue_work_on(cpu, mm_percpu_wq, work);
			__cpumask_set_cpu(cpu, &has_work);
		}
	}

	for_each_cpu(cpu, &has_work)
		flush_work(&per_cpu(lru_add_drain_work, cpu));

done:
	mutex_unlock(&lock);
}

void lru_add_drain_all(void)
{
	__lru_add_drain_all(false);
}
#else
void lru_add_drain_all(void)
{
	lru_add_drain();
}
#endif /* CONFIG_SMP */

static atomic_t lru_disable_count = ATOMIC_INIT(0);

bool lru_cache_disabled(void)
{
	return atomic_read(&lru_disable_count) != 0;
}

void lru_cache_enable(void)
{
	atomic_dec(&lru_disable_count);
}
EXPORT_SYMBOL_GPL(lru_cache_enable);

/*
 * lru_cache_disable() needs to be called before we start compiling
 * a list of pages to be migrated using isolate_lru_page().
 * It drains the pages in the LRU caches and then disables the caches on
 * all cpus until lru_cache_enable() is called.
 *
 * Must be paired with a call to lru_cache_enable().
 */
void lru_cache_disable(void)
{
	/*
	 * If someone has already disabled the LRU cache, just bump
	 * lru_disable_count and return.
	 */
	if (atomic_inc_not_zero(&lru_disable_count))
		return;
#ifdef CONFIG_SMP
	/*
	 * lru_add_drain_all in the force mode will schedule draining on
	 * all online CPUs so any calls of lru_cache_disabled wrapped by
	 * local_lock or preemption disabled would be ordered by that.
	 * The atomic operation doesn't need to have stronger ordering
	 * requirements because that is enforced by the scheduling
	 * guarantees.
	 */
	__lru_add_drain_all(true);
#else
	lru_add_and_bh_lrus_drain();
#endif
	atomic_inc(&lru_disable_count);
}
EXPORT_SYMBOL_GPL(lru_cache_disable);
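
/*
 * Sketch of the expected pairing (illustrative only): a caller that wants
 * to isolate pages without them hiding in per-CPU pagevecs brackets the
 * operation like this:
 *
 *	lru_cache_disable();
 *	... isolate_lru_page() / migrate the pages ...
 *	lru_cache_enable();
 */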

/**
 * release_pages - batched put_page()
 * @pages: array of pages to release
 * @nr: number of pages
 *
 * Decrement the reference count on all the pages in @pages. If it
 * fell to zero, remove the page from the LRU and free it.
 */
void release_pages(struct page **pages, int nr)
{
	int i;
	LIST_HEAD(pages_to_free);
	struct pglist_data *locked_pgdat = NULL;
	struct lruvec *lruvec;
	unsigned long flags;
	unsigned int lock_batch;

	for (i = 0; i < nr; i++) {
		struct page *page = pages[i];

		/*
		 * Make sure the IRQ-safe lock-holding time does not get
		 * excessive with a continuous string of pages from the
		 * same pgdat. The lock is held only if pgdat != NULL.
		 */
		if (locked_pgdat && ++lock_batch == SWAP_CLUSTER_MAX) {
			spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags);
			locked_pgdat = NULL;
		}

		page = compound_head(page);
		if (is_huge_zero_page(page))
			continue;

		if (is_zone_device_page(page)) {
			if (locked_pgdat) {
				spin_unlock_irqrestore(&locked_pgdat->lru_lock,
						       flags);
				locked_pgdat = NULL;
			}
			/*
			 * ZONE_DEVICE pages that return 'false' from
			 * page_is_devmap_managed() do not require special
			 * processing, and instead, expect a call to
			 * put_page_testzero().
			 */
			if (page_is_devmap_managed(page)) {
				put_devmap_managed_page(page);
				continue;
			}
		}

		if (!put_page_testzero(page))
			continue;

		if (PageCompound(page)) {
			if (locked_pgdat) {
				spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags);
				locked_pgdat = NULL;
			}
			__put_compound_page(page);
			continue;
		}

		if (PageLRU(page)) {
			struct pglist_data *pgdat = page_pgdat(page);

			if (pgdat != locked_pgdat) {
				if (locked_pgdat)
					spin_unlock_irqrestore(&locked_pgdat->lru_lock,
							       flags);
				lock_batch = 0;
				locked_pgdat = pgdat;
				spin_lock_irqsave(&locked_pgdat->lru_lock, flags);
			}

			lruvec = mem_cgroup_page_lruvec(page, locked_pgdat);
			VM_BUG_ON_PAGE(!PageLRU(page), page);
			__ClearPageLRU(page);
			del_page_from_lru_list(page, lruvec, page_off_lru(page));
		}

		__ClearPageWaiters(page);

		list_add(&page->lru, &pages_to_free);
	}
	if (locked_pgdat)
		spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags);

	mem_cgroup_uncharge_list(&pages_to_free);
	free_unref_page_list(&pages_to_free);
}
EXPORT_SYMBOL(release_pages);
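
/*
 * Batched callers include pagevec_release() (via __pagevec_release() below)
 * and the swap/unmap paths that free large arrays of pages at once, which
 * is where the SWAP_CLUSTER_MAX lock batching above pays off.
 */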

/*
 * The pages which we're about to release may be in the deferred lru-addition
 * queues. That would prevent them from really being freed right now. That's
 * OK from a correctness point of view but is inefficient - those pages may be
 * cache-warm and we want to give them back to the page allocator ASAP.
 *
 * So __pagevec_release() will drain those queues here. __pagevec_lru_add()
 * and __pagevec_lru_add_active() call release_pages() directly to avoid
 * mutual recursion.
 */
void __pagevec_release(struct pagevec *pvec)
{
	if (!pvec->percpu_pvec_drained) {
		lru_add_drain();
		pvec->percpu_pvec_drained = true;
	}
	release_pages(pvec->pages, pagevec_count(pvec));
	pagevec_reinit(pvec);
}
EXPORT_SYMBOL(__pagevec_release);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* used by __split_huge_page_refcount() */
void lru_add_page_tail(struct page *page, struct page *page_tail,
		       struct lruvec *lruvec, struct list_head *list)
{
	VM_BUG_ON_PAGE(!PageHead(page), page);
	VM_BUG_ON_PAGE(PageCompound(page_tail), page);
	VM_BUG_ON_PAGE(PageLRU(page_tail), page);
	lockdep_assert_held(&lruvec_pgdat(lruvec)->lru_lock);

	if (!list)
		SetPageLRU(page_tail);

	if (likely(PageLRU(page)))
		list_add_tail(&page_tail->lru, &page->lru);
	else if (list) {
		/* page reclaim is reclaiming a huge page */
		get_page(page_tail);
		list_add_tail(&page_tail->lru, list);
	} else {
		/*
		 * Head page has not yet been counted, as an hpage,
		 * so we must account for each subpage individually.
		 *
		 * Put page_tail on the list at the correct position
		 * so they all end up in order.
		 */
		add_page_to_lru_list_tail(page_tail, lruvec,
					  page_lru(page_tail));
	}
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
				 void *arg)
{
	enum lru_list lru;
	int was_unevictable = TestClearPageUnevictable(page);
	int nr_pages = thp_nr_pages(page);

	VM_BUG_ON_PAGE(PageLRU(page), page);

	/*
	 * Page becomes evictable in two ways:
	 * 1) Within LRU lock [munlock_vma_page() and __munlock_pagevec()].
	 * 2) Before acquiring LRU lock to put the page to correct LRU and then
	 *   a) do PageLRU check with lock [check_move_unevictable_pages]
	 *   b) do PageLRU check before lock [clear_page_mlock]
	 *
	 * (1) & (2a) are ok as LRU lock will serialize them. For (2b), we need
	 * following strict ordering:
	 *
	 * #0: __pagevec_lru_add_fn		#1: clear_page_mlock
	 *
	 * SetPageLRU()				TestClearPageMlocked()
	 * smp_mb() // explicit ordering	// above provides strict
	 *					// ordering
	 * PageMlocked()			PageLRU()
	 *
	 *
	 * if '#1' does not observe setting of PG_lru by '#0' and fails
	 * isolation, the explicit barrier will make sure that page_evictable
	 * check will put the page in correct LRU. Without smp_mb(), SetPageLRU
	 * can be reordered after PageMlocked check and can make '#1' to fail
	 * the isolation of the page whose Mlocked bit is cleared (#0 is also
	 * looking at the same page) and the evictable page will be stranded
	 * in an unevictable LRU.
	 */
	SetPageLRU(page);
	smp_mb__after_atomic();

	if (page_evictable(page)) {
		lru = page_lru(page);
		if (was_unevictable)
			__count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
	} else {
		lru = LRU_UNEVICTABLE;
		ClearPageActive(page);
		SetPageUnevictable(page);
		if (!was_unevictable)
			__count_vm_events(UNEVICTABLE_PGCULLED, nr_pages);
	}

	add_page_to_lru_list(page, lruvec, lru);
	trace_mm_lru_insertion(page, lru);
}

/*
 * Add the passed pages to the LRU, then drop the caller's refcount
 * on them. Reinitialises the caller's pagevec.
 */
void __pagevec_lru_add(struct pagevec *pvec)
{
	pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, NULL);
}

/**
 * pagevec_lookup_entries - gang pagecache lookup
 * @pvec:	Where the resulting entries are placed
 * @mapping:	The address_space to search
 * @start:	The starting entry index
 * @nr_entries:	The maximum number of entries
 * @indices:	The cache indices corresponding to the entries in @pvec
 *
 * pagevec_lookup_entries() will search for and return a group of up
 * to @nr_entries pages and shadow entries in the mapping. All
 * entries are placed in @pvec. pagevec_lookup_entries() takes a
 * reference against actual pages in @pvec.
 *
 * The search returns a group of mapping-contiguous entries with
 * ascending indexes. There may be holes in the indices due to
 * not-present entries.
 *
 * Only one subpage of a Transparent Huge Page is returned in one call:
 * allowing truncate_inode_pages_range() to evict the whole THP without
 * cycling through a pagevec of extra references.
 *
 * pagevec_lookup_entries() returns the number of entries which were
 * found.
 */
unsigned pagevec_lookup_entries(struct pagevec *pvec,
				struct address_space *mapping,
				pgoff_t start, unsigned nr_entries,
				pgoff_t *indices)
{
	pvec->nr = find_get_entries(mapping, start, nr_entries,
				    pvec->pages, indices);
	return pagevec_count(pvec);
}

/**
 * pagevec_remove_exceptionals - pagevec exceptionals pruning
 * @pvec:	The pagevec to prune
 *
 * pagevec_lookup_entries() fills both pages and exceptional radix
 * tree entries into the pagevec. This function prunes all
 * exceptionals from @pvec without leaving holes, so that it can be
 * passed on to page-only pagevec operations.
 */
void pagevec_remove_exceptionals(struct pagevec *pvec)
{
	int i, j;

	for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		if (!xa_is_value(page))
			pvec->pages[j++] = page;
	}
	pvec->nr = j;
}

/**
 * pagevec_lookup_range - gang pagecache lookup
 * @pvec:	Where the resulting pages are placed
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @end:	The final page index
 *
 * pagevec_lookup_range() will search for & return a group of up to PAGEVEC_SIZE
 * pages in the mapping starting from index @start and up to index @end
 * (inclusive). The pages are placed in @pvec. pagevec_lookup_range() takes a
 * reference against the pages in @pvec.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes. There may be holes in the indices due to not-present pages. We
 * also update @start to index the next page for the traversal.
 *
 * pagevec_lookup_range() returns the number of pages which were found. If this
 * number is smaller than PAGEVEC_SIZE, the end of the specified range has been
 * reached.
 */
unsigned pagevec_lookup_range(struct pagevec *pvec,
		struct address_space *mapping, pgoff_t *start, pgoff_t end)
{
	pvec->nr = find_get_pages_range(mapping, start, end, PAGEVEC_SIZE,
					pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_range);
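
/*
 * Typical usage pattern (illustrative sketch): walking all pages of a
 * mapping in index order, one batch at a time:
 *
 *	struct pagevec pvec;
 *	pgoff_t index = 0;
 *	int i;
 *
 *	pagevec_init(&pvec);
 *	while (pagevec_lookup_range(&pvec, mapping, &index, (pgoff_t)-1)) {
 *		for (i = 0; i < pagevec_count(&pvec); i++) {
 *			struct page *page = pvec.pages[i];
 *			... process page ...
 *		}
 *		pagevec_release(&pvec);
 *		cond_resched();
 *	}
 */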

unsigned pagevec_lookup_range_tag(struct pagevec *pvec,
		struct address_space *mapping, pgoff_t *index, pgoff_t end,
		xa_mark_t tag)
{
	pvec->nr = find_get_pages_range_tag(mapping, index, end, tag,
					PAGEVEC_SIZE, pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_range_tag);

unsigned pagevec_lookup_range_nr_tag(struct pagevec *pvec,
		struct address_space *mapping, pgoff_t *index, pgoff_t end,
		xa_mark_t tag, unsigned max_pages)
{
	pvec->nr = find_get_pages_range_tag(mapping, index, end, tag,
		min_t(unsigned int, max_pages, PAGEVEC_SIZE), pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_range_nr_tag);

/*
 * Perform any setup for the swap system
 */
void __init swap_setup(void)
{
	unsigned long megs = totalram_pages() >> (20 - PAGE_SHIFT);

	/* Use a smaller cluster for small-memory machines */
	if (megs < 16)
		page_cluster = 2;
	else
		page_cluster = 3;
	/*
	 * Right now other parts of the system mean that we
	 * _really_ don't want to cluster much more.
	 */
}
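
/*
 * page_cluster is the log2 of the swap readahead window: the default of 3
 * means up to 1 << 3 = 8 pages are read around a faulting swap entry (2 on
 * machines with less than 16 MB of RAM). It can be tuned at runtime via
 * /proc/sys/vm/page-cluster.
 */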

#ifdef CONFIG_DEV_PAGEMAP_OPS
void put_devmap_managed_page(struct page *page)
{
	int count;

	if (WARN_ON_ONCE(!page_is_devmap_managed(page)))
		return;

	count = page_ref_dec_return(page);

	/*
	 * devmap page refcounts are 1-based, rather than 0-based: if
	 * refcount is 1, then the page is free and the refcount is
	 * stable because nobody holds a reference on the page.
	 */
	if (count == 1)
		free_devmap_managed_page(page);
	else if (!count)
		__put_page(page);
}
EXPORT_SYMBOL(put_devmap_managed_page);
#endif