/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>
#include <linux/sched/debug.h>

struct pagevec;

/*
 * Bits in mapping->flags.
 */
enum mapping_flags {
        AS_EIO = 0,             /* IO error on async write */
        AS_ENOSPC = 1,          /* ENOSPC on async write */
        AS_MM_ALL_LOCKS = 2,    /* under mm_take_all_locks() */
        AS_UNEVICTABLE = 3,     /* e.g., ramdisk, SHM_LOCK */
        AS_EXITING = 4,         /* final truncate in progress */
        /* writeback related tags are not used */
        AS_NO_WRITEBACK_TAGS = 5,
        AS_THP_SUPPORT = 6,     /* THPs supported */
};

/**
 * mapping_set_error - record a writeback error in the address_space
 * @mapping: the mapping in which an error should be set
 * @error: the error to set in the mapping
 *
 * When writeback fails in some way, we must record that error so that
 * userspace can be informed when fsync and the like are called.  We endeavor
 * to report errors on any file that was open at the time of the error.  Some
 * internal callers also need to know when writeback errors have occurred.
 *
 * When a writeback error occurs, most filesystems will want to call
 * mapping_set_error to record the error in the mapping so that it can be
 * reported when the application calls fsync(2).
 */
static inline void mapping_set_error(struct address_space *mapping, int error)
{
        if (likely(!error))
                return;

        /* Record in wb_err for checkers using errseq_t based tracking */
        __filemap_set_wb_err(mapping, error);

        /* Record it in superblock */
        if (mapping->host)
                errseq_set(&mapping->host->i_sb->s_wb_err, error);

        /* Record it in flags for now, for legacy callers */
        if (error == -ENOSPC)
                set_bit(AS_ENOSPC, &mapping->flags);
        else
                set_bit(AS_EIO, &mapping->flags);
}
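
/*
 * Example (an illustrative sketch, not taken from a real filesystem; the
 * myfs_* helper is hypothetical): a writepage-style error path records the
 * failure so that a later fsync(2) on any open fd will see it.
 *
 *      static int myfs_writepage(struct page *page,
 *                                struct writeback_control *wbc)
 *      {
 *              struct address_space *mapping = page->mapping;
 *              int err;
 *
 *              set_page_writeback(page);
 *              unlock_page(page);
 *              err = myfs_write_page_sync(page);   // hypothetical I/O helper
 *              if (err < 0)
 *                      mapping_set_error(mapping, err);
 *              end_page_writeback(page);
 *              return err;
 *      }
 */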
static inline void mapping_set_unevictable(struct address_space *mapping)
{
        set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
        clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline bool mapping_unevictable(struct address_space *mapping)
{
        return mapping && test_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
        set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
        return test_bit(AS_EXITING, &mapping->flags);
}

static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
        set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline int mapping_use_writeback_tags(struct address_space *mapping)
{
        return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
        return mapping->gfp_mask;
}

/* Restricts the given gfp_mask to what the mapping allows. */
static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
                gfp_t gfp_mask)
{
        return mapping_gfp_mask(mapping) & gfp_mask;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
        m->gfp_mask = mask;
}

static inline bool mapping_thp_support(struct address_space *mapping)
{
        return test_bit(AS_THP_SUPPORT, &mapping->flags);
}

static inline int filemap_nr_thps(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
        return atomic_read(&mapping->nr_thps);
#else
        return 0;
#endif
}

static inline void filemap_nr_thps_inc(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
        if (!mapping_thp_support(mapping))
                atomic_inc(&mapping->nr_thps);
#else
        WARN_ON_ONCE(1);
#endif
}

static inline void filemap_nr_thps_dec(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
        if (!mapping_thp_support(mapping))
                atomic_dec(&mapping->nr_thps);
#else
        WARN_ON_ONCE(1);
#endif
}

void release_pages(struct page **pages, int nr);

/*
 * Speculatively take a reference to a page.
 * If the page is free (_refcount == 0), then _refcount is untouched, and 0
 * is returned. Otherwise, _refcount is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to lookup the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _refcount.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (e.g. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * Remove-side that cares about stability of _refcount (e.g. reclaim) has the
 * following (with the i_pages lock held):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed and then the exact
 * same page is inserted into the same position in pagecache. That's OK: the
 * old find_get_page using a lock could equally have run before or after
 * such a re-insertion, depending on the order that locks were granted.
 *
 * Lookups racing against pagecache insertion aren't a big problem: either 1
 * will find the page or it will not. Likewise, the old find_get_page could run
 * either before the insertion or afterwards, depending on timing.
 */
static inline int __page_cache_add_speculative(struct page *page, int count)
{
#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
        VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
        /*
         * Preempt must be disabled here - we rely on rcu_read_lock doing
         * this for us.
         *
         * Pagecache won't be truncated from interrupt context, so if we have
         * found a page in the radix tree here, we have pinned its refcount by
         * disabling preempt, and hence no need for the "speculative get" that
         * SMP requires.
         */
        VM_BUG_ON_PAGE(page_count(page) == 0, page);
        page_ref_add(page, count);
#else
        if (unlikely(!page_ref_add_unless(page, count, 0))) {
                /*
                 * Either the page has been freed, or will be freed.
                 * In either case, return 0 and let the caller retry
                 * (see the comments above).
                 */
                return 0;
        }
#endif
        VM_BUG_ON_PAGE(PageTail(page), page);

        return 1;
}

static inline int page_cache_get_speculative(struct page *page)
{
        return __page_cache_add_speculative(page, 1);
}

static inline int page_cache_add_speculative(struct page *page, int count)
{
        return __page_cache_add_speculative(page, count);
}
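
/*
 * Example (a simplified, illustrative sketch of the lookup-side pattern
 * described above; the real find_get_page() in mm/filemap.c also handles
 * head pages, shadow entries and more):
 *
 *      static struct page *example_find_get(struct address_space *mapping,
 *                                           pgoff_t index)
 *      {
 *              struct page *page;
 *
 *              rcu_read_lock();
 *      repeat:
 *              page = xa_load(&mapping->i_pages, index);      // 1. find page
 *              if (!page || xa_is_value(page)) {
 *                      page = NULL;
 *                      goto out;
 *              }
 *              if (!page_cache_get_speculative(page))          // 2. try to pin
 *                      goto repeat;
 *              if (unlikely(page != xa_load(&mapping->i_pages, index))) {
 *                      put_page(page);                         // 3. recheck
 *                      goto repeat;
 *              }
 *      out:
 *              rcu_read_unlock();
 *              return page;
 *      }
 */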
/**
 * attach_page_private - Attach private data to a page.
 * @page: Page to attach data to.
 * @data: Data to attach to page.
 *
 * Attaching private data to a page increments the page's reference count.
 * The data must be detached before the page will be freed.
 */
static inline void attach_page_private(struct page *page, void *data)
{
        get_page(page);
        set_page_private(page, (unsigned long)data);
        SetPagePrivate(page);
}

/**
 * detach_page_private - Detach private data from a page.
 * @page: Page to detach data from.
 *
 * Removes the data that was previously attached to the page and decrements
 * the refcount on the page.
 *
 * Return: Data that was attached to the page.
 */
static inline void *detach_page_private(struct page *page)
{
        void *data = (void *)page_private(page);

        if (!PagePrivate(page))
                return NULL;
        ClearPagePrivate(page);
        set_page_private(page, 0);
        put_page(page);

        return data;
}
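
/*
 * Example (an illustrative sketch; struct myfs_page_info and the callers
 * are hypothetical): a filesystem hanging per-page state off page->private
 * and releasing it again, e.g. from its ->releasepage() implementation.
 *
 *      static void myfs_attach_info(struct page *page,
 *                                   struct myfs_page_info *info)
 *      {
 *              attach_page_private(page, info);        // takes a page ref
 *      }
 *
 *      static int myfs_releasepage(struct page *page, gfp_t gfp)
 *      {
 *              struct myfs_page_info *info = detach_page_private(page);
 *
 *              kfree(info);                            // ref dropped by detach
 *              return 1;                               // page may be released
 *      }
 */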
#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
        return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
        return __page_cache_alloc(mapping_gfp_mask(x));
}

gfp_t readahead_gfp_mask(struct address_space *x);

typedef int filler_t(void *, struct page *);

pgoff_t page_cache_next_miss(struct address_space *mapping,
                             pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_miss(struct address_space *mapping,
                             pgoff_t index, unsigned long max_scan);

#define FGP_ACCESSED            0x00000001
#define FGP_LOCK                0x00000002
#define FGP_CREAT               0x00000004
#define FGP_WRITE               0x00000008
#define FGP_NOFS                0x00000010
#define FGP_NOWAIT              0x00000020
#define FGP_FOR_MMAP            0x00000040
#define FGP_HEAD                0x00000080

struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
                int fgp_flags, gfp_t cache_gfp_mask);

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
                                         pgoff_t offset)
{
        return pagecache_get_page(mapping, offset, 0, 0);
}

static inline struct page *find_get_page_flags(struct address_space *mapping,
                                               pgoff_t offset, int fgp_flags)
{
        return pagecache_get_page(mapping, offset, fgp_flags, 0);
}

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @index: the page index
 *
 * Looks up the page cache entry at @mapping & @index.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Context: May sleep.
 * Return: A struct page or %NULL if there is no page in the cache for this
 * index.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
                                          pgoff_t index)
{
        return pagecache_get_page(mapping, index, FGP_LOCK, 0);
}

/**
 * find_lock_head - Locate, pin and lock a pagecache page.
 * @mapping: The address_space to search.
 * @index: The page index.
 *
 * Looks up the page cache entry at @mapping & @index.  If there is a
 * page cache page, its head page is returned locked and with an increased
 * refcount.
 *
 * Context: May sleep.
 * Return: A struct page which is !PageTail, or %NULL if there is no page
 * in the cache for this index.
 */
static inline struct page *find_lock_head(struct address_space *mapping,
                                          pgoff_t index)
{
        return pagecache_get_page(mapping, index, FGP_LOCK | FGP_HEAD, 0);
}

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @index: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @index.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list.  The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
                                               pgoff_t index, gfp_t gfp_mask)
{
        return pagecache_get_page(mapping, index,
                                  FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
                                  gfp_mask);
}
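
/*
 * Example (an illustrative sketch; the helper name is hypothetical and the
 * error handling is trimmed): zeroing part of a page through the page
 * cache with find_or_create_page().
 *
 *      static int myfs_zero_range(struct address_space *mapping, pgoff_t index,
 *                                 unsigned int offset, unsigned int len)
 *      {
 *              struct page *page;
 *
 *              page = find_or_create_page(mapping, index,
 *                                         mapping_gfp_mask(mapping));
 *              if (!page)
 *                      return -ENOMEM;
 *              zero_user(page, offset, len);   // from linux/highmem.h
 *              set_page_dirty(page);
 *              unlock_page(page);              // page came back locked ...
 *              put_page(page);                 // ... and with a reference
 *              return 0;
 *      }
 */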
/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
                                                  pgoff_t index)
{
        return pagecache_get_page(mapping, index,
                                  FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
                                  mapping_gfp_mask(mapping));
}

/* Does this page contain this index? */
static inline bool thp_contains(struct page *head, pgoff_t index)
{
        /* HugeTLBfs indexes the page cache in units of hpage_size */
        if (PageHuge(head))
                return head->index == index;
        return page_index(head) == (index & ~(thp_nr_pages(head) - 1UL));
}

/*
 * Given the page we found in the page cache, return the page corresponding
 * to this index in the file
 */
static inline struct page *find_subpage(struct page *head, pgoff_t index)
{
        /* HugeTLBfs wants the head page regardless */
        if (PageHuge(head))
                return head;

        return head + (index & (thp_nr_pages(head) - 1));
}
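
/*
 * Example (an illustrative sketch): after looking up a head page with
 * FGP_HEAD, resolve the subpage covering @index.  The caller still holds
 * the head page's lock and reference.
 *
 *      static struct page *example_lock_subpage(struct address_space *mapping,
 *                                               pgoff_t index)
 *      {
 *              struct page *head = find_lock_head(mapping, index);
 *
 *              if (!head)
 *                      return NULL;
 *              VM_BUG_ON_PAGE(!thp_contains(head, index), head);
 *              return find_subpage(head, index);
 *      }
 */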
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
                          unsigned int nr_entries, struct page **entries,
                          pgoff_t *indices);
unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
                              pgoff_t end, unsigned int nr_pages,
                              struct page **pages);
static inline unsigned find_get_pages(struct address_space *mapping,
                                      pgoff_t *start, unsigned int nr_pages,
                                      struct page **pages)
{
        return find_get_pages_range(mapping, start, (pgoff_t)-1, nr_pages,
                                    pages);
}
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
                               unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
                                  pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
                                  struct page **pages);
static inline unsigned find_get_pages_tag(struct address_space *mapping,
                                          pgoff_t *index, xa_mark_t tag, unsigned int nr_pages,
                                          struct page **pages)
{
        return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag,
                                        nr_pages, pages);
}

struct page *grab_cache_page_write_begin(struct address_space *mapping,
                                         pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
                                           pgoff_t index)
{
        return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page * read_cache_page(struct address_space *mapping,
                                pgoff_t index, filler_t *filler, void *data);
extern struct page * read_cache_page_gfp(struct address_space *mapping,
                                pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
                struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page(struct address_space *mapping,
                                             pgoff_t index, void *data)
{
        return read_cache_page(mapping, index, NULL, data);
}

/*
 * Get the index of the page within the radix tree (but not for hugetlb pages).
 * (TODO: remove once hugetlb pages have ->index in PAGE_SIZE units.)
 */
static inline pgoff_t page_to_index(struct page *page)
{
        pgoff_t pgoff;

        if (likely(!PageTransTail(page)))
                return page->index;

        /*
         * We don't initialize ->index for tail pages: calculate based on
         * the head page.
         */
        pgoff = compound_head(page)->index;
        pgoff += page - compound_head(page);
        return pgoff;
}

extern pgoff_t hugetlb_basepage_index(struct page *page);

/*
 * Get the offset in PAGE_SIZE units (even for hugetlb pages).
 * (TODO: hugetlb pages should have ->index in PAGE_SIZE units.)
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
        if (unlikely(PageHuge(page)))
                return hugetlb_basepage_index(page);
        return page_to_index(page);
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
        return ((loff_t)page->index) << PAGE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
        return ((loff_t)page_index(page)) << PAGE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
                                     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
                                        unsigned long address)
{
        pgoff_t pgoff;

        if (unlikely(is_vm_hugetlb_page(vma)))
                return linear_hugepage_index(vma, address);
        pgoff = (address - READ_ONCE(vma->vm_start)) >> PAGE_SHIFT;
        pgoff += READ_ONCE(vma->vm_pgoff);
        return pgoff;
}
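
/*
 * Example (an illustrative sketch): map a faulting user address back to
 * the page cache page that backs it in the VMA's file.
 *
 *      static struct page *example_page_for_address(struct vm_area_struct *vma,
 *                                                   unsigned long address)
 *      {
 *              pgoff_t index = linear_page_index(vma, address);
 *
 *              return find_get_page(vma->vm_file->f_mapping, index);
 *      }
 */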
struct wait_page_key {
        struct page *page;
        int bit_nr;
        int page_match;
};

struct wait_page_queue {
        struct page *page;
        int bit_nr;
        wait_queue_entry_t wait;
};

static inline bool wake_page_match(struct wait_page_queue *wait_page,
                                   struct wait_page_key *key)
{
        if (wait_page->page != key->page)
                return false;
        key->page_match = 1;

        if (wait_page->bit_nr != key->bit_nr)
                return false;

        return true;
}

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_async(struct page *page, struct wait_page_queue *wait);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
                                unsigned int flags);
extern void unlock_page(struct page *page);

/*
 * Return true if the page was successfully locked.
 */
static inline int trylock_page(struct page *page)
{
        page = compound_head(page);
        return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline __sched void lock_page(struct page *page)
{
        might_sleep();
        if (!trylock_page(page))
                __lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline __sched int lock_page_killable(struct page *page)
{
        might_sleep();
        if (!trylock_page(page))
                return __lock_page_killable(page);
        return 0;
}
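
/*
 * Example (an illustrative sketch): take the page lock but back out if a
 * fatal signal arrives, and recheck for a racing truncate afterwards.
 *
 *      static int example_lock_page(struct page *page,
 *                                   struct address_space *mapping)
 *      {
 *              int err = lock_page_killable(page);
 *
 *              if (err)
 *                      return err;             // -EINTR: task is being killed
 *              if (page->mapping != mapping) { // raced with truncation
 *                      unlock_page(page);
 *                      return -EAGAIN;         // hypothetical retry code
 *              }
 *              return 0;
 *      }
 */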
/*
 * lock_page_async - Lock the page, unless this would block.  If the page
 * is already locked, then queue a callback when the page becomes unlocked.
 * This callback can then retry the operation.
 *
 * Returns 0 if the page is locked successfully, or -EIOCBQUEUED if the page
 * was already locked and the callback defined in 'wait' was queued.
 */
static inline __sched int lock_page_async(struct page *page,
                                          struct wait_page_queue *wait)
{
        if (!trylock_page(page))
                return __lock_page_async(page, wait);
        return 0;
}

/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_lock implications depend on flags; see
 * __lock_page_or_retry().
 */
static inline __sched int lock_page_or_retry(struct page *page, struct mm_struct *mm,
                                             unsigned int flags)
{
        might_sleep();
        return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback, etc.,
 * and should not be used directly.
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);
extern int wait_on_page_bit_killable(struct page *page, int bit_nr);

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * i.e. with an elevated page refcount, so that the page won't
 * go away during the wait.
 */
static inline __sched void wait_on_page_locked(struct page *page)
{
        if (PageLocked(page))
                wait_on_page_bit(compound_head(page), PG_locked);
}

static inline __sched int wait_on_page_locked_killable(struct page *page)
{
        if (!PageLocked(page))
                return 0;
        return wait_on_page_bit_killable(compound_head(page), PG_locked);
}

extern void put_and_wait_on_page_locked(struct page *page);
void wait_on_page_writeback(struct page *page);
extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);

void page_endio(struct page *page, bool is_write, int err);

/*
 * Add an arbitrary waiter to a page's wait queue.
 */
extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter);

/*
 * Fault in the given userspace address range.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
        char __user *end = uaddr + size - 1;

        if (unlikely(size == 0))
                return 0;

        if (unlikely(uaddr > end))
                return -EFAULT;
        /*
         * Writing zeroes into userspace here is OK, because we know that if
         * the zero gets there, we'll be overwriting it.
         */
        do {
                if (unlikely(__put_user(0, uaddr) != 0))
                        return -EFAULT;
                uaddr += PAGE_SIZE;
        } while (uaddr <= end);

        /* Check whether the range spilled into the next page. */
        if (((unsigned long)uaddr & PAGE_MASK) ==
                        ((unsigned long)end & PAGE_MASK))
                return __put_user(0, end);

        return 0;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
        volatile char c;
        const char __user *end = uaddr + size - 1;

        if (unlikely(size == 0))
                return 0;

        if (unlikely(uaddr > end))
                return -EFAULT;

        do {
                if (unlikely(__get_user(c, uaddr) != 0))
                        return -EFAULT;
                uaddr += PAGE_SIZE;
        } while (uaddr <= end);

        /* Check whether the range spilled into the next page. */
        if (((unsigned long)uaddr & PAGE_MASK) ==
                        ((unsigned long)end & PAGE_MASK)) {
                return __get_user(c, end);
        }

        (void)c;
        return 0;
}
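
/*
 * Example (an illustrative sketch of the classic buffered-write pattern;
 * the helper name is hypothetical): fault the user buffer in before taking
 * page locks, so that a later copy done with page faults disabled is likely
 * to succeed on the first attempt.
 *
 *      static ssize_t example_prepare_write(const char __user *buf, int len)
 *      {
 *              if (fault_in_pages_readable(buf, len))
 *                      return -EFAULT;
 *              // ... lock the pagecache page, then copy the data with
 *              // page faults disabled, retrying on a short copy ...
 *              return 0;
 *      }
 */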
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
                             pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
                          pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
void delete_from_page_cache_batch(struct address_space *mapping,
                                  struct pagevec *pvec);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __SetPageLocked() against it.
 */
static inline int add_to_page_cache(struct page *page,
                struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
        int error;

        __SetPageLocked(page);
        error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
        if (unlikely(error))
                __ClearPageLocked(page);
        return error;
}
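
/*
 * Example (an illustrative sketch): allocate a fresh page and insert it
 * into both the page cache and the LRU at @index, which is the normal path
 * for newly read data.  On success the page is returned locked and with a
 * reference held.
 *
 *      static struct page *example_new_cache_page(struct address_space *mapping,
 *                                                 pgoff_t index)
 *      {
 *              gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
 *              struct page *page = __page_cache_alloc(gfp);
 *
 *              if (!page)
 *                      return NULL;
 *              if (add_to_page_cache_lru(page, mapping, index, gfp)) {
 *                      put_page(page);         // e.g. -EEXIST: lost a race
 *                      return NULL;
 *              }
 *              return page;
 *      }
 */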
/**
 * struct readahead_control - Describes a readahead request.
 *
 * A readahead request is for consecutive pages.  Filesystems which
 * implement the ->readahead method should call readahead_page() or
 * readahead_page_batch() in a loop and attempt to start I/O against
 * each page in the request.
 *
 * Most of the fields in this struct are private and should be accessed
 * by the functions below.
 *
 * @file: The file, used primarily by network filesystems for authentication.
 *        May be NULL if invoked internally by the filesystem.
 * @mapping: Readahead this filesystem object.
 */
struct readahead_control {
        struct file *file;
        struct address_space *mapping;
        /* private: use the readahead_* accessors instead */
        pgoff_t _index;
        unsigned int _nr_pages;
        unsigned int _batch_count;
};

#define DEFINE_READAHEAD(rac, f, m, i)                                  \
        struct readahead_control rac = {                                \
                .file = f,                                              \
                .mapping = m,                                           \
                ._index = i,                                            \
        }

#define VM_READAHEAD_PAGES      (SZ_128K / PAGE_SIZE)

void page_cache_ra_unbounded(struct readahead_control *,
                unsigned long nr_to_read, unsigned long lookahead_count);
void page_cache_sync_ra(struct readahead_control *, struct file_ra_state *,
                unsigned long req_count);
void page_cache_async_ra(struct readahead_control *, struct file_ra_state *,
                struct page *, unsigned long req_count);

/**
 * page_cache_sync_readahead - generic file readahead
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @file: Used by the filesystem for authentication.
 * @index: Index of first page to be read.
 * @req_count: Total number of pages being read by the caller.
 *
 * page_cache_sync_readahead() should be called when a cache miss happens:
 * it will submit the read.  The readahead logic may decide to piggyback more
 * pages onto the read request if access patterns suggest it will improve
 * performance.
 */
static inline
void page_cache_sync_readahead(struct address_space *mapping,
                struct file_ra_state *ra, struct file *file, pgoff_t index,
                unsigned long req_count)
{
        DEFINE_READAHEAD(ractl, file, mapping, index);
        page_cache_sync_ra(&ractl, ra, req_count);
}

/**
 * page_cache_async_readahead - file readahead for marked pages
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @file: Used by the filesystem for authentication.
 * @page: The page at @index which triggered the readahead call.
 * @index: Index of first page to be read.
 * @req_count: Total number of pages being read by the caller.
 *
 * page_cache_async_readahead() should be called when a page is used which
 * is marked as PageReadahead; this is a marker to suggest that the application
 * has used up enough of the readahead window that we should start pulling in
 * more pages.
 */
static inline
void page_cache_async_readahead(struct address_space *mapping,
                struct file_ra_state *ra, struct file *file,
                struct page *page, pgoff_t index, unsigned long req_count)
{
        DEFINE_READAHEAD(ractl, file, mapping, index);
        page_cache_async_ra(&ractl, ra, page, req_count);
}

/**
 * readahead_page - Get the next page to read.
 * @rac: The current readahead request.
 *
 * Context: The page is locked and has an elevated refcount.  The caller
 * should decrease the refcount once the page has been submitted for I/O
 * and unlock the page once all I/O to that page has completed.
 * Return: A pointer to the next page, or %NULL if we are done.
 */
static inline struct page *readahead_page(struct readahead_control *rac)
{
        struct page *page;

        BUG_ON(rac->_batch_count > rac->_nr_pages);
        rac->_nr_pages -= rac->_batch_count;
        rac->_index += rac->_batch_count;

        if (!rac->_nr_pages) {
                rac->_batch_count = 0;
                return NULL;
        }

        page = xa_load(&rac->mapping->i_pages, rac->_index);
        VM_BUG_ON_PAGE(!PageLocked(page), page);
        rac->_batch_count = thp_nr_pages(page);

        return page;
}
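
/*
 * Example (an illustrative sketch of a ->readahead() implementation; the
 * submission helper is hypothetical): submit each page for read I/O and
 * drop the lookup reference; the I/O completion path is expected to unlock
 * the page.
 *
 *      static void myfs_readahead(struct readahead_control *rac)
 *      {
 *              struct page *page;
 *
 *              while ((page = readahead_page(rac))) {
 *                      myfs_submit_read(rac->file, page);
 *                      put_page(page);
 *              }
 *      }
 */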
static inline unsigned int __readahead_batch(struct readahead_control *rac,
                struct page **array, unsigned int array_sz)
{
        unsigned int i = 0;
        XA_STATE(xas, &rac->mapping->i_pages, 0);
        struct page *page;

        BUG_ON(rac->_batch_count > rac->_nr_pages);
        rac->_nr_pages -= rac->_batch_count;
        rac->_index += rac->_batch_count;
        rac->_batch_count = 0;

        xas_set(&xas, rac->_index);
        rcu_read_lock();
        xas_for_each(&xas, page, rac->_index + rac->_nr_pages - 1) {
                if (xas_retry(&xas, page))
                        continue;
                VM_BUG_ON_PAGE(!PageLocked(page), page);
                VM_BUG_ON_PAGE(PageTail(page), page);
                array[i++] = page;
                rac->_batch_count += thp_nr_pages(page);

                /*
                 * The page cache isn't using multi-index entries yet,
                 * so the xas cursor needs to be manually moved to the
                 * next index.  This can be removed once the page cache
                 * is converted.
                 */
                if (PageHead(page))
                        xas_set(&xas, rac->_index + rac->_batch_count);

                if (i == array_sz)
                        break;
        }
        rcu_read_unlock();

        return i;
}

/**
 * readahead_page_batch - Get a batch of pages to read.
 * @rac: The current readahead request.
 * @array: An array of pointers to struct page.
 *
 * Context: The pages are locked and have an elevated refcount.  The caller
 * should decrease the refcount once each page has been submitted for I/O,
 * and unlock each page once all I/O to it has completed.
 * Return: The number of pages placed in the array.  0 indicates the request
 * is complete.
 */
#define readahead_page_batch(rac, array)                                \
        __readahead_batch(rac, array, ARRAY_SIZE(array))
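
/*
 * Example (an illustrative sketch; the array size and submission helper
 * are arbitrary/hypothetical): the batched variant of the loop shown above
 * readahead_page().
 *
 *      static void myfs_readahead_batched(struct readahead_control *rac)
 *      {
 *              struct page *pages[16];
 *              unsigned int i, nr;
 *
 *              while ((nr = readahead_page_batch(rac, pages))) {
 *                      for (i = 0; i < nr; i++) {
 *                              myfs_submit_read(rac->file, pages[i]);
 *                              put_page(pages[i]);
 *                      }
 *              }
 *      }
 */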
/**
 * readahead_pos - The byte offset into the file of this readahead request.
 * @rac: The readahead request.
 */
static inline loff_t readahead_pos(struct readahead_control *rac)
{
        return (loff_t)rac->_index * PAGE_SIZE;
}

/**
 * readahead_length - The number of bytes in this readahead request.
 * @rac: The readahead request.
 */
static inline loff_t readahead_length(struct readahead_control *rac)
{
        return (loff_t)rac->_nr_pages * PAGE_SIZE;
}

/**
 * readahead_index - The index of the first page in this readahead request.
 * @rac: The readahead request.
 */
static inline pgoff_t readahead_index(struct readahead_control *rac)
{
        return rac->_index;
}

/**
 * readahead_count - The number of pages in this readahead request.
 * @rac: The readahead request.
 */
static inline unsigned int readahead_count(struct readahead_control *rac)
{
        return rac->_nr_pages;
}

static inline unsigned long dir_pages(struct inode *inode)
{
        return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
                               PAGE_SHIFT;
}

/**
 * page_mkwrite_check_truncate - check if page was truncated
 * @page: the page to check
 * @inode: the inode to check the page against
 *
 * Returns the number of bytes in the page up to EOF,
 * or -EFAULT if the page was truncated.
 */
static inline int page_mkwrite_check_truncate(struct page *page,
                                              struct inode *inode)
{
        loff_t size = i_size_read(inode);
        pgoff_t index = size >> PAGE_SHIFT;
        int offset = offset_in_page(size);

        if (page->mapping != inode->i_mapping)
                return -EFAULT;

        /* page is wholly inside EOF */
        if (page->index < index)
                return PAGE_SIZE;
        /* page is wholly past EOF */
        if (page->index > index || !offset)
                return -EFAULT;
        /* page is partially inside EOF */
        return offset;
}
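
/*
 * Example (an illustrative sketch of a ->page_mkwrite() handler): bail out
 * with VM_FAULT_NOPAGE if the page was truncated while we waited for the
 * page lock, otherwise dirty it and return it still locked.
 *
 *      static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
 *      {
 *              struct page *page = vmf->page;
 *              struct inode *inode = file_inode(vmf->vma->vm_file);
 *              int len;
 *
 *              lock_page(page);
 *              len = page_mkwrite_check_truncate(page, inode);
 *              if (len < 0) {
 *                      unlock_page(page);
 *                      return VM_FAULT_NOPAGE;
 *              }
 *              set_page_dirty(page);           // only @len bytes are valid
 *              return VM_FAULT_LOCKED;
 *      }
 */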
/**
 * i_blocks_per_page - How many blocks fit in this page.
 * @inode: The inode which contains the blocks.
 * @page: The page (head page if the page is a THP).
 *
 * If the block size is larger than the size of this page, return zero.
 *
 * Context: The caller should hold a refcount on the page to prevent it
 * from being split.
 * Return: The number of filesystem blocks covered by this page.
 */
static inline
unsigned int i_blocks_per_page(struct inode *inode, struct page *page)
{
        return thp_size(page) >> inode->i_blkbits;
}

#endif /* _LINUX_PAGEMAP_H */