// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/backing-dev.h>
#include <linux/dax.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>	/* grr. try_to_release_page,
				   do_invalidatepage */
#include <linux/shmem_fs.h>
#include <linux/cleancache.h>
#include <linux/rmap.h>
#include "internal.h"
/*
 * Regular page slots are stabilized by the page lock even without the tree
 * itself locked. These unlocked entries need verification under the tree
 * lock.
 */
static inline void __clear_shadow_entry(struct address_space *mapping,
				pgoff_t index, void *entry)
{
	XA_STATE(xas, &mapping->i_pages, index);

	xas_set_update(&xas, workingset_update_node);
	if (xas_load(&xas) != entry)
		return;
	xas_store(&xas, NULL);
	mapping->nrexceptional--;
}

static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
			       void *entry)
{
	xa_lock_irq(&mapping->i_pages);
	__clear_shadow_entry(mapping, index, entry);
	xa_unlock_irq(&mapping->i_pages);
}
/*
 * Unconditionally remove exceptional entries. Usually called from truncate
 * path. Note that the pagevec may be altered by this function by removing
 * exceptional entries similar to what pagevec_remove_exceptionals does.
 */
static void truncate_exceptional_pvec_entries(struct address_space *mapping,
				struct pagevec *pvec, pgoff_t *indices,
				pgoff_t end)
{
	int i, j;
	bool dax, lock;

	/* Handled by shmem itself */
	if (shmem_mapping(mapping))
		return;

	for (j = 0; j < pagevec_count(pvec); j++)
		if (xa_is_value(pvec->pages[j]))
			break;

	if (j == pagevec_count(pvec))
		return;

	dax = dax_mapping(mapping);
	lock = !dax && indices[j] < end;
	if (lock)
		xa_lock_irq(&mapping->i_pages);

	for (i = j; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		pgoff_t index = indices[i];

		if (!xa_is_value(page)) {
			pvec->pages[j++] = page;
			continue;
		}

		if (index >= end)
			continue;

		if (unlikely(dax)) {
			dax_delete_mapping_entry(mapping, index);
			continue;
		}

		__clear_shadow_entry(mapping, index, page);
	}

	if (lock)
		xa_unlock_irq(&mapping->i_pages);
	pvec->nr = j;
}
/*
 * Invalidate exceptional entry if easily possible. This handles exceptional
 * entries for invalidate_inode_pages().
 */
static int invalidate_exceptional_entry(struct address_space *mapping,
					pgoff_t index, void *entry)
{
	/* Handled by shmem itself, or for DAX we do nothing. */
	if (shmem_mapping(mapping) || dax_mapping(mapping))
		return 1;
	clear_shadow_entry(mapping, index, entry);
	return 1;
}

/*
 * Invalidate exceptional entry if clean. This handles exceptional entries for
 * invalidate_inode_pages2() so for DAX it evicts only clean entries.
 */
static int invalidate_exceptional_entry2(struct address_space *mapping,
					 pgoff_t index, void *entry)
{
	/* Handled by shmem itself */
	if (shmem_mapping(mapping))
		return 1;
	if (dax_mapping(mapping))
		return dax_invalidate_mapping_entry_sync(mapping, index);
	clear_shadow_entry(mapping, index, entry);
	return 1;
}
/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: start of the range to invalidate
 * @length: length of the range to invalidate
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point, because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void do_invalidatepage(struct page *page, unsigned int offset,
		       unsigned int length)
{
	void (*invalidatepage)(struct page *, unsigned int, unsigned int);

	invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;
#endif
	if (invalidatepage)
		(*invalidatepage)(page, offset, length);
}
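
/*
 * Illustrative sketch (not part of this file): a block-based filesystem with
 * no per-page metadata beyond buffer heads can simply forward its
 * ->invalidatepage to block_invalidatepage(), which is also what the fallback
 * above does when the method is left NULL and CONFIG_BLOCK is set. The "myfs"
 * names below are hypothetical.
 *
 *	static void myfs_invalidatepage(struct page *page, unsigned int offset,
 *					unsigned int length)
 *	{
 *		// Drop buffers in the invalidated range; any filesystem-
 *		// private cleanup would go next to this call.
 *		block_invalidatepage(page, offset, length);
 *	}
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.invalidatepage	= myfs_invalidatepage,
 *	};
 */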
/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes orphaned. It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_fault().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping. This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_mapping_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static void truncate_cleanup_page(struct page *page)
{
	if (page_mapped(page))
		unmap_mapping_page(page);

	if (page_has_private(page))
		do_invalidatepage(page, 0, thp_size(page));

	/*
	 * Some filesystems seem to re-dirty the page even after
	 * the VM has canceled the dirty bit (eg ext3 journaling).
	 * Hence dirty accounting check is placed after invalidation.
	 */
	cancel_dirty_page(page);
	ClearPageMappedToDisk(page);
}
/*
 * This is for invalidate_mapping_pages(). That function can be called at
 * any time, and is not supposed to throw away dirty pages. But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	int ret;

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, 0))
		return 0;

	ret = remove_mapping(mapping, page);

	return ret;
}
int truncate_inode_page(struct address_space *mapping, struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);

	if (page->mapping != mapping)
		return -EIO;

	truncate_cleanup_page(page);
	delete_from_page_cache(page);
	return 0;
}

/*
 * Used to get rid of pages on hardware memory corruption.
 */
int generic_error_remove_page(struct address_space *mapping, struct page *page)
{
	if (!mapping)
		return -EINVAL;
	/*
	 * Only punch for normal data pages for now.
	 * Handling other types like directories would need more auditing.
	 */
	if (!S_ISREG(mapping->host->i_mode))
		return -EIO;
	return truncate_inode_page(mapping, page);
}
EXPORT_SYMBOL(generic_error_remove_page);
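
/*
 * Illustrative sketch (not part of this file): the memory-failure code
 * reaches this helper through the ->error_remove_page method, so a typical
 * filesystem simply plugs it into its address_space_operations. The "myfs"
 * name below is hypothetical.
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.error_remove_page	= generic_error_remove_page,
 *	};
 */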
/*
 * Safely invalidate one page from its pagecache mapping.
 * It only drops clean, unused pages. The page must be locked.
 *
 * Returns 1 if the page is successfully invalidated, otherwise 0.
 */
int invalidate_inode_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (!mapping)
		return 0;
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	if (page_mapped(page))
		return 0;
	return invalidate_complete_page(mapping, page);
}
/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate (inclusive)
 *
 * Truncate the page cache, removing the pages that are between
 * specified offsets (and zeroing out partial pages
 * if lstart or lend + 1 is not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking. It will not
 * block on page locks and it will not block on writeback. The second pass
 * will wait. This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * We pass down the cache-hot hint to the page freeing code. Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 *
 * Note that since ->invalidatepage() accepts a range to invalidate,
 * truncate_inode_pages_range is able to handle cases where lend + 1 is not
 * page aligned.
 */
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	pgoff_t		start;		/* inclusive */
	pgoff_t		end;		/* exclusive */
	unsigned int	partial_start;	/* inclusive */
	unsigned int	partial_end;	/* exclusive */
	struct pagevec	pvec;
	pgoff_t		indices[PAGEVEC_SIZE];
	pgoff_t		index;
	int		i;

	if (mapping->nrpages == 0 && mapping->nrexceptional == 0)
		goto out;

	/* Offsets within partial pages */
	partial_start = lstart & (PAGE_SIZE - 1);
	partial_end = (lend + 1) & (PAGE_SIZE - 1);

	/*
	 * 'start' and 'end' always covers the range of pages to be fully
	 * truncated. Partial pages are covered with 'partial_start' at the
	 * start of the range and 'partial_end' at the end of the range.
	 * Note that 'end' is exclusive while 'lend' is inclusive.
	 */
	start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (lend == -1)
		/*
		 * lend == -1 indicates end-of-file so we have to set 'end'
		 * to the highest possible pgoff_t and since the type is
		 * unsigned we're using -1.
		 */
		end = -1;
	else
		end = (lend + 1) >> PAGE_SHIFT;

	pagevec_init(&pvec);
	index = start;
	while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE),
			indices)) {
		/*
		 * Pagevec array has exceptional entries and we may also fail
		 * to lock some pages. So we store pages that can be deleted
		 * in a new pagevec.
		 */
		struct pagevec locked_pvec;

		pagevec_init(&locked_pvec);
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index >= end)
				break;

			if (xa_is_value(page))
				continue;

			if (!trylock_page(page))
				continue;
			WARN_ON(page_to_index(page) != index);
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			pagevec_add(&locked_pvec, page);
		}
		for (i = 0; i < pagevec_count(&locked_pvec); i++)
			truncate_cleanup_page(locked_pvec.pages[i]);
		delete_from_page_cache_batch(mapping, &locked_pvec);
		for (i = 0; i < pagevec_count(&locked_pvec); i++)
			unlock_page(locked_pvec.pages[i]);
		truncate_exceptional_pvec_entries(mapping, &pvec, indices, end);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}

	if (partial_start) {
		struct page *page = find_lock_page(mapping, start - 1);

		if (page) {
			unsigned int top = PAGE_SIZE;

			if (start > end) {
				/* Truncation within a single page */
				top = partial_end;
				partial_end = 0;
			}
			wait_on_page_writeback(page);
			zero_user_segment(page, partial_start, top);
			cleancache_invalidate_page(mapping, page);
			if (page_has_private(page))
				do_invalidatepage(page, partial_start,
						  top - partial_start);
			unlock_page(page);
			put_page(page);
		}
	}
	if (partial_end) {
		struct page *page = find_lock_page(mapping, end);

		if (page) {
			wait_on_page_writeback(page);
			zero_user_segment(page, 0, partial_end);
			cleancache_invalidate_page(mapping, page);
			if (page_has_private(page))
				do_invalidatepage(page, 0,
						  partial_end);
			unlock_page(page);
			put_page(page);
		}
	}
	/*
	 * If the truncation happened within a single page no pages
	 * will be released, just zeroed, so we can bail out now.
	 */
	if (start >= end)
		goto out;

	index = start;
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE), indices)) {
			/* If all gone from start onwards, we're done */
			if (index == start)
				break;
			/* Otherwise restart to make sure all gone */
			index = start;
			continue;
		}
		if (index == start && indices[0] >= end) {
			/* All gone out of hole to be punched, we're done */
			pagevec_remove_exceptionals(&pvec);
			pagevec_release(&pvec);
			break;
		}

		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index >= end) {
				/* Restart punch to make sure all gone */
				index = start - 1;
				break;
			}

			if (xa_is_value(page))
				continue;

			lock_page(page);
			WARN_ON(page_to_index(page) != index);
			wait_on_page_writeback(page);
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		truncate_exceptional_pvec_entries(mapping, &pvec, indices, end);
		pagevec_release(&pvec);
		index++;
	}

out:
	cleancache_invalidate_inode(mapping);
}
EXPORT_SYMBOL(truncate_inode_pages_range);
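
/*
 * Worked example of the offset arithmetic above (assuming PAGE_SIZE == 4096;
 * the numbers are illustrative only): for lstart == 1536 and lend == 10239,
 *
 *	partial_start = 1536 & 4095         = 1536
 *	partial_end   = 10240 & 4095        = 2048
 *	start         = (1536 + 4095) >> 12 = 1
 *	end           = 10240 >> 12         = 2
 *
 * so only page 1 is fully truncated, page 0 has bytes 1536..4095 zeroed and
 * invalidated via do_invalidatepage(), and page 2 has bytes 0..2047 zeroed.
 */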
/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 *
 * Note: When this function returns, there can be a page in the process of
 * deletion (inside __delete_from_page_cache()) in the specified range. Thus
 * mapping->nrpages can be non-zero when this function returns even after
 * truncation of the whole mapping.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);
/**
 * truncate_inode_pages_final - truncate *all* pages before inode dies
 * @mapping: mapping to truncate
 *
 * Called under (and serialized by) inode->i_mutex.
 *
 * Filesystems have to use this in the .evict_inode path to inform the
 * VM that this is the final truncate and the inode is going away.
 */
void truncate_inode_pages_final(struct address_space *mapping)
{
	unsigned long nrexceptional;
	unsigned long nrpages;

	/*
	 * Page reclaim can not participate in regular inode lifetime
	 * management (can't call iput()) and thus can race with the
	 * inode teardown. Tell it when the address space is exiting,
	 * so that it does not install eviction information after the
	 * final truncate has begun.
	 */
	mapping_set_exiting(mapping);

	/*
	 * When reclaim installs eviction entries, it increases
	 * nrexceptional first, then decreases nrpages. Make sure we see
	 * this in the right order or we might miss an entry.
	 */
	nrpages = mapping->nrpages;
	smp_rmb();
	nrexceptional = mapping->nrexceptional;

	if (nrpages || nrexceptional) {
		/*
		 * As truncation uses a lockless tree lookup, cycle
		 * the tree lock to make sure any ongoing tree
		 * modification that does not see AS_EXITING is
		 * completed before starting the final truncate.
		 */
		xa_lock_irq(&mapping->i_pages);
		xa_unlock_irq(&mapping->i_pages);
	}

	/*
	 * Cleancache needs notification even if there are no pages or shadow
	 * entries.
	 */
	truncate_inode_pages(mapping, 0);
}
EXPORT_SYMBOL(truncate_inode_pages_final);
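
/*
 * Illustrative sketch (not part of this file): a minimal ->evict_inode for a
 * hypothetical filesystem "myfs", showing where truncate_inode_pages_final()
 * fits. Real implementations also release on-disk blocks and private inode
 * state before clear_inode().
 *
 *	static void myfs_evict_inode(struct inode *inode)
 *	{
 *		truncate_inode_pages_final(&inode->i_data);
 *		clear_inode(inode);
 *	}
 *
 *	static const struct super_operations myfs_sops = {
 *		.evict_inode	= myfs_evict_inode,
 *	};
 */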
static unsigned long __invalidate_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t end, unsigned long *nr_pagevec)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	pgoff_t index = start;
	unsigned long ret;
	unsigned long count = 0;
	int i;

	pagevec_init(&pvec);
	while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
			indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index > end)
				break;

			if (xa_is_value(page)) {
				invalidate_exceptional_entry(mapping, index,
							     page);
				continue;
			}

			if (!trylock_page(page))
				continue;

			WARN_ON(page_to_index(page) != index);

			/* Middle of THP: skip */
			if (PageTransTail(page)) {
				unlock_page(page);
				continue;
			} else if (PageTransHuge(page)) {
				index += HPAGE_PMD_NR - 1;
				i += HPAGE_PMD_NR - 1;
				/*
				 * 'end' is in the middle of THP. Don't
				 * invalidate the page as the part outside of
				 * 'end' could be still useful.
				 */
				if (index > end) {
					unlock_page(page);
					continue;
				}

				/* Take a pin outside pagevec */
				get_page(page);

				/*
				 * Drop extra pins before trying to invalidate
				 * the huge page.
				 */
				pagevec_remove_exceptionals(&pvec);
				pagevec_release(&pvec);
			}

			ret = invalidate_inode_page(page);
			unlock_page(page);
			/*
			 * Invalidation is a hint that the page is no longer
			 * of interest and try to speed up its reclaim.
			 */
			if (!ret) {
				deactivate_file_page(page);
				/* It is likely on the pagevec of a remote CPU */
				if (nr_pagevec)
					(*nr_pagevec)++;
			}

			if (PageTransHuge(page))
				put_page(page);
			count += ret;
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}
	return count;
}
/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity. It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 *
 * Return: the number of the pages that were invalidated
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t end)
{
	return __invalidate_mapping_pages(mapping, start, end, NULL);
}
EXPORT_SYMBOL(invalidate_mapping_pages);
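
/*
 * Illustrative sketch (not part of this file), loosely modelled on the
 * POSIX_FADV_DONTNEED handling in mm/fadvise.c: convert a byte range into
 * page indices and ask for the clean, unmapped pages in it to be dropped.
 * The helper name and the exact rounding policy are assumptions here.
 *
 *	static void drop_clean_range(struct address_space *mapping,
 *				     loff_t offset, loff_t endbyte)
 *	{
 *		pgoff_t start = (offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
 *		pgoff_t end = endbyte >> PAGE_SHIFT;
 *
 *		// Only whole pages inside [offset, endbyte] are eligible.
 *		if (end >= start)
 *			invalidate_mapping_pages(mapping, start, end);
 *	}
 */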
/**
 * This helper is similar to invalidate_mapping_pages(), except that it
 * accounts for pages that are likely on a pagevec and counts them in
 * @nr_pagevec, which will be used by the caller.
 */
void invalidate_mapping_pagevec(struct address_space *mapping,
		pgoff_t start, pgoff_t end, unsigned long *nr_pagevec)
{
	__invalidate_mapping_pages(mapping, start, end, nr_pagevec);
}
/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount. We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_page_list() has a temp ref on them, or because they're transiently
 * sitting in the lru_cache_add() pagevecs.
 */
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	unsigned long flags;

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;

	xa_lock_irqsave(&mapping->i_pages, flags);
	if (PageDirty(page))
		goto failed;

	BUG_ON(page_has_private(page));
	__delete_from_page_cache(page, NULL);
	xa_unlock_irqrestore(&mapping->i_pages, flags);

	if (mapping->a_ops->freepage)
		mapping->a_ops->freepage(page);

	put_page(page);	/* pagecache ref */
	return 1;
failed:
	xa_unlock_irqrestore(&mapping->i_pages, flags);
	return 0;
}

static int do_launder_page(struct address_space *mapping, struct page *page)
{
	if (!PageDirty(page))
		return 0;
	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
		return 0;
	return mapping->a_ops->launder_page(page);
}
/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Return: -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	pgoff_t index;
	int i;
	int ret = 0;
	int ret2 = 0;
	int did_range_unmap = 0;

	if (mapping->nrpages == 0 && mapping->nrexceptional == 0)
		goto out;

	pagevec_init(&pvec);
	index = start;
	while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
			indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index > end)
				break;

			if (xa_is_value(page)) {
				if (!invalidate_exceptional_entry2(mapping,
								   index, page))
					ret = -EBUSY;
				continue;
			}

			if (!did_range_unmap && page_mapped(page)) {
				/*
				 * If page is mapped, before taking its lock,
				 * zap the rest of the file in one hit.
				 */
				unmap_mapping_pages(mapping, index,
						(1 + end - index), false);
				did_range_unmap = 1;
			}

			lock_page(page);
			WARN_ON(page_to_index(page) != index);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			wait_on_page_writeback(page);

			if (page_mapped(page))
				unmap_mapping_page(page);
			BUG_ON(page_mapped(page));

			ret2 = do_launder_page(mapping, page);
			if (ret2 == 0) {
				if (!invalidate_complete_page2(mapping, page))
					ret2 = -EBUSY;
			}
			if (ret2 < 0)
				ret = ret2;
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}
	/*
	 * For DAX we invalidate page tables after invalidating page cache. We
	 * could invalidate page tables while invalidating each entry however
	 * that would be expensive. And doing range unmapping before doesn't
	 * work as we have no cheap way to find whether page cache entry didn't
	 * get remapped later.
	 */
	if (dax_mapping(mapping)) {
		unmap_mapping_pages(mapping, start, end - start + 1, false);
	}
out:
	cleancache_invalidate_inode(mapping);
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);
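
/*
 * Illustrative sketch (not part of this file), loosely modelled on the
 * write-back-then-invalidate dance that direct-I/O style paths perform to
 * keep the page cache coherent with data written around it: flush the byte
 * range first, then force the cached pages covering it out. The helper name
 * is an assumption; real callers also decide how to handle an -EBUSY return.
 *
 *	static int sync_and_drop_range(struct address_space *mapping,
 *				       loff_t pos, loff_t count)
 *	{
 *		loff_t end = pos + count - 1;
 *		int err;
 *
 *		err = filemap_write_and_wait_range(mapping, pos, end);
 *		if (err)
 *			return err;
 *
 *		return invalidate_inode_pages2_range(mapping,
 *				pos >> PAGE_SHIFT, end >> PAGE_SHIFT);
 *	}
 */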
/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Return: -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);

/**
 * truncate_pagecache - unmap and remove pagecache that has been truncated
 * @inode: inode
 * @newsize: new file size
 *
 * inode's new i_size must already be written before truncate_pagecache
 * is called.
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache(struct inode *inode, loff_t newsize)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t holebegin = round_up(newsize, PAGE_SIZE);

	/*
	 * unmap_mapping_range is called twice, first simply for
	 * efficiency so that truncate_inode_pages does fewer
	 * single-page unmaps. However after this first call, and
	 * before truncate_inode_pages finishes, it is possible for
	 * private pages to be COWed, which remain after
	 * truncate_inode_pages finishes, hence the second
	 * unmap_mapping_range call must be made for correctness.
	 */
	unmap_mapping_range(mapping, holebegin, 0, 1);
	truncate_inode_pages(mapping, newsize);
	unmap_mapping_range(mapping, holebegin, 0, 1);
}
EXPORT_SYMBOL(truncate_pagecache);
/**
 * truncate_setsize - update inode and pagecache for a new file size
 * @inode: inode
 * @newsize: new file size
 *
 * truncate_setsize updates i_size and performs pagecache truncation (if
 * necessary) to @newsize. It will typically be called from the filesystem's
 * setattr function when ATTR_SIZE is passed in.
 *
 * Must be called with a lock serializing truncates and writes (generally
 * i_mutex but e.g. xfs uses a different lock) and before all filesystem
 * specific block truncation has been performed.
 */
void truncate_setsize(struct inode *inode, loff_t newsize)
{
	loff_t oldsize = inode->i_size;

	i_size_write(inode, newsize);
	if (newsize > oldsize)
		pagecache_isize_extended(inode, oldsize, newsize);
	truncate_pagecache(inode, newsize);
}
EXPORT_SYMBOL(truncate_setsize);
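
/*
 * Illustrative sketch (not part of this file): the size-changing part of a
 * hypothetical filesystem's ->setattr, showing the usual ordering of
 * truncate_setsize() before the filesystem-specific block truncation. The
 * "myfs" helpers are assumptions, and since the ->setattr prototype varies
 * between kernel versions only the size-handling body is shown.
 *
 *	static int myfs_setsize(struct inode *inode, loff_t newsize)
 *	{
 *		// Caller holds the inode lock serializing truncates/writes.
 *		truncate_setsize(inode, newsize);
 *		// Now release on-disk blocks beyond the new EOF.
 *		myfs_truncate_blocks(inode, newsize);
 *		return 0;
 *	}
 */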
/**
 * pagecache_isize_extended - update pagecache after extension of i_size
 * @inode: inode for which i_size was extended
 * @from: original inode size
 * @to: new inode size
 *
 * Handle extension of inode size either caused by extending truncate or by
 * write starting after current i_size. We mark the page straddling current
 * i_size RO so that page_mkwrite() is called on the nearest write access to
 * the page. This way filesystem can be sure that page_mkwrite() is called on
 * the page before user writes to the page via mmap after the i_size has been
 * changed.
 *
 * The function must be called after i_size is updated so that page fault
 * coming after we unlock the page will already see the new i_size.
 * The function must be called while we still hold i_mutex - this not only
 * makes sure i_size is stable but also that userspace cannot observe new
 * i_size value before we are prepared to store mmap writes at new inode size.
 */
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
{
	int bsize = i_blocksize(inode);
	loff_t rounded_from;
	struct page *page;
	pgoff_t index;

	WARN_ON(to > inode->i_size);

	if (from >= to || bsize == PAGE_SIZE)
		return;
	/* Page straddling @from will not have any hole block created? */
	rounded_from = round_up(from, bsize);
	if (to <= rounded_from || !(rounded_from & (PAGE_SIZE - 1)))
		return;

	index = from >> PAGE_SHIFT;
	page = find_lock_page(inode->i_mapping, index);
	/* Page not cached? Nothing to do */
	if (!page)
		return;
	/*
	 * See clear_page_dirty_for_io() for details why set_page_dirty()
	 * is needed.
	 */
	if (page_mkclean(page))
		set_page_dirty(page);
	unlock_page(page);
	put_page(page);
}
EXPORT_SYMBOL(pagecache_isize_extended);
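
/*
 * Illustrative sketch (not part of this file): how an i_size-extending
 * buffered write path might use this helper when the filesystem block size
 * is smaller than the page size. The helper name is an assumption; some
 * filesystems do this from their ->write_end after committing the new size.
 *
 *	static void myfs_update_size(struct inode *inode, loff_t new_i_size)
 *	{
 *		loff_t old_i_size = i_size_read(inode);
 *
 *		// Caller holds the inode lock, keeping i_size stable.
 *		if (new_i_size <= old_i_size)
 *			return;
 *
 *		i_size_write(inode, new_i_size);
 *		// Write-protect the page straddling the old EOF so that
 *		// page_mkwrite() runs before mmap stores land beyond it.
 *		pagecache_isize_extended(inode, old_i_size, new_i_size);
 *	}
 */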
/**
 * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
 * @inode: inode
 * @lstart: offset of beginning of hole
 * @lend: offset of last byte of hole
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t unmap_start = round_up(lstart, PAGE_SIZE);
	loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
	/*
	 * This rounding is currently just for example: unmap_mapping_range
	 * expands its hole outwards, whereas we want it to contract the hole
	 * inwards. However, existing callers of truncate_pagecache_range are
	 * doing their own page rounding first. Note that unmap_mapping_range
	 * allows holelen 0 for all, and we allow lend -1 for end of file.
	 */

	/*
	 * Unlike in truncate_pagecache, unmap_mapping_range is called only
	 * once (before truncating pagecache), and without "even_cows" flag:
	 * hole-punching should not remove private COWed pages from the hole.
	 */
	if ((u64)unmap_end > (u64)unmap_start)
		unmap_mapping_range(mapping, unmap_start,
				    1 + unmap_end - unmap_start, 0);
	truncate_inode_pages_range(mapping, lstart, lend);
}
EXPORT_SYMBOL(truncate_pagecache_range);
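
/*
 * Illustrative sketch (not part of this file): the pagecache side of a
 * hypothetical FALLOC_FL_PUNCH_HOLE implementation. Real filesystems take
 * additional locks and wait for outstanding I/O before freeing blocks; the
 * "myfs" helper is an assumption.
 *
 *	static int myfs_punch_hole(struct inode *inode, loff_t offset,
 *				   loff_t len)
 *	{
 *		// Drop cached pages and mappings over the hole first ...
 *		truncate_pagecache_range(inode, offset, offset + len - 1);
 *		// ... then release the underlying blocks on disk.
 *		return myfs_free_blocks(inode, offset, len);
 *	}
 */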