// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <trace/events/writeback.h>
#include <linux/sched/signal.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"
#include "aops.h"

void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
			    unsigned int from, unsigned int len)
{
	struct buffer_head *head = page_buffers(page);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	unsigned int to = from + len;
	unsigned int start, end;

	for (bh = head, start = 0; bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		end = start + bsize;
		if (end <= from)
			continue;
		if (start >= to)
			break;
		set_buffer_uptodate(bh);
		gfs2_trans_add_data(ip->i_gl, bh);
	}
}

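/*
 * Worked example for the window arithmetic above, assuming 1 KiB blocks on
 * a 4 KiB page: with from = 512 and len = 2048, to = 2560, so the loop
 * journals the buffers covering [0, 1024), [1024, 2048) and [2048, 3072) --
 * every block overlapping the byte range -- and breaks at the buffer
 * covering [3072, 4096), whose start is at or beyond "to".
 */
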
/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */
static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
				  struct buffer_head *bh_result, int create)
{
	int error;

	error = gfs2_block_map(inode, lblock, bh_result, 0);
	if (error)
		return error;
	if (!buffer_mapped(bh_result))
		return -ENODATA;
	return 0;
}

/**
 * gfs2_writepage - Write page for writeback mappings
 * @page: The page
 * @wbc: The writeback control
 */
static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct iomap_writepage_ctx wpc = { };

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (current->journal_info)
		goto redirty;
	return iomap_writepage(page, wbc, &wpc, &gfs2_writeback_ops);

redirty:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}

/**
 * gfs2_write_jdata_page - gfs2 jdata-specific version of block_write_full_page
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is the same as calling block_write_full_page, but it also
 * writes pages outside of i_size
 */
static int gfs2_write_jdata_page(struct page *page,
				 struct writeback_control *wbc)
{
	struct inode * const inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = i_size >> PAGE_SHIFT;
	unsigned offset;

	/*
	 * The page straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the page size.  For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	offset = i_size & (PAGE_SIZE - 1);
	if (page->index == end_index && offset)
		zero_user_segment(page, offset, PAGE_SIZE);

	return __block_write_full_page(inode, page, gfs2_get_block_noalloc, wbc,
				       end_buffer_async_write);
}

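/*
 * Worked example for the straddle check above, assuming 4 KiB pages: with
 * i_size = 10000, end_index = 2 and offset = 10000 - 2 * 4096 = 1808, so
 * only page index 2 has its tail [1808, 4096) zeroed before writeout;
 * pages 0 and 1 are written in full.
 */
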
/**
 * __gfs2_jdata_writepage - The core of jdata writepage
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is shared between writepage and writepages and implements the
 * core of the writepage operation. If a transaction is required then
 * PageChecked will have been set and the transaction will have
 * already been started before this is called.
 */
static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (PageChecked(page)) {
		ClearPageChecked(page);
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, inode->i_sb->s_blocksize,
					     BIT(BH_Dirty)|BIT(BH_Uptodate));
		}
		gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize);
	}
	return gfs2_write_jdata_page(page, wbc);
}

/**
 * gfs2_jdata_writepage - Write complete page
 * @page: Page to write
 * @wbc: The writeback control
 *
 * Returns: errno
 */
static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (PageChecked(page) || current->journal_info)
		goto out_ignore;
	return __gfs2_jdata_writepage(page, wbc);

out_ignore:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}

/**
 * gfs2_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * Used for both ordered and writeback modes.
 */
static int gfs2_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct iomap_writepage_ctx wpc = { };
	int ret;

	/*
	 * Even if we didn't write any pages here, we might still be holding
	 * dirty pages in the ail. We forcibly flush the ail because we don't
	 * want balance_dirty_pages() to loop indefinitely trying to write out
	 * pages held in the ail that it can't find.
	 */
	ret = iomap_writepages(mapping, wbc, &wpc, &gfs2_writeback_ops);
	if (ret == 0)
		set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);
	return ret;
}

/**
 * gfs2_write_jdata_pagevec - Write back a pagevec's worth of pages
 * @mapping: The mapping
 * @wbc: The writeback control
 * @pvec: The vector of pages
 * @nr_pages: The number of pages to write
 * @done_index: Page index
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */
static int gfs2_write_jdata_pagevec(struct address_space *mapping,
				    struct writeback_control *wbc,
				    struct pagevec *pvec,
				    int nr_pages,
				    pgoff_t *done_index)
{
	struct inode *inode = mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned nrblocks = nr_pages * (PAGE_SIZE >> inode->i_blkbits);
	int i;
	int ret;

	ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_pages; i++) {
		struct page *page = pvec->pages[i];

		*done_index = page->index;

		lock_page(page);

		if (unlikely(page->mapping != mapping)) {
continue_unlock:
			unlock_page(page);
			continue;
		}

		if (!PageDirty(page)) {
			/* someone wrote it for us */
			goto continue_unlock;
		}

		if (PageWriteback(page)) {
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			else
				goto continue_unlock;
		}

		BUG_ON(PageWriteback(page));
		if (!clear_page_dirty_for_io(page))
			goto continue_unlock;

		trace_wbc_writepage(wbc, inode_to_bdi(inode));

		ret = __gfs2_jdata_writepage(page, wbc);
		if (unlikely(ret)) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				unlock_page(page);
				ret = 0;
			} else {
				/*
				 * done_index is set past this page,
				 * so media errors will not choke
				 * background writeout for the entire
				 * file. This has consequences for
				 * range_cyclic semantics (ie. it may
				 * not be suitable for data integrity
				 * writeout).
				 */
				*done_index = page->index + 1;
				ret = 1;
				break;
			}
		}

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) {
			ret = 1;
			break;
		}
	}
	gfs2_trans_end(sdp);
	return ret;
}

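/*
 * Reservation sketch for the gfs2_trans_begin() call above, assuming 4 KiB
 * pages, 1 KiB filesystem blocks, and a full 15-page pagevec:
 * PAGE_SIZE >> i_blkbits = 4 blocks per page, so the transaction reserves
 * 15 * 4 = 60 journal blocks and as many revokes -- enough for every block
 * of every page in the batch to be journaled before any page is locked.
 */
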
/**
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */
static int gfs2_write_cache_jdata(struct address_space *mapping,
				  struct writeback_control *wbc)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t writeback_index;
	pgoff_t index;
	pgoff_t end;
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	xa_mark_t tag;

	pagevec_init(&pvec);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;

retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
						    tag);
		if (nr_pages == 0)
			break;

		ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, &done_index);
		if (ret)
			done = 1;
		if (ret > 0)
			ret = 0;
		pagevec_release(&pvec);
		cond_resched();
	}

	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}

/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 */
static int gfs2_jdata_writepages(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	int ret;

	ret = gfs2_write_cache_jdata(mapping, wbc);
	if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
		gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_JDATA_WPAGES);
		ret = gfs2_write_cache_jdata(mapping, wbc);
	}
	return ret;
}

/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */
static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *dibh;
	u64 dsize = i_size_read(&ip->i_inode);
	void *kaddr;
	int error;

	/*
	 * Due to the order of unstuffing files and ->fault(), we can be
	 * asked for a zero page in the case of a stuffed file being extended,
	 * so we need to supply one here. It doesn't happen often.
	 */
	if (unlikely(page->index)) {
		zero_user(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		return 0;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	kaddr = kmap_atomic(page);
	if (dsize > gfs2_max_stuffed_size(ip))
		dsize = gfs2_max_stuffed_size(ip);
	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
	memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
	kunmap_atomic(kaddr);
	flush_dcache_page(page);
	brelse(dibh);
	SetPageUptodate(page);

	return 0;
}

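/*
 * Layout note: a "stuffed" file keeps its data inline in the disk inode
 * block, immediately after the struct gfs2_dinode header.  That is why the
 * copy above starts at b_data + sizeof(struct gfs2_dinode), why dsize is
 * capped at gfs2_max_stuffed_size(), and why only page index 0 can ever
 * contain real data.
 */
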
static int __gfs2_readpage(void *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int error;

	if (!gfs2_is_jdata(ip) ||
	    (i_blocksize(inode) == PAGE_SIZE && !page_has_buffers(page))) {
		error = iomap_readpage(page, &gfs2_iomap_ops);
	} else if (gfs2_is_stuffed(ip)) {
		error = stuffed_readpage(ip, page);
		unlock_page(page);
	} else {
		error = mpage_readpage(page, gfs2_block_map);
	}

	if (unlikely(gfs2_withdrawn(sdp)))
		return -EIO;

	return error;
}

/**
 * gfs2_readpage - read a page of a file
 * @file: The file to read
 * @page: The page of the file
 */
static int gfs2_readpage(struct file *file, struct page *page)
{
	return __gfs2_readpage(file, page);
}

/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 */
int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
		       unsigned size)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	unsigned long index = *pos >> PAGE_SHIFT;
	unsigned offset = *pos & (PAGE_SIZE - 1);
	unsigned copied = 0;
	unsigned amt;
	struct page *page;
	void *p;

	do {
		/* Clamp each copy to the end of the current page. */
		amt = size - copied;
		if (offset + amt > PAGE_SIZE)
			amt = PAGE_SIZE - offset;
		page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
		if (IS_ERR(page))
			return PTR_ERR(page);
		p = kmap_atomic(page);
		memcpy(buf + copied, p + offset, amt);
		kunmap_atomic(p);
		put_page(page);
		copied += amt;
		index++;
		offset = 0;
	} while (copied < size);

	(*pos) += size;
	return size;
}

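/*
 * Usage sketch (hypothetical caller, error handling elided): this helper
 * backs sequential reads of internal inodes such as the rindex, e.g.
 *
 *	struct gfs2_rindex buf;
 *	loff_t pos = 0;
 *	int error = gfs2_internal_read(ip, (char *)&buf, &pos, sizeof(buf));
 *
 * Each successful call advances *pos by the amount read, so repeated calls
 * walk the file record by record.
 */
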
/**
 * gfs2_readahead - Read a bunch of pages at once
 * @rac: Read-ahead control structure
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore any things
 *    which are slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O.  It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readahead() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */
static void gfs2_readahead(struct readahead_control *rac)
{
	struct inode *inode = rac->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);

	if (gfs2_is_stuffed(ip))
		;
	else if (gfs2_is_jdata(ip))
		mpage_readahead(rac, gfs2_block_map);
	else
		iomap_readahead(rac, &gfs2_iomap_ops);
}

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
void adjust_fs_space(struct inode *inode)
{
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh, *l_bh;
	u64 fs_total, new_free;

	if (gfs2_trans_begin(sdp, 2 * RES_STATFS, 0) != 0)
		return;

	/* Total up the file system space, according to the latest rindex. */
	fs_total = gfs2_ri_total(sdp);

	if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
		goto out;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (fs_total > (m_sc->sc_total + l_sc->sc_total))
		new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
	else
		new_free = 0;
	spin_unlock(&sdp->sd_statfs_spin);
	fs_warn(sdp, "File system extended by %llu blocks.\n",
		(unsigned long long)new_free);
	gfs2_statfs_change(sdp, new_free, new_free, 0);

	if (gfs2_meta_inode_buffer(l_ip, &l_bh) != 0)
		goto out2;
	update_statfs(sdp, m_bh, l_bh);
	brelse(l_bh);
out2:
	brelse(m_bh);
out:
	sdp->sd_rindex_uptodate = 0;
	gfs2_trans_end(sdp);
}

/**
 * jdata_set_page_dirty - Page dirtying function
 * @page: The page to dirty
 *
 * Returns: 1 if it dirtied the page, or 0 otherwise
 */
static int jdata_set_page_dirty(struct page *page)
{
	if (current->journal_info)
		SetPageChecked(page);
	return __set_page_dirty_buffers(page);
}

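/*
 * Note: PageChecked is reused here to mean "dirtied from inside a
 * transaction" (current->journal_info is non-NULL).  __gfs2_jdata_writepage
 * tests and clears the flag to decide whether the page's buffers still need
 * to be added to the journal, and gfs2_jdata_writepage redirties such pages
 * rather than writing them without a running transaction.
 */
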
/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */
static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder i_gh;
	sector_t dblock = 0;
	int error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return 0;

	if (!gfs2_is_stuffed(ip))
		dblock = iomap_bmap(mapping, lblock, &gfs2_iomap_ops);

	gfs2_glock_dq_uninit(&i_gh);

	return dblock;
}

static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	lock_buffer(bh);
	gfs2_log_lock(sdp);
	clear_buffer_dirty(bh);
	bd = bh->b_private;
	if (bd) {
		if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
			list_del_init(&bd->bd_list);
		else {
			spin_lock(&sdp->sd_ail_lock);
			gfs2_remove_from_journal(bh, REMOVE_JDATA);
			spin_unlock(&sdp->sd_ail_lock);
		}
	}
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	gfs2_log_unlock(sdp);
	unlock_buffer(bh);
}

static void gfs2_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length)
{
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	unsigned int stop = offset + length;
	int partial_page = (offset || length < PAGE_SIZE);
	struct buffer_head *bh, *head;
	unsigned long pos = 0;

	BUG_ON(!PageLocked(page));
	if (!partial_page)
		ClearPageChecked(page);
	if (!page_has_buffers(page))
		goto out;

	bh = head = page_buffers(page);
	do {
		if (pos + bh->b_size > stop)
			return;

		if (offset <= pos)
			gfs2_discard(sdp, bh);
		pos += bh->b_size;
		bh = bh->b_this_page;
	} while (bh != head);
out:
	if (!partial_page)
		try_to_release_page(page, 0);
}

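/*
 * Behavior sketch, assuming 1 KiB blocks on a 4 KiB page: invalidating the
 * range [1024, 4096) discards the buffers covering [1024, 2048),
 * [2048, 3072) and [3072, 4096), but leaves the first buffer alone because
 * the range starts past it.  Only a full-page invalidation clears
 * PageChecked and attempts to release the page's buffers.
 */
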
/**
 * gfs2_releasepage - free the metadata associated with a page
 * @page: the page that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Calls try_to_free_buffers() to free the buffers and put the page if the
 * buffers can be released.
 *
 * Returns: 1 if the page was put or else 0
 */
int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
{
	struct address_space *mapping = page->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;

	if (!page_has_buffers(page))
		return 0;

	/*
	 * From xfs_vm_releasepage: mm accommodates an old ext3 case where
	 * clean pages might not have had the dirty bit cleared. Thus, it can
	 * send actual dirty pages to ->releasepage() via shrink_active_list().
	 *
	 * As a workaround, we skip pages that contain dirty buffers below.
	 * Once ->releasepage isn't called on dirty pages anymore, we can warn
	 * on dirty buffers like we used to here again.
	 */
	gfs2_log_lock(sdp);
	head = bh = page_buffers(page);
	do {
		if (atomic_read(&bh->b_count))
			goto cannot_release;
		bd = bh->b_private;
		if (bd && bd->bd_tr)
			goto cannot_release;
		if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
			goto cannot_release;
		bh = bh->b_this_page;
	} while (bh != head);

	head = bh = page_buffers(page);
	do {
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			bd->bd_bh = NULL;
			bh->b_private = NULL;
			/*
			 * The bd may still be queued as a revoke, in which
			 * case we must not dequeue nor free it.
			 */
			if (!bd->bd_blkno && !list_empty(&bd->bd_list))
				list_del_init(&bd->bd_list);
			if (list_empty(&bd->bd_list))
				kmem_cache_free(gfs2_bufdata_cachep, bd);
		}
		bh = bh->b_this_page;
	} while (bh != head);
	gfs2_log_unlock(sdp);

	return try_to_free_buffers(page);

cannot_release:
	gfs2_log_unlock(sdp);
	return 0;
}

static const struct address_space_operations gfs2_aops = {
	.writepage = gfs2_writepage,
	.writepages = gfs2_writepages,
	.readpage = gfs2_readpage,
	.readahead = gfs2_readahead,
	.set_page_dirty = iomap_set_page_dirty,
	.releasepage = iomap_releasepage,
	.invalidatepage = iomap_invalidatepage,
	.bmap = gfs2_bmap,
	.direct_IO = noop_direct_IO,
	.migratepage = iomap_migrate_page,
	.is_partially_uptodate = iomap_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations gfs2_jdata_aops = {
	.writepage = gfs2_jdata_writepage,
	.writepages = gfs2_jdata_writepages,
	.readpage = gfs2_readpage,
	.readahead = gfs2_readahead,
	.set_page_dirty = jdata_set_page_dirty,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

void gfs2_set_aops(struct inode *inode)
{
	if (gfs2_is_jdata(GFS2_I(inode)))
		inode->i_mapping->a_ops = &gfs2_jdata_aops;
	else
		inode->i_mapping->a_ops = &gfs2_aops;
}