// SPDX-License-Identifier: GPL-2.0-or-later
/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "internal.h"

/*
 * mark a page as having been made dirty and thus needing writeback
 */
int afs_set_page_dirty(struct page *page)
{
	_enter("");
	return __set_page_dirty_nobuffers(page);
}

/*
 * partly or wholly fill a page that's under preparation for writing
 */
static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
			 loff_t pos, unsigned int len, struct page *page)
{
	struct afs_read *req;
	size_t p;
	void *data;
	int ret;

	_enter(",,%llu", (unsigned long long)pos);

	if (pos >= vnode->vfs_inode.i_size) {
		p = pos & ~PAGE_MASK;
		ASSERTCMP(p + len, <=, PAGE_SIZE);
		data = kmap(page);
		memset(data + p, 0, len);
		kunmap(page);
		return 0;
	}

	req = kzalloc(struct_size(req, array, 1), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	refcount_set(&req->usage, 1);
	req->pos = pos;
	req->len = len;
	req->nr_pages = 1;
	req->pages = req->array;
	req->pages[0] = page;
	get_page(page);

	ret = afs_fetch_data(vnode, key, req);
	afs_put_read(req);
	if (ret < 0) {
		if (ret == -ENOENT) {
			_debug("got NOENT from server"
			       " - marking file deleted and stale");
			set_bit(AFS_VNODE_DELETED, &vnode->flags);
			ret = -ESTALE;
		}
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * prepare to perform part of a write to a page
 */
int afs_write_begin(struct file *file, struct address_space *mapping,
		    loff_t pos, unsigned len, unsigned flags,
		    struct page **_page, void **fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct page *page;
	struct key *key = afs_file_key(file);
	unsigned long priv;
	unsigned f, from = pos & (PAGE_SIZE - 1);
	unsigned t, to = from + len;
	pgoff_t index = pos >> PAGE_SHIFT;
	int ret;

	_enter("{%llx:%llu},{%lx},%u,%u",
	       vnode->fid.vid, vnode->fid.vnode, index, from, to);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
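
	/* If the write only covers part of the page and the page isn't up to
	 * date yet, fill it from the server (or zero it beyond the EOF)
	 * before the caller copies in the new data.
	 */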
	if (!PageUptodate(page) && len != PAGE_SIZE) {
		ret = afs_fill_page(vnode, key, pos & PAGE_MASK, PAGE_SIZE, page);
		if (ret < 0) {
			unlock_page(page);
			put_page(page);
			_leave(" = %d [prep]", ret);
			return ret;
		}
		SetPageUptodate(page);
	}

try_again:
	/* See if this page is already partially written in a way that we can
	 * merge the new write with.
	 */
	t = f = 0;
	if (PagePrivate(page)) {
		priv = page_private(page);
		f = afs_page_dirty_from(priv);
		t = afs_page_dirty_to(priv);
		ASSERTCMP(f, <=, t);
	}

	if (f != t) {
		if (PageWriteback(page)) {
			trace_afs_page_dirty(vnode, tracepoint_string("alrdy"),
					     page->index, priv);
			goto flush_conflicting_write;
		}
		/* If the file is being filled locally, allow inter-write
		 * spaces to be merged into writes.  If it's not, only write
		 * back what the user gives us.
		 */
		if (!test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags) &&
		    (to < f || from > t))
			goto flush_conflicting_write;
	}

	*_page = page;
	_leave(" = 0");
	return 0;

	/* The previous write and this write aren't adjacent or overlapping, so
	 * flush the page out.
	 */
flush_conflicting_write:
	_debug("flush conflict");
	ret = write_one_page(page);
	if (ret < 0)
		goto error;

	ret = lock_page_killable(page);
	if (ret < 0)
		goto error;
	goto try_again;

error:
	put_page(page);
	_leave(" = %d", ret);
	return ret;
}

/*
 * finalise part of a write to a page
 */
int afs_write_end(struct file *file, struct address_space *mapping,
		  loff_t pos, unsigned len, unsigned copied,
		  struct page *page, void *fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct key *key = afs_file_key(file);
	unsigned long priv;
	unsigned int f, from = pos & (PAGE_SIZE - 1);
	unsigned int t, to = from + copied;
	loff_t i_size, maybe_i_size;
	int ret = 0;

	_enter("{%llx:%llu},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, page->index);

	if (copied == 0)
		goto out;
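
	/* If the write extends the file, update i_size, rechecking under the
	 * cb_lock seqlock so that we don't race with another extension.
	 */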
	maybe_i_size = pos + copied;

	i_size = i_size_read(&vnode->vfs_inode);
	if (maybe_i_size > i_size) {
		write_seqlock(&vnode->cb_lock);
		i_size = i_size_read(&vnode->vfs_inode);
		if (maybe_i_size > i_size)
			afs_set_i_size(vnode, maybe_i_size);
		write_sequnlock(&vnode->cb_lock);
	}

	if (!PageUptodate(page)) {
		if (copied < len) {
			/* Try and load any missing data from the server.  The
			 * unmarshalling routine will take care of clearing any
			 * bits that are beyond the EOF.
			 */
			ret = afs_fill_page(vnode, key, pos + copied,
					    len - copied, page);
			if (ret < 0)
				goto out;
		}
		SetPageUptodate(page);
	}

	if (PagePrivate(page)) {
		priv = page_private(page);
		f = afs_page_dirty_from(priv);
		t = afs_page_dirty_to(priv);
		if (from < f)
			f = from;
		if (to > t)
			t = to;
		priv = afs_page_dirty(f, t);
		set_page_private(page, priv);
		trace_afs_page_dirty(vnode, tracepoint_string("dirty+"),
				     page->index, priv);
	} else {
		priv = afs_page_dirty(from, to);
		attach_page_private(page, (void *)priv);
		trace_afs_page_dirty(vnode, tracepoint_string("dirty"),
				     page->index, priv);
	}

	set_page_dirty(page);
	if (PageDirty(page))
		_debug("dirtied");
	ret = copied;

out:
	unlock_page(page);
	put_page(page);
	return ret;
}

/*
 * kill all the pages in the given range
 */
static void afs_kill_pages(struct address_space *mapping,
			   pgoff_t first, pgoff_t last)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%llx:%llu},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("kill %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			struct page *page = pv.pages[loop];

			ClearPageUptodate(page);
			SetPageError(page);
			end_page_writeback(page);
			if (page->index >= first)
				first = page->index + 1;
			lock_page(page);
			generic_error_remove_page(mapping, page);
			unlock_page(page);
		}

		__pagevec_release(&pv);
	} while (first <= last);

	_leave("");
}

/*
 * Redirty all the pages in a given range.
 */
static void afs_redirty_pages(struct writeback_control *wbc,
			      struct address_space *mapping,
			      pgoff_t first, pgoff_t last)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%llx:%llu},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("redirty %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			struct page *page = pv.pages[loop];

			redirty_page_for_writepage(wbc, page);
			end_page_writeback(page);
			if (page->index >= first)
				first = page->index + 1;
		}

		__pagevec_release(&pv);
	} while (first <= last);

	_leave("");
}

/*
 * completion of write to server
 */
static void afs_pages_written_back(struct afs_vnode *vnode,
				   pgoff_t first, pgoff_t last)
{
	struct pagevec pv;
	unsigned long priv;
	unsigned count, loop;

	_enter("{%llx:%llu},{%lx-%lx}",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("done %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(vnode->vfs_inode.i_mapping,
					      first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			priv = (unsigned long)detach_page_private(pv.pages[loop]);
			trace_afs_page_dirty(vnode, tracepoint_string("clear"),
					     pv.pages[loop]->index, priv);
			end_page_writeback(pv.pages[loop]);
		}
		first += count;

		__pagevec_release(&pv);
	} while (first <= last);

	afs_prune_wb_keys(vnode);
	_leave("");
}

/*
 * Find a key to use for the writeback.  We cached the keys used to author the
 * writes on the vnode.  *_wbk will contain the last writeback key used or NULL
 * and we need to start from there if it's set.
 */
static int afs_get_writeback_key(struct afs_vnode *vnode,
				 struct afs_wb_key **_wbk)
{
	struct afs_wb_key *wbk = NULL;
	struct list_head *p;
	int ret = -ENOKEY, ret2;

	spin_lock(&vnode->wb_lock);
	if (*_wbk)
		p = (*_wbk)->vnode_link.next;
	else
		p = vnode->wb_keys.next;

	while (p != &vnode->wb_keys) {
		wbk = list_entry(p, struct afs_wb_key, vnode_link);
		_debug("wbk %u", key_serial(wbk->key));
		ret2 = key_validate(wbk->key);
		if (ret2 == 0) {
			refcount_inc(&wbk->usage);
			_debug("USE WB KEY %u", key_serial(wbk->key));
			break;
		}

		wbk = NULL;
		if (ret == -ENOKEY)
			ret = ret2;
		p = p->next;
	}

	spin_unlock(&vnode->wb_lock);
	if (*_wbk)
		afs_put_wb_key(*_wbk);
	*_wbk = wbk;
	return wbk ? 0 : ret;
}
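
/*
 * Process a successful StoreData RPC: commit the returned status to the
 * vnode, mark the stored pages as written back (unless this was a launder)
 * and account the number of bytes stored.
 */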
static void afs_store_data_success(struct afs_operation *op)
{
	struct afs_vnode *vnode = op->file[0].vnode;

	op->ctime = op->file[0].scb.status.mtime_client;
	afs_vnode_commit_status(op, &op->file[0]);
	if (op->error == 0) {
		if (!op->store.laundering)
			afs_pages_written_back(vnode, op->store.first, op->store.last);
		afs_stat_v(vnode, n_stores);
		atomic_long_add((op->store.last * PAGE_SIZE + op->store.last_to) -
				(op->store.first * PAGE_SIZE + op->store.first_offset),
				&afs_v2net(vnode)->n_store_bytes);
	}
}

static const struct afs_operation_ops afs_store_data_operation = {
	.issue_afs_rpc	= afs_fs_store_data,
	.issue_yfs_rpc	= yfs_fs_store_data,
	.success	= afs_store_data_success,
};

/*
 * write to a file
 */
static int afs_store_data(struct address_space *mapping,
			  pgoff_t first, pgoff_t last,
			  unsigned offset, unsigned to, bool laundering)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct afs_operation *op;
	struct afs_wb_key *wbk = NULL;
	int ret;

	_enter("%s{%llx:%llu.%u},%lx,%lx,%x,%x",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       first, last, offset, to);

	ret = afs_get_writeback_key(vnode, &wbk);
	if (ret) {
		_leave(" = %d [no keys]", ret);
		return ret;
	}

	op = afs_alloc_operation(wbk->key, vnode->volume);
	if (IS_ERR(op)) {
		afs_put_wb_key(wbk);
		return -ENOMEM;
	}

	afs_op_set_vnode(op, 0, vnode);
	op->file[0].dv_delta = 1;
	op->store.mapping = mapping;
	op->file[0].modification = true;
	op->store.first = first;
	op->store.last = last;
	op->store.first_offset = offset;
	op->store.last_to = to;
	op->store.laundering = laundering;
	op->mtime = vnode->vfs_inode.i_mtime;
	op->flags |= AFS_OPERATION_UNINTR;
	op->ops = &afs_store_data_operation;
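
	/* Issue the store.  If it fails with a permission or key error, retry
	 * with the next writeback key cached on the vnode.
	 */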
try_next_key:
	afs_begin_vnode_operation(op);
	afs_wait_for_operation(op);

	switch (op->error) {
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		_debug("next");

		ret = afs_get_writeback_key(vnode, &wbk);
		if (ret == 0) {
			key_put(op->key);
			op->key = key_get(wbk->key);
			goto try_next_key;
		}
		break;
	}

	afs_put_wb_key(wbk);
	_leave(" = %d", op->error);
	return afs_put_operation(op);
}

/*
 * Synchronously write back the locked page and any subsequent non-locked dirty
 * pages.
 */
static int afs_write_back_from_locked_page(struct address_space *mapping,
					   struct writeback_control *wbc,
					   struct page *primary_page,
					   pgoff_t final_page)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct page *pages[8], *page;
	unsigned long count, priv;
	unsigned n, offset, to, f, t;
	pgoff_t start, first, last;
	loff_t i_size, end;
	int loop, ret;

	_enter(",%lx", primary_page->index);

	count = 1;
	if (test_set_page_writeback(primary_page))
		BUG();

	/* Find all consecutive lockable dirty pages that have contiguous
	 * written regions, stopping when we find a page that is not
	 * immediately lockable, is not dirty or is missing, or we reach the
	 * end of the range.
	 */
	start = primary_page->index;
	priv = page_private(primary_page);
	offset = afs_page_dirty_from(priv);
	to = afs_page_dirty_to(priv);
	trace_afs_page_dirty(vnode, tracepoint_string("store"),
			     primary_page->index, priv);

	WARN_ON(offset == to);
	if (offset == to)
		trace_afs_page_dirty(vnode, tracepoint_string("WARN"),
				     primary_page->index, priv);

	if (start >= final_page ||
	    (to < PAGE_SIZE && !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)))
		goto no_more;

	start++;
	do {
		_debug("more %lx [%lx]", start, count);
		n = final_page - start + 1;
		if (n > ARRAY_SIZE(pages))
			n = ARRAY_SIZE(pages);
		n = find_get_pages_contig(mapping, start, ARRAY_SIZE(pages), pages);
		_debug("fgpc %u", n);
		if (n == 0)
			goto no_more;
		if (pages[0]->index != start) {
			do {
				put_page(pages[--n]);
			} while (n > 0);
			goto no_more;
		}

		for (loop = 0; loop < n; loop++) {
			page = pages[loop];
			if (to != PAGE_SIZE &&
			    !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags))
				break;
			if (page->index > final_page)
				break;
			if (!trylock_page(page))
				break;
			if (!PageDirty(page) || PageWriteback(page)) {
				unlock_page(page);
				break;
			}

			priv = page_private(page);
			f = afs_page_dirty_from(priv);
			t = afs_page_dirty_to(priv);
			if (f != 0 &&
			    !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)) {
				unlock_page(page);
				break;
			}
			to = t;

			trace_afs_page_dirty(vnode, tracepoint_string("store+"),
					     page->index, priv);

			if (!clear_page_dirty_for_io(page))
				BUG();
			if (test_set_page_writeback(page))
				BUG();
			unlock_page(page);
			put_page(page);
		}
		count += loop;
		if (loop < n) {
			for (; loop < n; loop++)
				put_page(pages[loop]);
			goto no_more;
		}

		start += loop;
	} while (start <= final_page && count < 65536);

no_more:
	/* We now have a contiguous set of dirty pages, each with writeback
	 * set; the first page is still locked at this point, but all the rest
	 * have been unlocked.
	 */
	unlock_page(primary_page);

	first = primary_page->index;
	last = first + count - 1;

	end = (loff_t)last * PAGE_SIZE + to;
	i_size = i_size_read(&vnode->vfs_inode);

	_debug("write back %lx[%u..] to %lx[..%u]", first, offset, last, to);
	if (end > i_size)
		to = i_size & ~PAGE_MASK;

	ret = afs_store_data(mapping, first, last, offset, to, false);
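
	/* Decide how to handle the result: a clean store returns the number
	 * of pages written; permission/key and space errors redirty the pages
	 * for another attempt; fatal errors discard the pages entirely.
	 */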
	switch (ret) {
	case 0:
		ret = count;
		break;

	default:
		pr_notice("kAFS: Unexpected error from FS.StoreData %d\n", ret);
		fallthrough;
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		afs_redirty_pages(wbc, mapping, first, last);
		mapping_set_error(mapping, ret);
		break;

	case -EDQUOT:
	case -ENOSPC:
		afs_redirty_pages(wbc, mapping, first, last);
		mapping_set_error(mapping, -ENOSPC);
		break;

	case -EROFS:
	case -EIO:
	case -EREMOTEIO:
	case -EFBIG:
	case -ENOENT:
	case -ENOMEDIUM:
	case -ENXIO:
		trace_afs_file_error(vnode, ret, afs_file_error_writeback_fail);
		afs_kill_pages(mapping, first, last);
		mapping_set_error(mapping, ret);
		break;
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * write a page back to the server
 * - the caller locked the page for us
 */
int afs_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	_enter("{%lx},", page->index);

	ret = afs_write_back_from_locked_page(page->mapping, wbc, page,
					      wbc->range_end >> PAGE_SHIFT);
	if (ret < 0) {
		_leave(" = %d", ret);
		return 0;
	}

	wbc->nr_to_write -= ret;

	_leave(" = 0");
	return 0;
}

/*
 * write a region of pages back to the server
 */
static int afs_writepages_region(struct address_space *mapping,
				 struct writeback_control *wbc,
				 pgoff_t index, pgoff_t end, pgoff_t *_next)
{
	struct page *page;
	int ret, n;

	_enter(",,%lx,%lx,", index, end);

	do {
		n = find_get_pages_range_tag(mapping, &index, end,
					     PAGECACHE_TAG_DIRTY, 1, &page);
		if (!n)
			break;

		_debug("wback %lx", page->index);

		/*
		 * at this point we hold neither the i_pages lock nor the
		 * page lock: the page may be truncated or invalidated
		 * (changing page->mapping to NULL), or even swizzled
		 * back from swapper_space to tmpfs file mapping
		 */
		ret = lock_page_killable(page);
		if (ret < 0) {
			put_page(page);
			_leave(" = %d", ret);
			return ret;
		}

		if (page->mapping != mapping || !PageDirty(page)) {
			unlock_page(page);
			put_page(page);
			continue;
		}

		if (PageWriteback(page)) {
			unlock_page(page);
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			put_page(page);
			continue;
		}

		if (!clear_page_dirty_for_io(page))
			BUG();
		ret = afs_write_back_from_locked_page(mapping, wbc, page, end);
		put_page(page);
		if (ret < 0) {
			_leave(" = %d", ret);
			return ret;
		}

		wbc->nr_to_write -= ret;

		cond_resched();
	} while (index < end && wbc->nr_to_write > 0);

	*_next = index;
	_leave(" = 0 [%lx]", *_next);
	return 0;
}

/*
 * write some of the pending data back to the server
 */
int afs_writepages(struct address_space *mapping,
		   struct writeback_control *wbc)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	pgoff_t start, end, next;
	int ret;

	_enter("");

	/* We have to be careful as we can end up racing with setattr()
	 * truncating the pagecache since the caller doesn't take a lock here
	 * to prevent it.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL)
		down_read(&vnode->validate_lock);
	else if (!down_read_trylock(&vnode->validate_lock))
		return 0;

	if (wbc->range_cyclic) {
		start = mapping->writeback_index;
		end = -1;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
		if (start > 0 && wbc->nr_to_write > 0 && ret == 0)
			ret = afs_writepages_region(mapping, wbc, 0, start,
						    &next);
		mapping->writeback_index = next;
	} else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
		end = (pgoff_t)(LLONG_MAX >> PAGE_SHIFT);
		ret = afs_writepages_region(mapping, wbc, 0, end, &next);
		if (wbc->nr_to_write > 0)
			mapping->writeback_index = next;
	} else {
		start = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
	}

	up_read(&vnode->validate_lock);
	_leave(" = %d", ret);
	return ret;
}

/*
 * write to an AFS file
 */
ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
	ssize_t result;
	size_t count = iov_iter_count(from);

	_enter("{%llx:%llu},{%zu},",
	       vnode->fid.vid, vnode->fid.vnode, count);

	if (IS_SWAPFILE(&vnode->vfs_inode)) {
		printk(KERN_INFO
		       "AFS: Attempt to write to active swap file!\n");
		return -EBUSY;
	}

	if (!count)
		return 0;

	result = generic_file_write_iter(iocb, from);

	_leave(" = %zd", result);
	return result;
}

/*
 * flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);

	_enter("{%llx:%llu},{n=%pD},%d",
	       vnode->fid.vid, vnode->fid.vnode, file,
	       datasync);

	return file_write_and_wait_range(file, start, end);
}

/*
 * notification that a previously read-only page is about to become writable
 * - if it returns an error, the caller will deliver a bus error signal
 */
vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);
	unsigned long priv;

	_enter("{{%llx:%llu}},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, vmf->page->index);

	sb_start_pagefault(inode->i_sb);

	/* Wait for the page to be written to the cache before we allow it to
	 * be modified.  We then assume the entire page will need writing back.
	 */
#ifdef CONFIG_AFS_FSCACHE
	fscache_wait_on_page_write(vnode->cache, vmf->page);
#endif

	if (PageWriteback(vmf->page) &&
	    wait_on_page_bit_killable(vmf->page, PG_writeback) < 0)
		return VM_FAULT_RETRY;

	if (lock_page_killable(vmf->page) < 0)
		return VM_FAULT_RETRY;

	/* We mustn't change page->private until writeback is complete as that
	 * details the portion of the page we need to write back and we might
	 * need to redirty the page if there's a problem.
	 */
	wait_on_page_writeback(vmf->page);

	priv = afs_page_dirty(0, PAGE_SIZE);
	priv = afs_page_dirty_mmapped(priv);
	trace_afs_page_dirty(vnode, tracepoint_string("mkwrite"),
			     vmf->page->index, priv);
	if (PagePrivate(vmf->page))
		set_page_private(vmf->page, priv);
	else
		attach_page_private(vmf->page, (void *)priv);
	file_update_time(file);

	sb_end_pagefault(inode->i_sb);
	return VM_FAULT_LOCKED;
}

/*
 * Prune the keys cached for writeback.  This takes vnode->wb_lock itself, so
 * the caller must not already hold it.
 */
void afs_prune_wb_keys(struct afs_vnode *vnode)
{
	LIST_HEAD(graveyard);
	struct afs_wb_key *wbk, *tmp;

	/* Discard unused keys */
	spin_lock(&vnode->wb_lock);

	if (!mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
	    !mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_DIRTY)) {
		list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
			if (refcount_read(&wbk->usage) == 1)
				list_move(&wbk->vnode_link, &graveyard);
		}
	}

	spin_unlock(&vnode->wb_lock);

	while (!list_empty(&graveyard)) {
		wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link);
		list_del(&wbk->vnode_link);
		afs_put_wb_key(wbk);
	}
}

/*
 * Clean up a page during invalidation.
 */
int afs_launder_page(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	unsigned long priv;
	unsigned int f, t;
	int ret = 0;

	_enter("{%lx}", page->index);

	priv = page_private(page);
	if (clear_page_dirty_for_io(page)) {
		f = 0;
		t = PAGE_SIZE;
		if (PagePrivate(page)) {
			f = afs_page_dirty_from(priv);
			t = afs_page_dirty_to(priv);
		}

		trace_afs_page_dirty(vnode, tracepoint_string("launder"),
				     page->index, priv);
		ret = afs_store_data(mapping, page->index, page->index, f, t, true);
	}

	priv = (unsigned long)detach_page_private(page);
	trace_afs_page_dirty(vnode, tracepoint_string("laundered"),
			     page->index, priv);

#ifdef CONFIG_AFS_FSCACHE
	if (PageFsCache(page)) {
		fscache_wait_on_page_write(vnode->cache, page);
		fscache_uncache_page(vnode->cache, page);
	}
#endif
	return ret;
}