// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/nfs/read.c
 *
 * Block I/O for NFS
 *
 * Partial copy of Linus' read cache modifications to fs/nfs/file.c
 * modified for async RPC by okir@monad.swb.de
 */

#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>

#include "nfs4_fs.h"
#include "internal.h"
#include "iostat.h"
#include "fscache.h"
#include "pnfs.h"
#include "nfstrace.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops;
static const struct nfs_rw_ops nfs_rw_read_ops;

static struct kmem_cache *nfs_rdata_cachep;
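
/* Allocate a zeroed read pageio header from the nfs_rdata_cachep slab. */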
static struct nfs_pgio_header *nfs_readhdr_alloc(void)
{
	struct nfs_pgio_header *p = kmem_cache_zalloc(nfs_rdata_cachep, GFP_KERNEL);

	if (p)
		p->rw_mode = FMODE_READ;
	return p;
}

static void nfs_readhdr_free(struct nfs_pgio_header *rhdr)
{
	kmem_cache_free(nfs_rdata_cachep, rhdr);
}
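
/* Zero-fill the page, mark it up to date and unlock it; called when
 * nfs_page_length() reports no data to read for the page. */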
static
int nfs_return_empty_page(struct page *page)
{
	zero_user(page, 0, PAGE_SIZE);
	SetPageUptodate(page);
	unlock_page(page);
	return 0;
}
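
/* Set up a pageio descriptor for reads, using the pNFS layout driver's
 * read ops when one is active and MDS I/O is not being forced. */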
void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
			  struct inode *inode, bool force_mds,
			  const struct nfs_pgio_completion_ops *compl_ops)
{
	struct nfs_server *server = NFS_SERVER(inode);
	const struct nfs_pageio_ops *pg_ops = &nfs_pgio_rw_ops;

#ifdef CONFIG_NFS_V4_1
	if (server->pnfs_curr_ld && !force_mds)
		pg_ops = server->pnfs_curr_ld->pg_read_ops;
#endif
	nfs_pageio_init(pgio, inode, pg_ops, compl_ops, &nfs_rw_read_ops,
			server->rsize, 0);
}
EXPORT_SYMBOL_GPL(nfs_pageio_init_read);
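
/* Switch a pageio descriptor back to plain MDS I/O and reset the single
 * read mirror's block size to the server's rsize. */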
void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
{
	struct nfs_pgio_mirror *mirror;

	if (pgio->pg_ops && pgio->pg_ops->pg_cleanup)
		pgio->pg_ops->pg_cleanup(pgio);

	pgio->pg_ops = &nfs_pgio_rw_ops;

	/* read path should never have more than one mirror */
	WARN_ON_ONCE(pgio->pg_mirror_count != 1);

	mirror = &pgio->pg_mirrors[0];
	mirror->pg_bsize = NFS_SERVER(pgio->pg_inode)->rsize;
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);
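
/* Release a read request: flag the page on fatal server errors, push an
 * up-to-date page into fscache, unlock the page once the whole page group
 * has completed, and drop the request reference. */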
static void nfs_readpage_release(struct nfs_page *req, int error)
{
	struct inode *inode = d_inode(nfs_req_openctx(req)->dentry);
	struct page *page = req->wb_page;

	dprintk("NFS: read done (%s/%llu %d@%lld)\n", inode->i_sb->s_id,
		(unsigned long long)NFS_FILEID(inode), req->wb_bytes,
		(long long)req_offset(req));

	if (nfs_error_is_fatal_on_server(error) && error != -ETIMEDOUT)
		SetPageError(page);
	if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE)) {
		struct address_space *mapping = page_file_mapping(page);

		if (PageUptodate(page))
			nfs_readpage_to_fscache(inode, page, 0);
		else if (!PageError(page) && !PagePrivate(page))
			generic_error_remove_page(mapping, page);
		unlock_page(page);
	}
	nfs_release_request(req);
}
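
/* Issue an asynchronous read of a single page: build one nfs_page request
 * covering the valid part of the page, zero the remainder, and feed the
 * request through a freshly initialised pageio descriptor. */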
int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
		       struct page *page)
{
	struct nfs_page *new;
	unsigned int len;
	struct nfs_pageio_descriptor pgio;
	struct nfs_pgio_mirror *pgm;

	len = nfs_page_length(page);
	if (len == 0)
		return nfs_return_empty_page(page);
	new = nfs_create_request(ctx, page, 0, len);
	if (IS_ERR(new)) {
		unlock_page(page);
		return PTR_ERR(new);
	}
	if (len < PAGE_SIZE)
		zero_user_segment(page, len, PAGE_SIZE);

	nfs_pageio_init_read(&pgio, inode, false,
			     &nfs_async_read_completion_ops);
	if (!nfs_pageio_add_request(&pgio, new)) {
		nfs_list_remove_request(new);
		nfs_readpage_release(new, pgio.pg_error);
	}
	nfs_pageio_complete(&pgio);

	/* It doesn't make sense to do mirrored reads! */
	WARN_ON_ONCE(pgio.pg_mirror_count != 1);

	pgm = &pgio.pg_mirrors[0];
	NFS_I(inode)->read_io += pgm->pg_bytes_written;

	return pgio.pg_error < 0 ? pgio.pg_error : 0;
}

static void nfs_page_group_set_uptodate(struct nfs_page *req)
{
	if (nfs_page_group_sync_on_bit(req, PG_UPTODATE))
		SetPageUptodate(req->wb_page);
}
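
/* Completion callback for a read header: zero any bytes past the good
 * range when EOF was hit, mark fully-read page groups up to date, record
 * errors in the open context, and release each request. */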
static void nfs_read_completion(struct nfs_pgio_header *hdr)
{
	unsigned long bytes = 0;
	int error;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out;
	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
		struct page *page = req->wb_page;
		unsigned long start = req->wb_pgbase;
		unsigned long end = req->wb_pgbase + req->wb_bytes;

		if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) {
			/* note: regions of the page not covered by a
			 * request are zeroed in nfs_readpage_async /
			 * readpage_async_filler */
			if (bytes > hdr->good_bytes) {
				/* nothing in this request was good, so zero
				 * the full extent of the request */
				zero_user_segment(page, start, end);
			} else if (hdr->good_bytes - bytes < req->wb_bytes) {
				/* part of this request has good bytes, but
				 * not all. zero the bad bytes */
				start += hdr->good_bytes - bytes;
				WARN_ON(start < req->wb_pgbase);
				zero_user_segment(page, start, end);
			}
		}
		error = 0;
		bytes += req->wb_bytes;
		if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
			if (bytes <= hdr->good_bytes)
				nfs_page_group_set_uptodate(req);
			else {
				error = hdr->error;
				xchg(&nfs_req_openctx(req)->error, error);
			}
		} else
			nfs_page_group_set_uptodate(req);
		nfs_list_remove_request(req);
		nfs_readpage_release(req, error);
	}
out:
	hdr->release(hdr);
}
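
/* Prepare the RPC message and task setup for a read, adding the swap I/O
 * flags when the inode backs a swapfile. */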
static void nfs_initiate_read(struct nfs_pgio_header *hdr,
			      struct rpc_message *msg,
			      const struct nfs_rpc_ops *rpc_ops,
			      struct rpc_task_setup *task_setup_data, int how)
{
	struct inode *inode = hdr->inode;
	int swap_flags = IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0;

	task_setup_data->flags |= swap_flags;
	rpc_ops->read_setup(hdr, msg);
	trace_nfs_initiate_read(hdr);
}

static void
nfs_async_read_error(struct list_head *head, int error)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_readpage_release(req, error);
	}
}

static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops = {
	.error_cleanup = nfs_async_read_error,
	.completion = nfs_read_completion,
};

/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
static int nfs_readpage_done(struct rpc_task *task,
			     struct nfs_pgio_header *hdr,
			     struct inode *inode)
{
	int status = NFS_PROTO(inode)->read_done(task, hdr);
	if (status != 0)
		return status;

	nfs_add_stats(inode, NFSIOS_SERVERREADBYTES, hdr->res.count);
	trace_nfs_readpage_done(task, hdr);

	if (task->tk_status == -ESTALE) {
		nfs_set_inode_stale(inode);
		nfs_mark_for_revalidate(inode);
	}
	return 0;
}
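
/* Handle a short read: fail with -EIO if the server made no progress,
 * fall back to the MDS for non-RPC layout drivers, otherwise advance the
 * arguments past the bytes already received and restart the RPC call. */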
static void nfs_readpage_retry(struct rpc_task *task,
			       struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_args *argp = &hdr->args;
	struct nfs_pgio_res  *resp = &hdr->res;

	/* This is a short read! */
	nfs_inc_stats(hdr->inode, NFSIOS_SHORTREAD);
	trace_nfs_readpage_short(task, hdr);

	/* Has the server at least made some progress? */
	if (resp->count == 0) {
		nfs_set_pgio_error(hdr, -EIO, argp->offset);
		return;
	}

	/* For non rpc-based layout drivers, retry-through-MDS */
	if (!task->tk_ops) {
		hdr->pnfs_error = -EAGAIN;
		return;
	}

	/* Yes, so retry the read at the end of the hdr */
	hdr->mds_offset += resp->count;
	argp->offset += resp->count;
	argp->pgbase += resp->count;
	argp->count -= resp->count;
	resp->count = 0;
	resp->eof = 0;
	rpc_restart_call_prepare(task);
}
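
/* Per-RPC result handling: trim good_bytes when the server reported EOF,
 * or schedule a retry when fewer bytes than requested were returned. */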
static void nfs_readpage_result(struct rpc_task *task,
				struct nfs_pgio_header *hdr)
{
	if (hdr->res.eof) {
		loff_t pos = hdr->args.offset + hdr->res.count;
		unsigned int new = pos - hdr->io_start;

		if (hdr->good_bytes > new) {
			hdr->good_bytes = new;
			set_bit(NFS_IOHDR_EOF, &hdr->flags);
			clear_bit(NFS_IOHDR_ERROR, &hdr->flags);
		}
	} else if (hdr->res.count < hdr->args.count)
		nfs_readpage_retry(task, hdr);
}

/*
 * Read a page over NFS.
 * We read the page synchronously in the following case:
 *  -	The error flag is set for this page. This happens only when a
 *	previous async read operation failed.
 */
int nfs_readpage(struct file *file, struct page *page)
{
	struct nfs_open_context *ctx;
	struct inode *inode = page_file_mapping(page)->host;
	int error;

	dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
		page, PAGE_SIZE, page_index(page));
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
	nfs_add_stats(inode, NFSIOS_READPAGES, 1);

	/*
	 * Try to flush any pending writes to the file..
	 *
	 * NOTE! Because we own the page lock, there cannot
	 * be any new pending writes generated at this point
	 * for this page (other pages can be written to).
	 */
	error = nfs_wb_page(inode, page);
	if (error)
		goto out_unlock;
	if (PageUptodate(page))
		goto out_unlock;

	error = -ESTALE;
	if (NFS_STALE(inode))
		goto out_unlock;

	if (file == NULL) {
		error = -EBADF;
		ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (ctx == NULL)
			goto out_unlock;
	} else
		ctx = get_nfs_open_context(nfs_file_open_context(file));

	if (!IS_SYNC(inode)) {
		error = nfs_readpage_from_fscache(ctx, inode, page);
		if (error == 0)
			goto out;
	}

	xchg(&ctx->error, 0);
	error = nfs_readpage_async(ctx, inode, page);
	if (!error) {
		error = wait_on_page_locked_killable(page);
		if (!PageUptodate(page) && !error)
			error = xchg(&ctx->error, 0);
	}
out:
	put_nfs_open_context(ctx);
	return error;
out_unlock:
	unlock_page(page);
	return error;
}

struct nfs_readdesc {
	struct nfs_pageio_descriptor *pgio;
	struct nfs_open_context *ctx;
};
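
/* read_cache_pages() callback: wrap each page in an nfs_page request,
 * zero the tail of a partial page, and add it to the pageio descriptor. */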
static int
readpage_async_filler(void *data, struct page *page)
{
	struct nfs_readdesc *desc = (struct nfs_readdesc *)data;
	struct nfs_page *new;
	unsigned int len;
	int error;

	len = nfs_page_length(page);
	if (len == 0)
		return nfs_return_empty_page(page);

	new = nfs_create_request(desc->ctx, page, 0, len);
	if (IS_ERR(new))
		goto out_error;

	if (len < PAGE_SIZE)
		zero_user_segment(page, len, PAGE_SIZE);
	if (!nfs_pageio_add_request(desc->pgio, new)) {
		nfs_list_remove_request(new);
		error = desc->pgio->pg_error;
		nfs_readpage_release(new, error);
		goto out;
	}
	return 0;
out_error:
	error = PTR_ERR(new);
	unlock_page(page);
out:
	return error;
}
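
/* ->readpages() entry point: try fscache first, then queue the remaining
 * pages through a pageio descriptor for asynchronous RPC reads. */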
int nfs_readpages(struct file *filp, struct address_space *mapping,
		  struct list_head *pages, unsigned nr_pages)
{
	struct nfs_pageio_descriptor pgio;
	struct nfs_pgio_mirror *pgm;
	struct nfs_readdesc desc = {
		.pgio = &pgio,
	};
	struct inode *inode = mapping->host;
	unsigned long npages;
	int ret = -ESTALE;

	dprintk("NFS: nfs_readpages (%s/%Lu %d)\n",
		inode->i_sb->s_id,
		(unsigned long long)NFS_FILEID(inode),
		nr_pages);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);

	if (NFS_STALE(inode))
		goto out;

	if (filp == NULL) {
		desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (desc.ctx == NULL)
			return -EBADF;
	} else
		desc.ctx = get_nfs_open_context(nfs_file_open_context(filp));

	/* attempt to read as many of the pages as possible from the cache
	 * - this returns -ENOBUFS immediately if the cookie is negative
	 */
	ret = nfs_readpages_from_fscache(desc.ctx, inode, mapping,
					 pages, &nr_pages);
	if (ret == 0)
		goto read_complete; /* all pages were read */

	nfs_pageio_init_read(&pgio, inode, false,
			     &nfs_async_read_completion_ops);

	ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);
	nfs_pageio_complete(&pgio);

	/* It doesn't make sense to do mirrored reads! */
	WARN_ON_ONCE(pgio.pg_mirror_count != 1);

	pgm = &pgio.pg_mirrors[0];
	NFS_I(inode)->read_io += pgm->pg_bytes_written;
	npages = (pgm->pg_bytes_written + PAGE_SIZE - 1) >> PAGE_SHIFT;
	nfs_add_stats(inode, NFSIOS_READPAGES, npages);
read_complete:
	put_nfs_open_context(desc.ctx);
out:
	return ret;
}

int __init nfs_init_readpagecache(void)
{
	nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
					     sizeof(struct nfs_pgio_header),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_rdata_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_readpagecache(void)
{
	kmem_cache_destroy(nfs_rdata_cachep);
}

static const struct nfs_rw_ops nfs_rw_read_ops = {
	.rw_alloc_header	= nfs_readhdr_alloc,
	.rw_free_header		= nfs_readhdr_free,
	.rw_done		= nfs_readpage_done,
	.rw_result		= nfs_readpage_result,
	.rw_initiate		= nfs_initiate_read,
};