/*
 * linux/fs/nfs/read.c
 *
 * Block I/O for NFS
 *
 * Partial copy of Linus' read cache modifications to fs/nfs/file.c
 * modified for async RPC by okir@monad.swb.de
 */

#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/smp_lock.h>

#include <asm/system.h>

#include "internal.h"
#include "iostat.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

static int nfs_pagein_one(struct list_head *, struct inode *);
static const struct rpc_call_ops nfs_read_partial_ops;
static const struct rpc_call_ops nfs_read_full_ops;

static struct kmem_cache *nfs_rdata_cachep;
static mempool_t *nfs_rdata_mempool;

#define MIN_POOL_READ	(32)
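
/*
 * Allocate a read descriptor large enough for @len bytes. Requests
 * spanning few enough pages use the embedded page_array; larger ones
 * get a kcalloc()'d page vector, and the descriptor goes back to the
 * mempool if that allocation fails.
 */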
struct nfs_read_data *nfs_readdata_alloc(size_t len)
{
	unsigned int pagecount = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct nfs_read_data *p = mempool_alloc(nfs_rdata_mempool, GFP_NOFS);

	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->pages);
		p->npages = pagecount;
		if (pagecount <= ARRAY_SIZE(p->page_array))
			p->pagevec = p->page_array;
		else {
			p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
			if (!p->pagevec) {
				mempool_free(p, nfs_rdata_mempool);
				p = NULL;
			}
		}
	}
	return p;
}
static void nfs_readdata_rcu_free(struct rcu_head *head)
{
	struct nfs_read_data *p = container_of(head, struct nfs_read_data, task.u.tk_rcu);
	if (p && (p->pagevec != &p->page_array[0]))
		kfree(p->pagevec);
	mempool_free(p, nfs_rdata_mempool);
}

static void nfs_readdata_free(struct nfs_read_data *rdata)
{
	call_rcu_bh(&rdata->task.u.tk_rcu, nfs_readdata_rcu_free);
}

void nfs_readdata_release(void *data)
{
	nfs_readdata_free(data);
}
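
/*
 * Zero-fill a page for which there is nothing to read from the
 * server, mark it up to date and unlock it.
 */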
static
int nfs_return_empty_page(struct page *page)
{
	memclear_highpage_flush(page, 0, PAGE_CACHE_SIZE);
	SetPageUptodate(page);
	unlock_page(page);
	return 0;
}
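
/*
 * The server hit EOF before filling the full byte range we asked
 * for: zero out the tail of every page the reply left untouched.
 */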
static void nfs_readpage_truncate_uninitialised_page(struct nfs_read_data *data)
{
	unsigned int remainder = data->args.count - data->res.count;
	unsigned int base = data->args.pgbase + data->res.count;
	unsigned int pglen;
	struct page **pages;

	if (data->res.eof == 0 || remainder == 0)
		return;
	/*
	 * Note: "remainder" can never be negative, since we check for
	 * this in the XDR code.
	 */
	pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
	base &= ~PAGE_CACHE_MASK;
	pglen = PAGE_CACHE_SIZE - base;
	for (;;) {
		if (remainder <= pglen) {
			memclear_highpage_flush(*pages, base, remainder);
			break;
		}
		memclear_highpage_flush(*pages, base, pglen);
		pages++;
		remainder -= pglen;
		pglen = PAGE_CACHE_SIZE;
		base = 0;
	}
}
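
/*
 * Build a single request covering @page and send it off through
 * nfs_pagein_one(). Any part of the page beyond the current file
 * length is zeroed locally instead of being read from the server.
 */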
static int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
		struct page *page)
{
	LIST_HEAD(one_request);
	struct nfs_page *new;
	unsigned int len;

	len = nfs_page_length(page);
	if (len == 0)
		return nfs_return_empty_page(page);
	new = nfs_create_request(ctx, inode, page, 0, len);
	if (IS_ERR(new)) {
		unlock_page(page);
		return PTR_ERR(new);
	}
	if (len < PAGE_CACHE_SIZE)
		memclear_highpage_flush(page, len, PAGE_CACHE_SIZE - len);

	nfs_list_add_request(new, &one_request);
	nfs_pagein_one(&one_request, inode);
	return 0;
}
static void nfs_readpage_release(struct nfs_page *req)
{
	unlock_page(req->wb_page);

	dprintk("NFS: read done (%s/%Ld %d@%Ld)\n",
			req->wb_context->dentry->d_inode->i_sb->s_id,
			(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));
	nfs_clear_request(req);
	nfs_release_request(req);
}
/*
 * Set up the NFS read request struct
 */
static void nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
		const struct rpc_call_ops *call_ops,
		unsigned int count, unsigned int offset)
{
	struct inode *inode;
	int flags;

	data->req = req;
	data->inode = inode = req->wb_context->dentry->d_inode;
	data->cred = req->wb_context->cred;

	data->args.fh = NFS_FH(inode);
	data->args.offset = req_offset(req) + offset;
	data->args.pgbase = req->wb_pgbase + offset;
	data->args.pages = data->pagevec;
	data->args.count = count;
	data->args.context = req->wb_context;

	data->res.fattr = &data->fattr;
	data->res.count = count;
	data->res.eof = 0;
	nfs_fattr_init(&data->fattr);

	/* Set up the initial task struct. */
	flags = RPC_TASK_ASYNC | (IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0);
	rpc_init_task(&data->task, NFS_CLIENT(inode), flags, call_ops, data);
	NFS_PROTO(inode)->read_setup(data);

	data->task.tk_cookie = (unsigned long)inode;

	dprintk("NFS: %5u initiated read call (req %s/%Ld, %u bytes @ offset %Lu)\n",
			data->task.tk_pid,
			inode->i_sb->s_id,
			(long long)NFS_FILEID(inode),
			count,
			(unsigned long long)data->args.offset);
}
static void
nfs_async_read_error(struct list_head *head)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		SetPageError(req->wb_page);
		nfs_readpage_release(req);
	}
}

/*
 * Start an async read operation
 */
static void nfs_execute_read(struct nfs_read_data *data)
{
	struct rpc_clnt *clnt = NFS_CLIENT(data->inode);
	sigset_t oldset;

	rpc_clnt_sigmask(clnt, &oldset);
	rpc_execute(&data->task);
	rpc_clnt_sigunmask(clnt, &oldset);
}
/*
 * Generate multiple requests to fill a single page.
 *
 * We optimize to reduce the number of read operations on the wire.  If we
 * detect that we're reading a page, or an area of a page, that is past the
 * end of file, we do not generate NFS read operations but just clear the
 * parts of the page that would have come back zero from the server anyway.
 *
 * We rely on the cached value of i_size to make this determination; another
 * client can fill pages on the server past our cached end-of-file, but we
 * won't see the new data until our attribute cache is updated.  This is more
 * or less conventional NFS client behavior.
 */
static int nfs_pagein_multi(struct list_head *head, struct inode *inode)
{
	struct nfs_page *req = nfs_list_entry(head->next);
	struct page *page = req->wb_page;
	struct nfs_read_data *data;
	size_t rsize = NFS_SERVER(inode)->rsize, nbytes;
	unsigned int offset;
	int requests = 0;
	LIST_HEAD(list);

	nfs_list_remove_request(req);

	nbytes = req->wb_bytes;
	do {
		size_t len = min(nbytes, rsize);

		data = nfs_readdata_alloc(len);
		if (!data)
			goto out_bad;
		INIT_LIST_HEAD(&data->pages);
		list_add(&data->pages, &list);
		requests++;
		nbytes -= len;
	} while (nbytes != 0);
	atomic_set(&req->wb_complete, requests);

	ClearPageError(page);
	offset = 0;
	nbytes = req->wb_bytes;
	do {
		data = list_entry(list.next, struct nfs_read_data, pages);
		list_del_init(&data->pages);

		data->pagevec[0] = page;

		if (nbytes > rsize) {
			nfs_read_rpcsetup(req, data, &nfs_read_partial_ops,
					rsize, offset);
			offset += rsize;
			nbytes -= rsize;
		} else {
			nfs_read_rpcsetup(req, data, &nfs_read_partial_ops,
					nbytes, offset);
			nbytes = 0;
		}
		nfs_execute_read(data);
	} while (nbytes != 0);

	return 0;

out_bad:
	while (!list_empty(&list)) {
		data = list_entry(list.next, struct nfs_read_data, pages);
		list_del(&data->pages);
		nfs_readdata_free(data);
	}
	SetPageError(page);
	nfs_readpage_release(req);
	return -ENOMEM;
}
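
/*
 * Read a list of requests with a single RPC call. If the server's
 * maximum transfer size (rsize) is smaller than a page, hand the
 * work to nfs_pagein_multi() so each page is filled by several
 * smaller reads instead.
 */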
static int nfs_pagein_one(struct list_head *head, struct inode *inode)
{
	struct nfs_page *req;
	struct page **pages;
	struct nfs_read_data *data;
	unsigned int count;

	if (NFS_SERVER(inode)->rsize < PAGE_CACHE_SIZE)
		return nfs_pagein_multi(head, inode);

	data = nfs_readdata_alloc(NFS_SERVER(inode)->rsize);
	if (!data)
		goto out_bad;

	INIT_LIST_HEAD(&data->pages);
	pages = data->pagevec;
	count = 0;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_list_add_request(req, &data->pages);
		ClearPageError(req->wb_page);
		*pages++ = req->wb_page;
		count += req->wb_bytes;
	}
	req = nfs_list_entry(data->pages.next);

	nfs_read_rpcsetup(req, data, &nfs_read_full_ops, count, 0);
	nfs_execute_read(data);
	return 0;
out_bad:
	nfs_async_read_error(head);
	return -ENOMEM;
}
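
/*
 * Coalesce the queued requests into batches of at most @rpages pages
 * and submit each batch. Returns the number of pages sent, or a
 * negative error after failing any requests still left on @head.
 */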
static int
nfs_pagein_list(struct list_head *head, int rpages)
{
	LIST_HEAD(one_request);
	struct nfs_page *req;
	int error = 0;
	unsigned int pages = 0;

	while (!list_empty(head)) {
		pages += nfs_coalesce_requests(head, &one_request, rpages);
		req = nfs_list_entry(one_request.next);
		error = nfs_pagein_one(&one_request, req->wb_context->dentry->d_inode);
		if (error < 0)
			break;
	}
	if (error >= 0)
		return pages;

	nfs_async_read_error(head);
	return error;
}
/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
int nfs_readpage_result(struct rpc_task *task, struct nfs_read_data *data)
{
	int status;

	dprintk("NFS: %s: %5u, (status %d)\n", __FUNCTION__, task->tk_pid,
			task->tk_status);

	status = NFS_PROTO(data->inode)->read_done(task, data);
	if (status != 0)
		return status;

	nfs_add_stats(data->inode, NFSIOS_SERVERREADBYTES, data->res.count);

	if (task->tk_status == -ESTALE) {
		set_bit(NFS_INO_STALE, &NFS_FLAGS(data->inode));
		nfs_mark_for_revalidate(data->inode);
	}
	spin_lock(&data->inode->i_lock);
	NFS_I(data->inode)->cache_validity |= NFS_INO_INVALID_ATIME;
	spin_unlock(&data->inode->i_lock);
	return 0;
}
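
/*
 * Deal with a short read: if the server returned fewer bytes than
 * requested without hitting EOF, advance the arguments past the data
 * already received and restart the RPC call for the remainder.
 */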
static int nfs_readpage_retry(struct rpc_task *task, struct nfs_read_data *data)
{
	struct nfs_readargs *argp = &data->args;
	struct nfs_readres *resp = &data->res;

	if (resp->eof || resp->count == argp->count)
		return 0;

	/* This is a short read! */
	nfs_inc_stats(data->inode, NFSIOS_SHORTREAD);
	/* Has the server at least made some progress? */
	if (resp->count == 0)
		return 0;

	/* Yes, so retry the read at the end of the data */
	argp->offset += resp->count;
	argp->pgbase += resp->count;
	argp->count -= resp->count;
	rpc_restart_call(task);
	return -EAGAIN;
}
/*
 * Handle a read reply that fills part of a page.
 */
static void nfs_readpage_result_partial(struct rpc_task *task, void *calldata)
{
	struct nfs_read_data *data = calldata;
	struct nfs_page *req = data->req;
	struct page *page = req->wb_page;

	if (nfs_readpage_result(task, data) != 0)
		return;

	if (likely(task->tk_status >= 0)) {
		nfs_readpage_truncate_uninitialised_page(data);
		if (nfs_readpage_retry(task, data) != 0)
			return;
	}
	if (unlikely(task->tk_status < 0))
		SetPageError(page);
	if (atomic_dec_and_test(&req->wb_complete)) {
		if (!PageError(page))
			SetPageUptodate(page);
		nfs_readpage_release(req);
	}
}

static const struct rpc_call_ops nfs_read_partial_ops = {
	.rpc_call_done = nfs_readpage_result_partial,
	.rpc_release = nfs_readdata_release,
};
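
/*
 * Mark every page that the reply filled completely (or that lies
 * entirely beyond EOF) as up to date.
 */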
static void nfs_readpage_set_pages_uptodate(struct nfs_read_data *data)
{
	unsigned int count = data->res.count;
	unsigned int base = data->args.pgbase;
	struct page **pages;

	if (data->res.eof)
		count = data->args.count;
	if (unlikely(count == 0))
		return;
	pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
	base &= ~PAGE_CACHE_MASK;
	count += base;
	for (; count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++)
		SetPageUptodate(*pages);
	if (count == 0)
		return;
	/* Was this a short read? */
	if (data->res.eof || data->res.count == data->args.count)
		SetPageUptodate(*pages);
}
/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
static void nfs_readpage_result_full(struct rpc_task *task, void *calldata)
{
	struct nfs_read_data *data = calldata;

	if (nfs_readpage_result(task, data) != 0)
		return;
	/*
	 * Note: nfs_readpage_retry may change the values of
	 * data->args. In the multi-page case, we therefore need
	 * to ensure that we call nfs_readpage_set_pages_uptodate()
	 * first.
	 */
	if (likely(task->tk_status >= 0)) {
		nfs_readpage_truncate_uninitialised_page(data);
		nfs_readpage_set_pages_uptodate(data);
		if (nfs_readpage_retry(task, data) != 0)
			return;
	}
	while (!list_empty(&data->pages)) {
		struct nfs_page *req = nfs_list_entry(data->pages.next);

		nfs_list_remove_request(req);
		nfs_readpage_release(req);
	}
}

static const struct rpc_call_ops nfs_read_full_ops = {
	.rpc_call_done = nfs_readpage_result_full,
	.rpc_release = nfs_readdata_release,
};
/*
 * Read a page over NFS.
 * We read the page synchronously in the following case:
 *  -	The error flag is set for this page. This happens only when a
 *	previous async read operation failed.
 */
int nfs_readpage(struct file *file, struct page *page)
{
	struct nfs_open_context *ctx;
	struct inode *inode = page->mapping->host;
	int error;

	dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
		page, PAGE_CACHE_SIZE, page->index);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
	nfs_add_stats(inode, NFSIOS_READPAGES, 1);

	/*
	 * Try to flush any pending writes to the file..
	 *
	 * NOTE! Because we own the page lock, there cannot
	 * be any new pending writes generated at this point
	 * for this page (other pages can be written to).
	 */
	error = nfs_wb_page(inode, page);
	if (error)
		goto out_error;

	error = -ESTALE;
	if (NFS_STALE(inode))
		goto out_error;

	if (file == NULL) {
		error = -EBADF;
		ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (ctx == NULL)
			goto out_error;
	} else
		ctx = get_nfs_open_context((struct nfs_open_context *)
				file->private_data);

	error = nfs_readpage_async(ctx, inode, page);

	put_nfs_open_context(ctx);
	return error;

out_error:
	unlock_page(page);
	return error;
}
struct nfs_readdesc {
	struct list_head *head;
	struct nfs_open_context *ctx;
};
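
/*
 * Per-page callback for read_cache_pages(): wrap each page in an
 * nfs_page request and queue it on desc->head for later submission.
 */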
static int
readpage_async_filler(void *data, struct page *page)
{
	struct nfs_readdesc *desc = (struct nfs_readdesc *)data;
	struct inode *inode = page->mapping->host;
	struct nfs_page *new;
	unsigned int len;

	nfs_wb_page(inode, page);
	len = nfs_page_length(page);
	if (len == 0)
		return nfs_return_empty_page(page);
	new = nfs_create_request(desc->ctx, inode, page, 0, len);
	if (IS_ERR(new)) {
		SetPageError(page);
		unlock_page(page);
		return PTR_ERR(new);
	}
	if (len < PAGE_CACHE_SIZE)
		memclear_highpage_flush(page, len, PAGE_CACHE_SIZE - len);
	nfs_list_add_request(new, desc->head);
	return 0;
}
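
/*
 * The readpages() address_space operation: queue every page handed
 * to us by the VM readahead code, then submit the lot in batches of
 * at most rpages requests.
 */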
int nfs_readpages(struct file *filp, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
{
	LIST_HEAD(head);
	struct nfs_readdesc desc = {
		.head = &head,
	};
	struct inode *inode = mapping->host;
	struct nfs_server *server = NFS_SERVER(inode);
	int ret = -ESTALE;

	dprintk("NFS: nfs_readpages (%s/%Ld %d)\n",
			inode->i_sb->s_id,
			(long long)NFS_FILEID(inode),
			nr_pages);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);

	if (NFS_STALE(inode))
		goto out;

	if (filp == NULL) {
		desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (desc.ctx == NULL)
			return -EBADF;
	} else
		desc.ctx = get_nfs_open_context((struct nfs_open_context *)
				filp->private_data);
	ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);
	if (!list_empty(&head)) {
		int err = nfs_pagein_list(&head, server->rpages);
		if (!ret)
			nfs_add_stats(inode, NFSIOS_READPAGES, err);
		ret = err;
	}
	put_nfs_open_context(desc.ctx);
out:
	return ret;
}
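
/*
 * Set up the slab cache and mempool that back nfs_read_data
 * allocations; MIN_POOL_READ descriptors are kept in reserve so
 * reads can make progress under memory pressure.
 */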
int __init nfs_init_readpagecache(void)
{
	nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
					     sizeof(struct nfs_read_data),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL, NULL);
	if (nfs_rdata_cachep == NULL)
		return -ENOMEM;

	nfs_rdata_mempool = mempool_create_slab_pool(MIN_POOL_READ,
						     nfs_rdata_cachep);
	if (nfs_rdata_mempool == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_readpagecache(void)
{
	mempool_destroy(nfs_rdata_mempool);
	kmem_cache_destroy(nfs_rdata_cachep);
}