// SPDX-License-Identifier: GPL-2.0-or-later
/* AFS filesystem file handling
 *
 * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/gfp.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/mm.h>
#include "internal.h"

static int afs_file_mmap(struct file *file, struct vm_area_struct *vma);
static int afs_readpage(struct file *file, struct page *page);
static void afs_invalidatepage(struct page *page, unsigned int offset,
                               unsigned int length);
static int afs_releasepage(struct page *page, gfp_t gfp_flags);
static int afs_readpages(struct file *filp, struct address_space *mapping,
                         struct list_head *pages, unsigned nr_pages);

const struct file_operations afs_file_operations = {
        .open           = afs_open,
        .release        = afs_release,
        .llseek         = generic_file_llseek,
        .read_iter      = generic_file_read_iter,
        .write_iter     = afs_file_write,
        .mmap           = afs_file_mmap,
        .splice_read    = generic_file_splice_read,
        .splice_write   = iter_file_splice_write,
        .fsync          = afs_fsync,
        .lock           = afs_lock,
        .flock          = afs_flock,
};

const struct inode_operations afs_file_inode_operations = {
        .getattr        = afs_getattr,
        .setattr        = afs_setattr,
        .permission     = afs_permission,
};

const struct address_space_operations afs_fs_aops = {
        .readpage       = afs_readpage,
        .readpages      = afs_readpages,
        .set_page_dirty = afs_set_page_dirty,
        .launder_page   = afs_launder_page,
        .releasepage    = afs_releasepage,
        .invalidatepage = afs_invalidatepage,
        .write_begin    = afs_write_begin,
        .write_end      = afs_write_end,
        .writepage      = afs_writepage,
        .writepages     = afs_writepages,
};

static const struct vm_operations_struct afs_vm_ops = {
        .fault          = filemap_fault,
        .map_pages      = filemap_map_pages,
        .page_mkwrite   = afs_page_mkwrite,
};

/*
 * Discard a pin on a writeback key.
 */
void afs_put_wb_key(struct afs_wb_key *wbk)
{
        if (wbk && refcount_dec_and_test(&wbk->usage)) {
                key_put(wbk->key);
                kfree(wbk);
        }
}

/*
 * Cache key for writeback.
 */
int afs_cache_wb_key(struct afs_vnode *vnode, struct afs_file *af)
{
        struct afs_wb_key *wbk, *p;

        wbk = kzalloc(sizeof(struct afs_wb_key), GFP_KERNEL);
        if (!wbk)
                return -ENOMEM;
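        /* Take two refs on the new key record: one for the vnode's wb_keys
         * list and one for af->wb.  If the key turns out to be in the list
         * already, the surplus allocation is discarded below instead.
         */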
        refcount_set(&wbk->usage, 2);
        wbk->key = af->key;

        spin_lock(&vnode->wb_lock);
        list_for_each_entry(p, &vnode->wb_keys, vnode_link) {
                if (p->key == wbk->key)
                        goto found;
        }

        key_get(wbk->key);
        list_add_tail(&wbk->vnode_link, &vnode->wb_keys);
        spin_unlock(&vnode->wb_lock);
        af->wb = wbk;
        return 0;

found:
        refcount_inc(&p->usage);
        spin_unlock(&vnode->wb_lock);
        af->wb = p;
        kfree(wbk);
        return 0;
}

/*
 * open an AFS file or directory and attach a key to it
 */
int afs_open(struct inode *inode, struct file *file)
{
        struct afs_vnode *vnode = AFS_FS_I(inode);
        struct afs_file *af;
        struct key *key;
        int ret;

        _enter("{%llx:%llu},", vnode->fid.vid, vnode->fid.vnode);

        key = afs_request_key(vnode->volume->cell);
        if (IS_ERR(key)) {
                ret = PTR_ERR(key);
                goto error;
        }

        af = kzalloc(sizeof(*af), GFP_KERNEL);
        if (!af) {
                ret = -ENOMEM;
                goto error_key;
        }
        af->key = key;

        ret = afs_validate(vnode, key);
        if (ret < 0)
                goto error_af;

        if (file->f_mode & FMODE_WRITE) {
                ret = afs_cache_wb_key(vnode, af);
                if (ret < 0)
                        goto error_af;
        }
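
        /* Opening with O_TRUNC replaces whatever is on the server, so note
         * that the vnode now carries new, locally generated content.
         */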
        if (file->f_flags & O_TRUNC)
                set_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);

        file->private_data = af;
        _leave(" = 0");
        return 0;

error_af:
        kfree(af);
error_key:
        key_put(key);
error:
        _leave(" = %d", ret);
        return ret;
}

/*
 * release an AFS file or directory and discard its key
 */
int afs_release(struct inode *inode, struct file *file)
{
        struct afs_vnode *vnode = AFS_FS_I(inode);
        struct afs_file *af = file->private_data;
        int ret = 0;

        _enter("{%llx:%llu},", vnode->fid.vid, vnode->fid.vnode);

        if ((file->f_mode & FMODE_WRITE))
                ret = vfs_fsync(file, 0);

        file->private_data = NULL;
        if (af->wb)
                afs_put_wb_key(af->wb);
        key_put(af->key);
        kfree(af);
        afs_prune_wb_keys(vnode);
        _leave(" = %d", ret);
        return ret;
}

/*
 * Dispose of a ref to a read record.
 */
void afs_put_read(struct afs_read *req)
{
        int i;

        if (refcount_dec_and_test(&req->usage)) {
                if (req->pages) {
                        for (i = 0; i < req->nr_pages; i++)
                                if (req->pages[i])
                                        put_page(req->pages[i]);
                        if (req->pages != req->array)
                                kfree(req->pages);
                }
                kfree(req);
        }
}

#ifdef CONFIG_AFS_FSCACHE
/*
 * deal with notification that a page was read from the cache
 */
static void afs_file_readpage_read_complete(struct page *page,
                                            void *data,
                                            int error)
{
        _enter("%p,%p,%d", page, data, error);

        /* if the read completes with an error, we just unlock the page and let
         * the VM reissue the readpage */
        if (!error)
                SetPageUptodate(page);
        unlock_page(page);
}
#endif

static void afs_fetch_data_success(struct afs_operation *op)
{
        struct afs_vnode *vnode = op->file[0].vnode;

        _enter("op=%08x", op->debug_id);
        afs_vnode_commit_status(op, &op->file[0]);
        afs_stat_v(vnode, n_fetches);
        atomic_long_add(op->fetch.req->actual_len, &op->net->n_fetch_bytes);
}

static void afs_fetch_data_put(struct afs_operation *op)
{
        afs_put_read(op->fetch.req);
}

static const struct afs_operation_ops afs_fetch_data_operation = {
        .issue_afs_rpc  = afs_fs_fetch_data,
        .issue_yfs_rpc  = yfs_fs_fetch_data,
        .success        = afs_fetch_data_success,
        .aborted        = afs_check_for_remote_deletion,
        .put            = afs_fetch_data_put,
};

/*
 * Fetch file data from the volume.
 */
int afs_fetch_data(struct afs_vnode *vnode, struct key *key, struct afs_read *req)
{
        struct afs_operation *op;

        _enter("%s{%llx:%llu.%u},%x,,,",
               vnode->volume->name,
               vnode->fid.vid,
               vnode->fid.vnode,
               vnode->fid.unique,
               key_serial(key));

        op = afs_alloc_operation(key, vnode->volume);
        if (IS_ERR(op))
                return PTR_ERR(op);

        afs_op_set_vnode(op, 0, vnode);

        op->fetch.req = afs_get_read(req);
        op->ops = &afs_fetch_data_operation;
        return afs_do_sync_operation(op);
}

/*
 * read page from file, directory or symlink, given a key to use
 */
int afs_page_filler(void *data, struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct afs_vnode *vnode = AFS_FS_I(inode);
        struct afs_read *req;
        struct key *key = data;
        int ret;

        _enter("{%x},{%lu},{%lu}", key_serial(key), inode->i_ino, page->index);

        BUG_ON(!PageLocked(page));

        ret = -ESTALE;
        if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
                goto error;

        /* is it cached? */
#ifdef CONFIG_AFS_FSCACHE
        ret = fscache_read_or_alloc_page(vnode->cache,
                                         page,
                                         afs_file_readpage_read_complete,
                                         NULL,
                                         GFP_KERNEL);
#else
        ret = -ENOBUFS;
#endif
        switch (ret) {
                /* read BIO submitted (page in cache) */
        case 0:
                break;

                /* page not yet cached */
        case -ENODATA:
                _debug("cache said ENODATA");
                goto go_on;

                /* page will not be cached */
        case -ENOBUFS:
                _debug("cache said ENOBUFS");

                fallthrough;
        default:
        go_on:
                req = kzalloc(struct_size(req, array, 1), GFP_KERNEL);
                if (!req)
                        goto enomem;

                /* We request a full page.  If the page is a partial one at the
                 * end of the file, the server will return a short read and the
                 * unmarshalling code will clear the unfilled space.
                 */
                refcount_set(&req->usage, 1);
                req->pos = (loff_t)page->index << PAGE_SHIFT;
                req->len = PAGE_SIZE;
                req->nr_pages = 1;
                req->pages = req->array;
                req->pages[0] = page;
                get_page(page);

                /* read the contents of the file from the server into the
                 * page */
                ret = afs_fetch_data(vnode, key, req);
                afs_put_read(req);

                if (ret < 0) {
                        if (ret == -ENOENT) {
                                _debug("got NOENT from server"
                                       " - marking file deleted and stale");
                                set_bit(AFS_VNODE_DELETED, &vnode->flags);
                                ret = -ESTALE;
                        }

#ifdef CONFIG_AFS_FSCACHE
                        fscache_uncache_page(vnode->cache, page);
#endif
                        BUG_ON(PageFsCache(page));
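
                        /* An interrupted or out-of-memory fetch doesn't mark
                         * the page as being in error, so the read can simply
                         * be retried; any other failure does.
                         */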
                        if (ret == -EINTR ||
                            ret == -ENOMEM ||
                            ret == -ERESTARTSYS ||
                            ret == -EAGAIN)
                                goto error;
                        goto io_error;
                }

                SetPageUptodate(page);

                /* send the page to the cache */
#ifdef CONFIG_AFS_FSCACHE
                if (PageFsCache(page) &&
                    fscache_write_page(vnode->cache, page, vnode->status.size,
                                       GFP_KERNEL) != 0) {
                        fscache_uncache_page(vnode->cache, page);
                        BUG_ON(PageFsCache(page));
                }
#endif
                unlock_page(page);
        }

        _leave(" = 0");
        return 0;

io_error:
        SetPageError(page);
        goto error;
enomem:
        ret = -ENOMEM;
error:
        unlock_page(page);
        _leave(" = %d", ret);
        return ret;
}

/*
 * read page from file, directory or symlink, given a file to nominate the key
 * to be used
 */
static int afs_readpage(struct file *file, struct page *page)
{
        struct key *key;
        int ret;
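
        /* Use the key nominated by the file if there is one; otherwise
         * request a key for the cell that the inode belongs to.
         */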
        if (file) {
                key = afs_file_key(file);
                ASSERT(key != NULL);
                ret = afs_page_filler(key, page);
        } else {
                struct inode *inode = page->mapping->host;
                key = afs_request_key(AFS_FS_S(inode->i_sb)->cell);
                if (IS_ERR(key)) {
                        ret = PTR_ERR(key);
                } else {
                        ret = afs_page_filler(key, page);
                        key_put(key);
                }
        }
        return ret;
}

/*
 * Make pages available as they're filled.
 */
static void afs_readpages_page_done(struct afs_read *req)
{
#ifdef CONFIG_AFS_FSCACHE
        struct afs_vnode *vnode = req->vnode;
#endif
        struct page *page = req->pages[req->index];

        req->pages[req->index] = NULL;
        SetPageUptodate(page);

        /* send the page to the cache */
#ifdef CONFIG_AFS_FSCACHE
        if (PageFsCache(page) &&
            fscache_write_page(vnode->cache, page, vnode->status.size,
                               GFP_KERNEL) != 0) {
                fscache_uncache_page(vnode->cache, page);
                BUG_ON(PageFsCache(page));
        }
#endif
        unlock_page(page);
        put_page(page);
}

/*
 * Read a contiguous set of pages.
 */
static int afs_readpages_one(struct file *file, struct address_space *mapping,
                             struct list_head *pages)
{
        struct afs_vnode *vnode = AFS_FS_I(mapping->host);
        struct afs_read *req;
        struct list_head *p;
        struct page *first, *page;
        struct key *key = afs_file_key(file);
        pgoff_t index;
        int ret, n, i;

        /* Count the number of contiguous pages at the front of the list.  Note
         * that the list goes prev-wards rather than next-wards.
         */
        first = lru_to_page(pages);
        index = first->index + 1;
        n = 1;
        for (p = first->lru.prev; p != pages; p = p->prev) {
                page = list_entry(p, struct page, lru);
                if (page->index != index)
                        break;
                index++;
                n++;
        }

        req = kzalloc(struct_size(req, array, n), GFP_NOFS);
        if (!req)
                return -ENOMEM;

        refcount_set(&req->usage, 1);
        req->vnode = vnode;
        req->page_done = afs_readpages_page_done;
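        /* Byte position in the file of the first page in the run. */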
        req->pos = first->index;
        req->pos <<= PAGE_SHIFT;
        req->pages = req->array;

        /* Transfer the pages to the request.  We add them in until one fails
         * to add to the LRU and then we stop (as that'll make a hole in the
         * contiguous run.)
         *
         * Note that it's possible for the file size to change whilst we're
         * doing this, but we rely on the server returning less than we asked
         * for if the file shrank.  We also rely on this to deal with a partial
         * page at the end of the file.
         */
        do {
                page = lru_to_page(pages);
                list_del(&page->lru);
                index = page->index;
                if (add_to_page_cache_lru(page, mapping, index,
                                          readahead_gfp_mask(mapping))) {
#ifdef CONFIG_AFS_FSCACHE
                        fscache_uncache_page(vnode->cache, page);
#endif
                        put_page(page);
                        break;
                }

                req->pages[req->nr_pages++] = page;
                req->len += PAGE_SIZE;
        } while (req->nr_pages < n);

        if (req->nr_pages == 0) {
                kfree(req);
                return 0;
        }

        ret = afs_fetch_data(vnode, key, req);
        if (ret < 0)
                goto error;

        task_io_account_read(PAGE_SIZE * req->nr_pages);
        afs_put_read(req);
        return 0;

error:
        if (ret == -ENOENT) {
                _debug("got NOENT from server"
                       " - marking file deleted and stale");
                set_bit(AFS_VNODE_DELETED, &vnode->flags);
                ret = -ESTALE;
        }

        for (i = 0; i < req->nr_pages; i++) {
                page = req->pages[i];
                if (page) {
#ifdef CONFIG_AFS_FSCACHE
                        fscache_uncache_page(vnode->cache, page);
#endif
                        SetPageError(page);
                        unlock_page(page);
                }
        }

        afs_put_read(req);
        return ret;
}

/*
 * read a set of pages
 */
static int afs_readpages(struct file *file, struct address_space *mapping,
                         struct list_head *pages, unsigned nr_pages)
{
        struct key *key = afs_file_key(file);
        struct afs_vnode *vnode;
        int ret = 0;

        _enter("{%d},{%lu},,%d",
               key_serial(key), mapping->host->i_ino, nr_pages);

        ASSERT(key != NULL);

        vnode = AFS_FS_I(mapping->host);
        if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
                _leave(" = -ESTALE");
                return -ESTALE;
        }

        /* attempt to read as many of the pages as possible */
#ifdef CONFIG_AFS_FSCACHE
        ret = fscache_read_or_alloc_pages(vnode->cache,
                                          mapping,
                                          pages,
                                          &nr_pages,
                                          afs_file_readpage_read_complete,
                                          NULL,
                                          mapping_gfp_mask(mapping));
#else
        ret = -ENOBUFS;
#endif

        switch (ret) {
                /* all pages are being read from the cache */
        case 0:
                BUG_ON(!list_empty(pages));
                BUG_ON(nr_pages != 0);
                _leave(" = 0 [reading all]");
                return 0;

                /* there were pages that couldn't be read from the cache */
        case -ENODATA:
        case -ENOBUFS:
                break;

                /* other error */
        default:
                _leave(" = %d", ret);
                return ret;
        }
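
        /* Fetch whatever remains from the server, one contiguous run of
         * pages at a time.
         */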
        while (!list_empty(pages)) {
                ret = afs_readpages_one(file, mapping, pages);
                if (ret < 0)
                        break;
        }

        _leave(" = %d [netting]", ret);
        return ret;
}

/*
 * Adjust the dirty region of the page on truncation or full invalidation,
 * getting rid of the markers altogether if the region is entirely invalidated.
 */
static void afs_invalidate_dirty(struct page *page, unsigned int offset,
                                 unsigned int length)
{
        struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
        unsigned long priv;
        unsigned int f, t, end = offset + length;

        priv = page_private(page);

        /* we clean up only if the entire page is being invalidated */
        if (offset == 0 && length == thp_size(page))
                goto full_invalidate;

        /* If the page was dirtied by page_mkwrite(), the PTE stays writable
         * and we don't get another notification to tell us to expand it
         * again.
         */
        if (afs_is_page_dirty_mmapped(priv))
                return;

        /* We may need to shorten the dirty region */
        f = afs_page_dirty_from(priv);
        t = afs_page_dirty_to(priv);

        if (t <= offset || f >= end)
                return; /* Doesn't overlap */

        if (f < offset && t > end)
                return; /* Splits the dirty region - just absorb it */

        if (f >= offset && t <= end)
                goto undirty;

        if (f < offset)
                t = offset;
        else
                f = end;
        if (f == t)
                goto undirty;

        priv = afs_page_dirty(f, t);
        set_page_private(page, priv);
        trace_afs_page_dirty(vnode, tracepoint_string("trunc"), page->index, priv);
        return;

undirty:
        trace_afs_page_dirty(vnode, tracepoint_string("undirty"), page->index, priv);
        clear_page_dirty_for_io(page);
full_invalidate:
        priv = (unsigned long)detach_page_private(page);
        trace_afs_page_dirty(vnode, tracepoint_string("inval"), page->index, priv);
}

/*
 * invalidate part or all of a page
 * - release a page and clean up its private data if offset is 0 (indicating
 *   the entire page)
 */
static void afs_invalidatepage(struct page *page, unsigned int offset,
                               unsigned int length)
{
        _enter("{%lu},%u,%u", page->index, offset, length);

        BUG_ON(!PageLocked(page));

#ifdef CONFIG_AFS_FSCACHE
        /* we clean up only if the entire page is being invalidated */
        if (offset == 0 && length == PAGE_SIZE) {
                if (PageFsCache(page)) {
                        struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
                        fscache_wait_on_page_write(vnode->cache, page);
                        fscache_uncache_page(vnode->cache, page);
                }
        }
#endif

        if (PagePrivate(page))
                afs_invalidate_dirty(page, offset, length);

        _leave("");
}

/*
 * release a page and clean up its private state if it's not busy
 * - return true if the page can now be released, false if not
 */
static int afs_releasepage(struct page *page, gfp_t gfp_flags)
{
        struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
        unsigned long priv;

        _enter("{{%llx:%llu}[%lu],%lx},%x",
               vnode->fid.vid, vnode->fid.vnode, page->index, page->flags,
               gfp_flags);

        /* deny if page is being written to the cache and the caller hasn't
         * elected to wait */
#ifdef CONFIG_AFS_FSCACHE
        if (!fscache_maybe_release_page(vnode->cache, page, gfp_flags)) {
                _leave(" = F [cache busy]");
                return 0;
        }
#endif

        if (PagePrivate(page)) {
                priv = (unsigned long)detach_page_private(page);
                trace_afs_page_dirty(vnode, tracepoint_string("rel"),
                                     page->index, priv);
        }

        /* indicate that the page can be released */
        _leave(" = T");
        return 1;
}

/*
 * Handle setting up a memory mapping on an AFS file.
 */
static int afs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        int ret;

        ret = generic_file_mmap(file, vma);
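
        /* On success, swap in AFS's vm_ops so that write faults go through
         * afs_page_mkwrite().
         */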
        if (ret == 0)
                vma->vm_ops = &afs_vm_ops;
        return ret;
}