filecache.c

/*
 * Open file cache.
 *
 * (c) 2015 - Jeff Layton <jeff.layton@primarydata.com>
 */

#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/list_lru.h>
#include <linux/fsnotify_backend.h>
#include <linux/fsnotify.h>
#include <linux/seq_file.h>

#include "vfs.h"
#include "nfsd.h"
#include "nfsfh.h"
#include "netns.h"
#include "filecache.h"
#include "trace.h"

#define NFSDDBG_FACILITY	NFSDDBG_FH

/* FIXME: dynamically size this for the machine somehow? */
#define NFSD_FILE_HASH_BITS	12
#define NFSD_FILE_HASH_SIZE	(1 << NFSD_FILE_HASH_BITS)
#define NFSD_LAUNDRETTE_DELAY	(2 * HZ)

#define NFSD_FILE_SHUTDOWN	(1)
#define NFSD_FILE_LRU_THRESHOLD	(4096UL)
#define NFSD_FILE_LRU_LIMIT	(NFSD_FILE_LRU_THRESHOLD << 2)

/* We only care about NFSD_MAY_READ/WRITE for this cache */
#define NFSD_FILE_MAY_MASK	(NFSD_MAY_READ|NFSD_MAY_WRITE)

struct nfsd_fcache_bucket {
	struct hlist_head	nfb_head;
	spinlock_t		nfb_lock;
	unsigned int		nfb_count;
	unsigned int		nfb_maxcount;
};

static DEFINE_PER_CPU(unsigned long, nfsd_file_cache_hits);

struct nfsd_fcache_disposal {
	struct list_head list;
	struct work_struct work;
	struct net *net;
	spinlock_t lock;
	struct list_head freeme;
	struct rcu_head rcu;
};

static struct workqueue_struct *nfsd_filecache_wq __read_mostly;

static struct kmem_cache		*nfsd_file_slab;
static struct kmem_cache		*nfsd_file_mark_slab;
static struct nfsd_fcache_bucket	*nfsd_file_hashtbl;
static struct list_lru			nfsd_file_lru;
static long				nfsd_file_lru_flags;
static struct fsnotify_group		*nfsd_file_fsnotify_group;
static atomic_long_t			nfsd_filecache_count;
static struct delayed_work		nfsd_filecache_laundrette;
static DEFINE_SPINLOCK(laundrette_lock);
static LIST_HEAD(laundrettes);

static void nfsd_file_gc(void);
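
/*
 * Kick off a delayed run of the garbage collector. This is a no-op when
 * the cache is empty or shutting down; otherwise the laundrette is queued
 * on the system workqueue with a short delay so that back-to-back puts
 * coalesce into a single scan.
 */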
static void
nfsd_file_schedule_laundrette(void)
{
	long count = atomic_long_read(&nfsd_filecache_count);

	if (count == 0 || test_bit(NFSD_FILE_SHUTDOWN, &nfsd_file_lru_flags))
		return;

	queue_delayed_work(system_wq, &nfsd_filecache_laundrette,
			NFSD_LAUNDRETTE_DELAY);
}

static void
nfsd_file_slab_free(struct rcu_head *rcu)
{
	struct nfsd_file *nf = container_of(rcu, struct nfsd_file, nf_rcu);

	put_cred(nf->nf_cred);
	kmem_cache_free(nfsd_file_slab, nf);
}

static void
nfsd_file_mark_free(struct fsnotify_mark *mark)
{
	struct nfsd_file_mark *nfm = container_of(mark, struct nfsd_file_mark,
						  nfm_mark);

	kmem_cache_free(nfsd_file_mark_slab, nfm);
}

static struct nfsd_file_mark *
nfsd_file_mark_get(struct nfsd_file_mark *nfm)
{
	if (!refcount_inc_not_zero(&nfm->nfm_ref))
		return NULL;
	return nfm;
}

static void
nfsd_file_mark_put(struct nfsd_file_mark *nfm)
{
	if (refcount_dec_and_test(&nfm->nfm_ref)) {
		fsnotify_destroy_mark(&nfm->nfm_mark, nfsd_file_fsnotify_group);
		fsnotify_put_mark(&nfm->nfm_mark);
	}
}
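
/*
 * Find the fsnotify mark that this cache holds on @nf's inode, or attach
 * a freshly allocated one if the inode has none yet. Returns NULL on
 * allocation failure; loops on -EEXIST to handle racing with concurrent
 * teardown of an existing mark.
 */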
static struct nfsd_file_mark *
nfsd_file_mark_find_or_create(struct nfsd_file *nf)
{
	int			err;
	struct fsnotify_mark	*mark;
	struct nfsd_file_mark	*nfm = NULL, *new;
	struct inode *inode = nf->nf_inode;

	do {
		mutex_lock(&nfsd_file_fsnotify_group->mark_mutex);
		mark = fsnotify_find_mark(&inode->i_fsnotify_marks,
				nfsd_file_fsnotify_group);
		if (mark) {
			nfm = nfsd_file_mark_get(container_of(mark,
						 struct nfsd_file_mark,
						 nfm_mark));
			mutex_unlock(&nfsd_file_fsnotify_group->mark_mutex);
			if (nfm) {
				fsnotify_put_mark(mark);
				break;
			}
			/* Avoid soft lockup race with nfsd_file_mark_put() */
			fsnotify_destroy_mark(mark, nfsd_file_fsnotify_group);
			fsnotify_put_mark(mark);
		} else
			mutex_unlock(&nfsd_file_fsnotify_group->mark_mutex);

		/* allocate a new nfm */
		new = kmem_cache_alloc(nfsd_file_mark_slab, GFP_KERNEL);
		if (!new)
			return NULL;
		fsnotify_init_mark(&new->nfm_mark, nfsd_file_fsnotify_group);
		new->nfm_mark.mask = FS_ATTRIB|FS_DELETE_SELF;
		refcount_set(&new->nfm_ref, 1);

		err = fsnotify_add_inode_mark(&new->nfm_mark, inode, 0);

		/*
		 * If the add was successful, then return the object.
		 * Otherwise, we need to put the reference we hold on the
		 * nfm_mark. The fsnotify code will take a reference and put
		 * it on failure, so we can't just free it directly. It's also
		 * not safe to call fsnotify_destroy_mark on it as the
		 * mark->group will be NULL. Thus, we can't let the nfm_ref
		 * counter drive the destruction at this point.
		 */
		if (likely(!err))
			nfm = new;
		else
			fsnotify_put_mark(&new->nfm_mark);
	} while (unlikely(err == -EEXIST));

	return nfm;
}
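
/*
 * Allocate a new nfsd_file and initialize it for the given inode, access
 * mode, hash bucket and network namespace. The entry starts with a single
 * reference and pins the current credentials; nf_file stays NULL until
 * the actual open completes in nfsd_file_acquire().
 */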
static struct nfsd_file *
nfsd_file_alloc(struct inode *inode, unsigned int may, unsigned int hashval,
		struct net *net)
{
	struct nfsd_file *nf;

	nf = kmem_cache_alloc(nfsd_file_slab, GFP_KERNEL);
	if (nf) {
		INIT_HLIST_NODE(&nf->nf_node);
		INIT_LIST_HEAD(&nf->nf_lru);
		nf->nf_file = NULL;
		nf->nf_cred = get_current_cred();
		nf->nf_net = net;
		nf->nf_flags = 0;
		nf->nf_inode = inode;
		nf->nf_hashval = hashval;
		refcount_set(&nf->nf_ref, 1);
		nf->nf_may = may & NFSD_FILE_MAY_MASK;
		if (may & NFSD_MAY_NOT_BREAK_LEASE) {
			if (may & NFSD_MAY_WRITE)
				__set_bit(NFSD_FILE_BREAK_WRITE, &nf->nf_flags);
			if (may & NFSD_MAY_READ)
				__set_bit(NFSD_FILE_BREAK_READ, &nf->nf_flags);
		}
		nf->nf_mark = NULL;
		init_rwsem(&nf->nf_rwsem);
		trace_nfsd_file_alloc(nf);
	}
	return nf;
}

static bool
nfsd_file_free(struct nfsd_file *nf)
{
	bool flush = false;

	trace_nfsd_file_put_final(nf);
	if (nf->nf_mark)
		nfsd_file_mark_put(nf->nf_mark);
	if (nf->nf_file) {
		get_file(nf->nf_file);
		filp_close(nf->nf_file, NULL);
		fput(nf->nf_file);
		flush = true;
	}
	call_rcu(&nf->nf_rcu, nfsd_file_slab_free);
	return flush;
}
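
/*
 * Return true if this file is open for write and has pages that are
 * either dirty or under writeback. Used by the LRU walker to avoid
 * closing files with I/O still in flight.
 */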
static bool
nfsd_file_check_writeback(struct nfsd_file *nf)
{
	struct file *file = nf->nf_file;
	struct address_space *mapping;

	if (!file || !(file->f_mode & FMODE_WRITE))
		return false;
	mapping = file->f_mapping;
	return mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) ||
		mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK);
}

static int
nfsd_file_check_write_error(struct nfsd_file *nf)
{
	struct file *file = nf->nf_file;

	if (!file || !(file->f_mode & FMODE_WRITE))
		return 0;
	return filemap_check_wb_err(file->f_mapping, READ_ONCE(file->f_wb_err));
}

static void
nfsd_file_do_unhash(struct nfsd_file *nf)
{
	lockdep_assert_held(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);

	trace_nfsd_file_unhash(nf);

	if (nfsd_file_check_write_error(nf))
		nfsd_reset_boot_verifier(net_generic(nf->nf_net, nfsd_net_id));
	--nfsd_file_hashtbl[nf->nf_hashval].nfb_count;
	hlist_del_rcu(&nf->nf_node);
	atomic_long_dec(&nfsd_filecache_count);
}

static bool
nfsd_file_unhash(struct nfsd_file *nf)
{
	if (test_and_clear_bit(NFSD_FILE_HASHED, &nf->nf_flags)) {
		nfsd_file_do_unhash(nf);
		if (!list_empty(&nf->nf_lru))
			list_lru_del(&nfsd_file_lru, &nf->nf_lru);
		return true;
	}
	return false;
}

/*
 * Return true if the file was unhashed.
 */
static bool
nfsd_file_unhash_and_release_locked(struct nfsd_file *nf, struct list_head *dispose)
{
	lockdep_assert_held(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);

	trace_nfsd_file_unhash_and_release_locked(nf);
	if (!nfsd_file_unhash(nf))
		return false;
	/* keep final reference for nfsd_file_lru_dispose */
	if (refcount_dec_not_one(&nf->nf_ref))
		return true;

	list_add(&nf->nf_lru, dispose);
	return true;
}

static void
nfsd_file_put_noref(struct nfsd_file *nf)
{
	trace_nfsd_file_put(nf);

	if (refcount_dec_and_test(&nf->nf_ref)) {
		WARN_ON(test_bit(NFSD_FILE_HASHED, &nf->nf_flags));
		nfsd_file_free(nf);
	}
}
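
/*
 * Drop a reference taken via nfsd_file_acquire(). The entry is marked
 * REFERENCED so a subsequent LRU pass will skip it once before it becomes
 * eligible for garbage collection. When this looks like the last user of
 * an open file, start flushing dirty data now so the eventual close is
 * cheap, and schedule the laundrette. A synchronous GC pass runs if the
 * cache has grown past NFSD_FILE_LRU_LIMIT.
 */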
void
nfsd_file_put(struct nfsd_file *nf)
{
	bool is_hashed;

	set_bit(NFSD_FILE_REFERENCED, &nf->nf_flags);
	if (refcount_read(&nf->nf_ref) > 2 || !nf->nf_file) {
		nfsd_file_put_noref(nf);
		return;
	}

	filemap_flush(nf->nf_file->f_mapping);
	is_hashed = test_bit(NFSD_FILE_HASHED, &nf->nf_flags) != 0;
	nfsd_file_put_noref(nf);
	if (is_hashed)
		nfsd_file_schedule_laundrette();
	if (atomic_long_read(&nfsd_filecache_count) >= NFSD_FILE_LRU_LIMIT)
		nfsd_file_gc();
}

struct nfsd_file *
nfsd_file_get(struct nfsd_file *nf)
{
	if (likely(refcount_inc_not_zero(&nf->nf_ref)))
		return nf;
	return NULL;
}

static void
nfsd_file_dispose_list(struct list_head *dispose)
{
	struct nfsd_file *nf;

	while (!list_empty(dispose)) {
		nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
		list_del(&nf->nf_lru);
		nfsd_file_put_noref(nf);
	}
}

static void
nfsd_file_dispose_list_sync(struct list_head *dispose)
{
	bool flush = false;
	struct nfsd_file *nf;

	while (!list_empty(dispose)) {
		nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
		list_del(&nf->nf_lru);
		if (!refcount_dec_and_test(&nf->nf_ref))
			continue;
		if (nfsd_file_free(nf))
			flush = true;
	}
	if (flush)
		flush_delayed_fput();
}

static void
nfsd_file_list_remove_disposal(struct list_head *dst,
		struct nfsd_fcache_disposal *l)
{
	spin_lock(&l->lock);
	list_splice_init(&l->freeme, dst);
	spin_unlock(&l->lock);
}

static void
nfsd_file_list_add_disposal(struct list_head *files, struct net *net)
{
	struct nfsd_fcache_disposal *l;

	rcu_read_lock();
	list_for_each_entry_rcu(l, &laundrettes, list) {
		if (l->net == net) {
			spin_lock(&l->lock);
			list_splice_tail_init(files, &l->freeme);
			spin_unlock(&l->lock);
			queue_work(nfsd_filecache_wq, &l->work);
			break;
		}
	}
	rcu_read_unlock();
}

static void
nfsd_file_list_add_pernet(struct list_head *dst, struct list_head *src,
		struct net *net)
{
	struct nfsd_file *nf, *tmp;

	list_for_each_entry_safe(nf, tmp, src, nf_lru) {
		if (nf->nf_net == net)
			list_move_tail(&nf->nf_lru, dst);
	}
}
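
/*
 * Hand a list of nfsd_files off to the per-net disposal workqueues.
 * Entries are grouped by their network namespace and spliced onto the
 * matching laundrette's "freeme" list, whose work item will put them
 * from process context.
 */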
static void
nfsd_file_dispose_list_delayed(struct list_head *dispose)
{
	LIST_HEAD(list);
	struct nfsd_file *nf;

	while (!list_empty(dispose)) {
		nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
		nfsd_file_list_add_pernet(&list, dispose, nf->nf_net);
		nfsd_file_list_add_disposal(&list, nf->nf_net);
	}
}

/*
 * Note this can deadlock with nfsd_file_cache_purge.
 */
static enum lru_status
nfsd_file_lru_cb(struct list_head *item, struct list_lru_one *lru,
		 spinlock_t *lock, void *arg)
	__releases(lock)
	__acquires(lock)
{
	struct list_head *head = arg;
	struct nfsd_file *nf = list_entry(item, struct nfsd_file, nf_lru);

	/*
	 * Do a lockless refcount check. The hashtable holds one reference, so
	 * we look to see if anything else has a reference, or if any have
	 * been put since the shrinker last ran. Those don't get unhashed and
	 * released.
	 *
	 * Note that in the put path, we set the flag and then decrement the
	 * counter. Here we check the counter and then test and clear the flag.
	 * That order is deliberate to ensure that we can do this locklessly.
	 */
	if (refcount_read(&nf->nf_ref) > 1)
		goto out_skip;

	/*
	 * Don't throw out files that are still undergoing I/O or
	 * that have uncleared errors pending.
	 */
	if (nfsd_file_check_writeback(nf))
		goto out_skip;

	if (test_and_clear_bit(NFSD_FILE_REFERENCED, &nf->nf_flags))
		goto out_skip;

	if (!test_and_clear_bit(NFSD_FILE_HASHED, &nf->nf_flags))
		goto out_skip;

	list_lru_isolate_move(lru, &nf->nf_lru, head);
	return LRU_REMOVED;
out_skip:
	return LRU_SKIP;
}

static unsigned long
nfsd_file_lru_walk_list(struct shrink_control *sc)
{
	LIST_HEAD(head);
	struct nfsd_file *nf;
	unsigned long ret;

	if (sc)
		ret = list_lru_shrink_walk(&nfsd_file_lru, sc,
				nfsd_file_lru_cb, &head);
	else
		ret = list_lru_walk(&nfsd_file_lru,
				nfsd_file_lru_cb,
				&head, LONG_MAX);
	list_for_each_entry(nf, &head, nf_lru) {
		spin_lock(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);
		nfsd_file_do_unhash(nf);
		spin_unlock(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);
	}
	nfsd_file_dispose_list_delayed(&head);
	return ret;
}

static void
nfsd_file_gc(void)
{
	nfsd_file_lru_walk_list(NULL);
}

static void
nfsd_file_gc_worker(struct work_struct *work)
{
	nfsd_file_gc();
	nfsd_file_schedule_laundrette();
}

static unsigned long
nfsd_file_lru_count(struct shrinker *s, struct shrink_control *sc)
{
	return list_lru_count(&nfsd_file_lru);
}

static unsigned long
nfsd_file_lru_scan(struct shrinker *s, struct shrink_control *sc)
{
	return nfsd_file_lru_walk_list(sc);
}

static struct shrinker	nfsd_file_shrinker = {
	.scan_objects = nfsd_file_lru_scan,
	.count_objects = nfsd_file_lru_count,
	.seeks = 1,
};
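
/*
 * Scan one hash bucket for cached opens of @inode, unhash each match, and
 * collect on @dispose those entries whose only remaining reference was
 * the hashtable's, for the caller to free.
 */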
static void
__nfsd_file_close_inode(struct inode *inode, unsigned int hashval,
			struct list_head *dispose)
{
	struct nfsd_file	*nf;
	struct hlist_node	*tmp;

	spin_lock(&nfsd_file_hashtbl[hashval].nfb_lock);
	hlist_for_each_entry_safe(nf, tmp, &nfsd_file_hashtbl[hashval].nfb_head, nf_node) {
		if (inode == nf->nf_inode)
			nfsd_file_unhash_and_release_locked(nf, dispose);
	}
	spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
}

/**
 * nfsd_file_close_inode_sync - attempt to forcibly close an nfsd_file
 * @inode: inode of the file to attempt to remove
 *
 * Walk the whole hash bucket, looking for any files that correspond to "inode".
 * If any do, then unhash them and put the hashtable reference to them and
 * destroy any that had their last reference put. Also ensure that any of the
 * fputs also have their final __fput done as well.
 */
void
nfsd_file_close_inode_sync(struct inode *inode)
{
	unsigned int		hashval = (unsigned int)hash_long(inode->i_ino,
						NFSD_FILE_HASH_BITS);
	LIST_HEAD(dispose);

	__nfsd_file_close_inode(inode, hashval, &dispose);
	trace_nfsd_file_close_inode_sync(inode, hashval, !list_empty(&dispose));
	nfsd_file_dispose_list_sync(&dispose);
}

/**
 * nfsd_file_close_inode - attempt a delayed close of an nfsd_file
 * @inode: inode of the file to attempt to remove
 *
 * Walk the whole hash bucket, looking for any files that correspond to "inode".
 * If any do, then unhash them and put the hashtable reference to them and
 * destroy any that had their last reference put.
 */
static void
nfsd_file_close_inode(struct inode *inode)
{
	unsigned int		hashval = (unsigned int)hash_long(inode->i_ino,
						NFSD_FILE_HASH_BITS);
	LIST_HEAD(dispose);

	__nfsd_file_close_inode(inode, hashval, &dispose);
	trace_nfsd_file_close_inode(inode, hashval, !list_empty(&dispose));
	nfsd_file_dispose_list_delayed(&dispose);
}

/**
 * nfsd_file_delayed_close - close unused nfsd_files
 * @work: the work_struct embedded in a per-net nfsd_fcache_disposal
 *
 * Pull everything off this net's disposal list and put each entry,
 * closing the underlying files from workqueue context.
 */
static void
nfsd_file_delayed_close(struct work_struct *work)
{
	LIST_HEAD(head);
	struct nfsd_fcache_disposal *l = container_of(work,
			struct nfsd_fcache_disposal, work);

	nfsd_file_list_remove_disposal(&head, l);
	nfsd_file_dispose_list(&head);
}

static int
nfsd_file_lease_notifier_call(struct notifier_block *nb, unsigned long arg,
			      void *data)
{
	struct file_lock *fl = data;

	/* Only close files for F_SETLEASE leases */
	if (fl->fl_flags & FL_LEASE)
		nfsd_file_close_inode_sync(file_inode(fl->fl_file));
	return 0;
}

static struct notifier_block nfsd_file_lease_notifier = {
	.notifier_call = nfsd_file_lease_notifier_call,
};

static int
nfsd_file_fsnotify_handle_event(struct fsnotify_mark *mark, u32 mask,
				struct inode *inode, struct inode *dir,
				const struct qstr *name, u32 cookie)
{
	trace_nfsd_file_fsnotify_handle_event(inode, mask);

	/* Should be no marks on non-regular files */
	if (!S_ISREG(inode->i_mode)) {
		WARN_ON_ONCE(1);
		return 0;
	}

	/* don't close files if this was not the last link */
	if (mask & FS_ATTRIB) {
		if (inode->i_nlink)
			return 0;
	}

	nfsd_file_close_inode(inode);
	return 0;
}

static const struct fsnotify_ops nfsd_file_fsnotify_ops = {
	.handle_inode_event = nfsd_file_fsnotify_handle_event,
	.free_mark = nfsd_file_mark_free,
};
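
/*
 * Build the global cache state: the workqueue used for per-net disposal,
 * the hash table, the slabs for nfsd_file and nfsd_file_mark objects, the
 * LRU, the memory shrinker, the lease notifier and the fsnotify group.
 * Returns 0 immediately if the cache already exists; any partially
 * constructed state is torn down on error.
 */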
int
nfsd_file_cache_init(void)
{
	int		ret = -ENOMEM;
	unsigned int	i;

	clear_bit(NFSD_FILE_SHUTDOWN, &nfsd_file_lru_flags);

	if (nfsd_file_hashtbl)
		return 0;

	nfsd_filecache_wq = alloc_workqueue("nfsd_filecache", 0, 0);
	if (!nfsd_filecache_wq)
		goto out;

	nfsd_file_hashtbl = kvcalloc(NFSD_FILE_HASH_SIZE,
				sizeof(*nfsd_file_hashtbl), GFP_KERNEL);
	if (!nfsd_file_hashtbl) {
		pr_err("nfsd: unable to allocate nfsd_file_hashtbl\n");
		goto out_err;
	}

	nfsd_file_slab = kmem_cache_create("nfsd_file",
				sizeof(struct nfsd_file), 0, 0, NULL);
	if (!nfsd_file_slab) {
		pr_err("nfsd: unable to create nfsd_file_slab\n");
		goto out_err;
	}

	nfsd_file_mark_slab = kmem_cache_create("nfsd_file_mark",
					sizeof(struct nfsd_file_mark), 0, 0, NULL);
	if (!nfsd_file_mark_slab) {
		pr_err("nfsd: unable to create nfsd_file_mark_slab\n");
		goto out_err;
	}

	ret = list_lru_init(&nfsd_file_lru);
	if (ret) {
		pr_err("nfsd: failed to init nfsd_file_lru: %d\n", ret);
		goto out_err;
	}

	ret = register_shrinker(&nfsd_file_shrinker);
	if (ret) {
		pr_err("nfsd: failed to register nfsd_file_shrinker: %d\n", ret);
		goto out_lru;
	}

	ret = lease_register_notifier(&nfsd_file_lease_notifier);
	if (ret) {
		pr_err("nfsd: unable to register lease notifier: %d\n", ret);
		goto out_shrinker;
	}

	nfsd_file_fsnotify_group = fsnotify_alloc_group(&nfsd_file_fsnotify_ops);
	if (IS_ERR(nfsd_file_fsnotify_group)) {
		pr_err("nfsd: unable to create fsnotify group: %ld\n",
			PTR_ERR(nfsd_file_fsnotify_group));
		nfsd_file_fsnotify_group = NULL;
		goto out_notifier;
	}

	for (i = 0; i < NFSD_FILE_HASH_SIZE; i++) {
		INIT_HLIST_HEAD(&nfsd_file_hashtbl[i].nfb_head);
		spin_lock_init(&nfsd_file_hashtbl[i].nfb_lock);
	}

	INIT_DELAYED_WORK(&nfsd_filecache_laundrette, nfsd_file_gc_worker);
out:
	return ret;
out_notifier:
	lease_unregister_notifier(&nfsd_file_lease_notifier);
out_shrinker:
	unregister_shrinker(&nfsd_file_shrinker);
out_lru:
	list_lru_destroy(&nfsd_file_lru);
out_err:
	kmem_cache_destroy(nfsd_file_slab);
	nfsd_file_slab = NULL;
	kmem_cache_destroy(nfsd_file_mark_slab);
	nfsd_file_mark_slab = NULL;
	kvfree(nfsd_file_hashtbl);
	nfsd_file_hashtbl = NULL;
	destroy_workqueue(nfsd_filecache_wq);
	nfsd_filecache_wq = NULL;
	goto out;
}

/*
 * Note this can deadlock with nfsd_file_lru_cb.
 */
void
nfsd_file_cache_purge(struct net *net)
{
	unsigned int		i;
	struct nfsd_file	*nf;
	struct hlist_node	*next;
	LIST_HEAD(dispose);
	bool del;

	if (!nfsd_file_hashtbl)
		return;

	for (i = 0; i < NFSD_FILE_HASH_SIZE; i++) {
		struct nfsd_fcache_bucket *nfb = &nfsd_file_hashtbl[i];

		spin_lock(&nfb->nfb_lock);
		hlist_for_each_entry_safe(nf, next, &nfb->nfb_head, nf_node) {
			if (net && nf->nf_net != net)
				continue;
			del = nfsd_file_unhash_and_release_locked(nf, &dispose);

			/*
			 * Deadlock detected! Something marked this entry as
			 * unhashed, but hasn't removed it from the hash list.
			 */
			WARN_ON_ONCE(!del);
		}
		spin_unlock(&nfb->nfb_lock);
		nfsd_file_dispose_list(&dispose);
	}
}
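
/*
 * Allocate the per-net disposal structure that feeds
 * nfsd_file_delayed_close. One of these is created per network namespace
 * via nfsd_file_cache_start_net and kept on the global "laundrettes" list
 * under laundrette_lock.
 */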
static struct nfsd_fcache_disposal *
nfsd_alloc_fcache_disposal(struct net *net)
{
	struct nfsd_fcache_disposal *l;

	l = kmalloc(sizeof(*l), GFP_KERNEL);
	if (!l)
		return NULL;
	INIT_WORK(&l->work, nfsd_file_delayed_close);
	l->net = net;
	spin_lock_init(&l->lock);
	INIT_LIST_HEAD(&l->freeme);
	return l;
}

static void
nfsd_free_fcache_disposal(struct nfsd_fcache_disposal *l)
{
	rcu_assign_pointer(l->net, NULL);
	cancel_work_sync(&l->work);
	nfsd_file_dispose_list(&l->freeme);
	kfree_rcu(l, rcu);
}

static void
nfsd_add_fcache_disposal(struct nfsd_fcache_disposal *l)
{
	spin_lock(&laundrette_lock);
	list_add_tail_rcu(&l->list, &laundrettes);
	spin_unlock(&laundrette_lock);
}

static void
nfsd_del_fcache_disposal(struct nfsd_fcache_disposal *l)
{
	spin_lock(&laundrette_lock);
	list_del_rcu(&l->list);
	spin_unlock(&laundrette_lock);
}

static int
nfsd_alloc_fcache_disposal_net(struct net *net)
{
	struct nfsd_fcache_disposal *l;

	l = nfsd_alloc_fcache_disposal(net);
	if (!l)
		return -ENOMEM;
	nfsd_add_fcache_disposal(l);
	return 0;
}

static void
nfsd_free_fcache_disposal_net(struct net *net)
{
	struct nfsd_fcache_disposal *l;

	rcu_read_lock();
	list_for_each_entry_rcu(l, &laundrettes, list) {
		if (l->net != net)
			continue;
		nfsd_del_fcache_disposal(l);
		rcu_read_unlock();
		nfsd_free_fcache_disposal(l);
		return;
	}
	rcu_read_unlock();
}

int
nfsd_file_cache_start_net(struct net *net)
{
	return nfsd_alloc_fcache_disposal_net(net);
}

void
nfsd_file_cache_shutdown_net(struct net *net)
{
	nfsd_file_cache_purge(net);
	nfsd_free_fcache_disposal_net(net);
}
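
/*
 * Tear down the global cache state on server shutdown: stop new
 * laundrette runs, unregister the lease notifier and shrinker, purge
 * every remaining entry, then release the LRU, fsnotify group, slabs,
 * hash table and workqueue. The rcu_barrier() ensures all RCU-deferred
 * frees of nfsd_file objects complete before the slab is destroyed.
 */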
void
nfsd_file_cache_shutdown(void)
{
	set_bit(NFSD_FILE_SHUTDOWN, &nfsd_file_lru_flags);

	lease_unregister_notifier(&nfsd_file_lease_notifier);
	unregister_shrinker(&nfsd_file_shrinker);
	/*
	 * make sure all callers of nfsd_file_lru_cb are done before
	 * calling nfsd_file_cache_purge
	 */
	cancel_delayed_work_sync(&nfsd_filecache_laundrette);
	nfsd_file_cache_purge(NULL);
	list_lru_destroy(&nfsd_file_lru);
	rcu_barrier();
	fsnotify_put_group(nfsd_file_fsnotify_group);
	nfsd_file_fsnotify_group = NULL;
	kmem_cache_destroy(nfsd_file_slab);
	nfsd_file_slab = NULL;
	fsnotify_wait_marks_destroyed();
	kmem_cache_destroy(nfsd_file_mark_slab);
	nfsd_file_mark_slab = NULL;
	kvfree(nfsd_file_hashtbl);
	nfsd_file_hashtbl = NULL;
	destroy_workqueue(nfsd_filecache_wq);
	nfsd_filecache_wq = NULL;
}
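
/*
 * Compare two credentials for cache-matching purposes: same fsuid, same
 * fsgid, and an identical supplementary group list. A cached open is only
 * reused for a request whose credentials match the ones it was opened
 * with.
 */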
static bool
nfsd_match_cred(const struct cred *c1, const struct cred *c2)
{
	int i;

	if (!uid_eq(c1->fsuid, c2->fsuid))
		return false;
	if (!gid_eq(c1->fsgid, c2->fsgid))
		return false;
	if (c1->group_info == NULL || c2->group_info == NULL)
		return c1->group_info == c2->group_info;
	if (c1->group_info->ngroups != c2->group_info->ngroups)
		return false;
	for (i = 0; i < c1->group_info->ngroups; i++) {
		if (!gid_eq(c1->group_info->gid[i], c2->group_info->gid[i]))
			return false;
	}
	return true;
}

static struct nfsd_file *
nfsd_file_find_locked(struct inode *inode, unsigned int may_flags,
			unsigned int hashval, struct net *net)
{
	struct nfsd_file *nf;
	unsigned char need = may_flags & NFSD_FILE_MAY_MASK;

	hlist_for_each_entry_rcu(nf, &nfsd_file_hashtbl[hashval].nfb_head,
				 nf_node, lockdep_is_held(&nfsd_file_hashtbl[hashval].nfb_lock)) {
		if (nf->nf_may != need)
			continue;
		if (nf->nf_inode != inode)
			continue;
		if (nf->nf_net != net)
			continue;
		if (!nfsd_match_cred(nf->nf_cred, current_cred()))
			continue;
		if (!test_bit(NFSD_FILE_HASHED, &nf->nf_flags))
			continue;
		if (nfsd_file_get(nf) != NULL)
			return nf;
	}
	return NULL;
}

/**
 * nfsd_file_is_cached - are there any cached open files for this fh?
 * @inode: inode of the file to check
 *
 * Scan the hashtable for open files that match this fh. Returns true if there
 * are any, and false if not.
 */
bool
nfsd_file_is_cached(struct inode *inode)
{
	bool			ret = false;
	struct nfsd_file	*nf;
	unsigned int		hashval;

	hashval = (unsigned int)hash_long(inode->i_ino, NFSD_FILE_HASH_BITS);

	rcu_read_lock();
	hlist_for_each_entry_rcu(nf, &nfsd_file_hashtbl[hashval].nfb_head,
				 nf_node) {
		if (inode == nf->nf_inode) {
			ret = true;
			break;
		}
	}
	rcu_read_unlock();
	trace_nfsd_file_is_cached(inode, hashval, (int)ret);
	return ret;
}
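
/*
 * Acquire an open nfsd_file that matches the request's inode, access
 * mode, net namespace and credentials, opening and hashing a new one on a
 * cache miss. On a miss the new entry is inserted with NFSD_FILE_PENDING
 * set and the open is performed outside the bucket lock; racing threads
 * wait on that bit and retry once if construction fails. Returns nfs_ok
 * and sets *pnf on success; the caller must release the file with
 * nfsd_file_put().
 */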
__be32
nfsd_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
		  unsigned int may_flags, struct nfsd_file **pnf)
{
	__be32	status;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_file *nf, *new;
	struct inode *inode;
	unsigned int hashval;
	bool retry = true;

	/* FIXME: skip this if fh_dentry is already set? */
	status = fh_verify(rqstp, fhp, S_IFREG,
				may_flags|NFSD_MAY_OWNER_OVERRIDE);
	if (status != nfs_ok)
		return status;

	inode = d_inode(fhp->fh_dentry);
	hashval = (unsigned int)hash_long(inode->i_ino, NFSD_FILE_HASH_BITS);
retry:
	rcu_read_lock();
	nf = nfsd_file_find_locked(inode, may_flags, hashval, net);
	rcu_read_unlock();
	if (nf)
		goto wait_for_construction;

	new = nfsd_file_alloc(inode, may_flags, hashval, net);
	if (!new) {
		trace_nfsd_file_acquire(rqstp, hashval, inode, may_flags,
					NULL, nfserr_jukebox);
		return nfserr_jukebox;
	}

	spin_lock(&nfsd_file_hashtbl[hashval].nfb_lock);
	nf = nfsd_file_find_locked(inode, may_flags, hashval, net);
	if (nf == NULL)
		goto open_file;
	spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
	nfsd_file_slab_free(&new->nf_rcu);

wait_for_construction:
	wait_on_bit(&nf->nf_flags, NFSD_FILE_PENDING, TASK_UNINTERRUPTIBLE);

	/* Did construction of this file fail? */
	if (!test_bit(NFSD_FILE_HASHED, &nf->nf_flags)) {
		if (!retry) {
			status = nfserr_jukebox;
			goto out;
		}
		retry = false;
		nfsd_file_put_noref(nf);
		goto retry;
	}

	this_cpu_inc(nfsd_file_cache_hits);

	if (!(may_flags & NFSD_MAY_NOT_BREAK_LEASE)) {
		bool write = (may_flags & NFSD_MAY_WRITE);

		if (test_bit(NFSD_FILE_BREAK_READ, &nf->nf_flags) ||
		    (test_bit(NFSD_FILE_BREAK_WRITE, &nf->nf_flags) && write)) {
			status = nfserrno(nfsd_open_break_lease(
					file_inode(nf->nf_file), may_flags));
			if (status == nfs_ok) {
				clear_bit(NFSD_FILE_BREAK_READ, &nf->nf_flags);
				if (write)
					clear_bit(NFSD_FILE_BREAK_WRITE,
						  &nf->nf_flags);
			}
		}
	}
out:
	if (status == nfs_ok) {
		*pnf = nf;
	} else {
		nfsd_file_put(nf);
		nf = NULL;
	}

	trace_nfsd_file_acquire(rqstp, hashval, inode, may_flags, nf, status);
	return status;
open_file:
	nf = new;
	/* Take reference for the hashtable */
	refcount_inc(&nf->nf_ref);
	__set_bit(NFSD_FILE_HASHED, &nf->nf_flags);
	__set_bit(NFSD_FILE_PENDING, &nf->nf_flags);
	list_lru_add(&nfsd_file_lru, &nf->nf_lru);
	hlist_add_head_rcu(&nf->nf_node, &nfsd_file_hashtbl[hashval].nfb_head);
	++nfsd_file_hashtbl[hashval].nfb_count;
	nfsd_file_hashtbl[hashval].nfb_maxcount = max(nfsd_file_hashtbl[hashval].nfb_maxcount,
			nfsd_file_hashtbl[hashval].nfb_count);
	spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
	if (atomic_long_inc_return(&nfsd_filecache_count) >= NFSD_FILE_LRU_THRESHOLD)
		nfsd_file_gc();

	nf->nf_mark = nfsd_file_mark_find_or_create(nf);
	if (nf->nf_mark)
		status = nfsd_open_verified(rqstp, fhp, S_IFREG,
				may_flags, &nf->nf_file);
	else
		status = nfserr_jukebox;

	/*
	 * If construction failed, or we raced with a call to unlink()
	 * then unhash.
	 */
	if (status != nfs_ok || inode->i_nlink == 0) {
		bool do_free;

		spin_lock(&nfsd_file_hashtbl[hashval].nfb_lock);
		do_free = nfsd_file_unhash(nf);
		spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
		if (do_free)
			nfsd_file_put_noref(nf);
	}

	clear_bit_unlock(NFSD_FILE_PENDING, &nf->nf_flags);
	smp_mb__after_atomic();
	wake_up_bit(&nf->nf_flags, NFSD_FILE_PENDING);
	goto out;
}

/*
 * Note that fields may be added, removed or reordered in the future. Programs
 * scraping this file for info should test the labels to ensure they're
 * getting the correct field.
 */
static int nfsd_file_cache_stats_show(struct seq_file *m, void *v)
{
	unsigned int i, count = 0, longest = 0;
	unsigned long hits = 0;

	/*
	 * No need for spinlocks here since we're not terribly interested in
	 * accuracy. We do take the nfsd_mutex simply to ensure that we
	 * don't end up racing with server shutdown.
	 */
	mutex_lock(&nfsd_mutex);
	if (nfsd_file_hashtbl) {
		for (i = 0; i < NFSD_FILE_HASH_SIZE; i++) {
			count += nfsd_file_hashtbl[i].nfb_count;
			longest = max(longest, nfsd_file_hashtbl[i].nfb_count);
		}
	}
	mutex_unlock(&nfsd_mutex);

	for_each_possible_cpu(i)
		hits += per_cpu(nfsd_file_cache_hits, i);

	seq_printf(m, "total entries: %u\n", count);
	seq_printf(m, "longest chain: %u\n", longest);
	seq_printf(m, "cache hits: %lu\n", hits);
	return 0;
}

int nfsd_file_cache_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, nfsd_file_cache_stats_show, NULL);
}