nfs42xattr.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019, 2020 Amazon.com, Inc. or its affiliates. All rights reserved.
 *
 * User extended attribute client side cache functions.
 *
 * Author: Frank van der Linden <fllinden@amazon.com>
 */

#include <linux/errno.h>
#include <linux/nfs_fs.h>
#include <linux/hashtable.h>
#include <linux/refcount.h>
#include <uapi/linux/xattr.h>

#include "nfs4_fs.h"
#include "internal.h"

/*
 * User extended attributes client side caching is implemented by having
 * a cache structure attached to NFS inodes. This structure is allocated
 * when needed, and freed when the cache is zapped.
 *
 * The cache structure contains a hash table of entries, and a pointer
 * to a special-cased entry for the listxattr cache.
 *
 * Accessing and allocating / freeing the caches is done via reference
 * counting. The cache entries use a similar refcounting scheme.
 *
 * This makes freeing a cache, both from the shrinker and from the
 * zap cache path, easy. It also means that, in current use cases,
 * the large majority of inodes will not waste any memory, as they
 * will never have any user extended attributes assigned to them.
 *
 * Attribute entries are hashed into a simple hash table. They are
 * also part of an LRU.
 *
 * There are three shrinkers.
 *
 * Two shrinkers deal with the cache entries themselves: one for
 * large entries (> PAGE_SIZE), and one for smaller entries. The
 * shrinker for the larger entries works more aggressively than
 * the one for the smaller entries.
 *
 * The other shrinker frees the cache structures themselves.
 */

/*
 * 64 buckets is a good default. There is likely no reasonable
 * workload that uses more than even 64 user extended attributes.
 * You can certainly add a lot more - but you get what you ask for
 * in those circumstances.
 */
#define NFS4_XATTR_HASH_SIZE 64

#define NFSDBG_FACILITY NFSDBG_XATTRCACHE

struct nfs4_xattr_cache;
struct nfs4_xattr_entry;

struct nfs4_xattr_bucket {
	spinlock_t lock;
	struct hlist_head hlist;
	struct nfs4_xattr_cache *cache;
	bool draining;
};

struct nfs4_xattr_cache {
	struct kref ref;
	struct nfs4_xattr_bucket buckets[NFS4_XATTR_HASH_SIZE];
	struct list_head lru;
	struct list_head dispose;
	atomic_long_t nent;
	spinlock_t listxattr_lock;
	struct inode *inode;
	struct nfs4_xattr_entry *listxattr;
};

struct nfs4_xattr_entry {
	struct kref ref;
	struct hlist_node hnode;
	struct list_head lru;
	struct list_head dispose;
	char *xattr_name;
	void *xattr_value;
	size_t xattr_size;
	struct nfs4_xattr_bucket *bucket;
	uint32_t flags;
};

#define NFS4_XATTR_ENTRY_EXTVAL 0x0001

/*
 * LRU list of NFS inodes that have xattr caches.
 */
static struct list_lru nfs4_xattr_cache_lru;
static struct list_lru nfs4_xattr_entry_lru;
static struct list_lru nfs4_xattr_large_entry_lru;

static struct kmem_cache *nfs4_xattr_cache_cachep;

/*
 * Hashing helper functions.
 */
static void
nfs4_xattr_hash_init(struct nfs4_xattr_cache *cache)
{
	unsigned int i;

	for (i = 0; i < NFS4_XATTR_HASH_SIZE; i++) {
		INIT_HLIST_HEAD(&cache->buckets[i].hlist);
		spin_lock_init(&cache->buckets[i].lock);
		cache->buckets[i].cache = cache;
		cache->buckets[i].draining = false;
	}
}

/*
 * Locking order:
 * 1. inode i_lock or bucket lock
 * 2. list_lru lock (taken by list_lru_* functions)
 */

/*
 * Wrapper functions to add a cache entry to the right LRU.
 */
static bool
nfs4_xattr_entry_lru_add(struct nfs4_xattr_entry *entry)
{
	struct list_lru *lru;

	lru = (entry->flags & NFS4_XATTR_ENTRY_EXTVAL) ?
	    &nfs4_xattr_large_entry_lru : &nfs4_xattr_entry_lru;

	return list_lru_add(lru, &entry->lru);
}

static bool
nfs4_xattr_entry_lru_del(struct nfs4_xattr_entry *entry)
{
	struct list_lru *lru;

	lru = (entry->flags & NFS4_XATTR_ENTRY_EXTVAL) ?
	    &nfs4_xattr_large_entry_lru : &nfs4_xattr_entry_lru;

	return list_lru_del(lru, &entry->lru);
}

/*
 * This function allocates cache entries. They are the normal
 * extended attribute name/value pairs, but may also be a listxattr
 * cache. Those allocations use the same entry so that they can be
 * treated as one by the memory shrinker.
 *
 * xattr cache entries are allocated together with names. If the
 * value fits into one page with the entry structure and the name,
 * it will also be part of the same allocation (kmalloc). This is
 * expected to be the vast majority of cases. Larger allocations
 * have a value pointer that is allocated separately by kvmalloc.
 *
 * Parameters:
 *
 * @name:  Name of the extended attribute. NULL for listxattr cache
 *         entry.
 * @value: Value of attribute, or listxattr cache. NULL if the
 *         value is to be copied from pages instead.
 * @pages: Pages to copy the value from, if not NULL. Passed in to
 *         make it easier to copy the value after an RPC, even if
 *         the value will not be passed up to application (e.g.
 *         for a 'query' getxattr with NULL buffer).
 * @len:   Length of the value. Can be 0 for zero-length attributes.
 *         @value and @pages will be NULL if @len is 0.
 */
static struct nfs4_xattr_entry *
nfs4_xattr_alloc_entry(const char *name, const void *value,
		       struct page **pages, size_t len)
{
	struct nfs4_xattr_entry *entry;
	void *valp;
	char *namep;
	size_t alloclen, slen;
	char *buf;
	uint32_t flags;

	BUILD_BUG_ON(sizeof(struct nfs4_xattr_entry) +
	    XATTR_NAME_MAX + 1 > PAGE_SIZE);

	alloclen = sizeof(struct nfs4_xattr_entry);
	if (name != NULL) {
		slen = strlen(name) + 1;
		alloclen += slen;
	} else
		slen = 0;

	if (alloclen + len <= PAGE_SIZE) {
		alloclen += len;
		flags = 0;
	} else {
		flags = NFS4_XATTR_ENTRY_EXTVAL;
	}

	buf = kmalloc(alloclen, GFP_KERNEL_ACCOUNT | GFP_NOFS);
	if (buf == NULL)
		return NULL;
	entry = (struct nfs4_xattr_entry *)buf;

	if (name != NULL) {
		namep = buf + sizeof(struct nfs4_xattr_entry);
		memcpy(namep, name, slen);
	} else {
		namep = NULL;
	}

	if (flags & NFS4_XATTR_ENTRY_EXTVAL) {
		valp = kvmalloc(len, GFP_KERNEL_ACCOUNT | GFP_NOFS);
		if (valp == NULL) {
			kfree(buf);
			return NULL;
		}
	} else if (len != 0) {
		valp = buf + sizeof(struct nfs4_xattr_entry) + slen;
	} else
		valp = NULL;

	if (valp != NULL) {
		if (value != NULL)
			memcpy(valp, value, len);
		else
			_copy_from_pages(valp, pages, 0, len);
	}

	entry->flags = flags;
	entry->xattr_value = valp;
	kref_init(&entry->ref);
	entry->xattr_name = namep;
	entry->xattr_size = len;
	entry->bucket = NULL;
	INIT_LIST_HEAD(&entry->lru);
	INIT_LIST_HEAD(&entry->dispose);
	INIT_HLIST_NODE(&entry->hnode);

	return entry;
}
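
/*
 * Free a cache entry. The name and, for small entries, the value are
 * part of the entry allocation itself; values larger than a page
 * (NFS4_XATTR_ENTRY_EXTVAL) live in a separate kvmalloc'ed buffer
 * and are freed first.
 */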
static void
nfs4_xattr_free_entry(struct nfs4_xattr_entry *entry)
{
	if (entry->flags & NFS4_XATTR_ENTRY_EXTVAL)
		kvfree(entry->xattr_value);
	kfree(entry);
}
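
/*
 * kref release callback for cache entries. An entry must already be
 * off its LRU list by the time the last reference is dropped; if not,
 * warn and leak the entry rather than corrupt the list.
 */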
static void
nfs4_xattr_free_entry_cb(struct kref *kref)
{
	struct nfs4_xattr_entry *entry;

	entry = container_of(kref, struct nfs4_xattr_entry, ref);

	if (WARN_ON(!list_empty(&entry->lru)))
		return;

	nfs4_xattr_free_entry(entry);
}
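
/*
 * kref release callback for a cache structure. All buckets must be
 * empty by now; the constructor-initialized state (draining flags,
 * listxattr pointer) is reset before the object goes back to the slab.
 */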
static void
nfs4_xattr_free_cache_cb(struct kref *kref)
{
	struct nfs4_xattr_cache *cache;
	int i;

	cache = container_of(kref, struct nfs4_xattr_cache, ref);

	for (i = 0; i < NFS4_XATTR_HASH_SIZE; i++) {
		if (WARN_ON(!hlist_empty(&cache->buckets[i].hlist)))
			return;
		cache->buckets[i].draining = false;
	}

	cache->listxattr = NULL;

	kmem_cache_free(nfs4_xattr_cache_cachep, cache);
}
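
/*
 * Allocate a cache structure. The locks, buckets and list heads are
 * set up by the slab constructor (nfs4_xattr_cache_init_once).
 */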
static struct nfs4_xattr_cache *
nfs4_xattr_alloc_cache(void)
{
	struct nfs4_xattr_cache *cache;

	cache = kmem_cache_alloc(nfs4_xattr_cache_cachep,
	    GFP_KERNEL_ACCOUNT | GFP_NOFS);
	if (cache == NULL)
		return NULL;

	kref_init(&cache->ref);
	atomic_long_set(&cache->nent, 0);

	return cache;
}

/*
 * Set the listxattr cache, which is a special-cased cache entry.
 * The special value ERR_PTR(-ESTALE) is used to indicate that
 * the cache is being drained - this prevents a new listxattr
 * cache from being added to what is now a stale cache.
 */
static int
nfs4_xattr_set_listcache(struct nfs4_xattr_cache *cache,
			 struct nfs4_xattr_entry *new)
{
	struct nfs4_xattr_entry *old;
	int ret = 1;

	spin_lock(&cache->listxattr_lock);

	old = cache->listxattr;

	if (old == ERR_PTR(-ESTALE)) {
		ret = 0;
		goto out;
	}

	cache->listxattr = new;
	if (new != NULL && new != ERR_PTR(-ESTALE))
		nfs4_xattr_entry_lru_add(new);

	if (old != NULL) {
		nfs4_xattr_entry_lru_del(old);
		kref_put(&old->ref, nfs4_xattr_free_entry_cb);
	}
out:
	spin_unlock(&cache->listxattr_lock);

	return ret;
}

/*
 * Unlink a cache from its parent inode, clearing out an invalid
 * cache. Must be called with i_lock held.
 */
static struct nfs4_xattr_cache *
nfs4_xattr_cache_unlink(struct inode *inode)
{
	struct nfs_inode *nfsi;
	struct nfs4_xattr_cache *oldcache;

	nfsi = NFS_I(inode);

	oldcache = nfsi->xattr_cache;
	if (oldcache != NULL) {
		list_lru_del(&nfs4_xattr_cache_lru, &oldcache->lru);
		oldcache->inode = NULL;
	}
	nfsi->xattr_cache = NULL;
	nfsi->cache_validity &= ~NFS_INO_INVALID_XATTR;

	return oldcache;
}

/*
 * Discard a cache. Called by get_cache() if there was an old,
 * invalid cache. Can also be called from a shrinker callback.
 *
 * The cache is dead, it has already been unlinked from its inode,
 * and no longer appears on the cache LRU list.
 *
 * Mark all buckets as draining, so that no new entries are added. This
 * could still happen in the unlikely, but possible case that another
 * thread had grabbed a reference before it was unlinked from the inode,
 * and is still holding it for an add operation.
 *
 * Remove all entries from the LRU lists, so that there is no longer
 * any way to 'find' this cache. Then, remove the entries from the hash
 * table.
 *
 * At that point, the cache will remain empty and can be freed when the final
 * reference drops, which is very likely the kref_put at the end of
 * this function, or the one called immediately afterwards in the
 * shrinker callback.
 */
static void
nfs4_xattr_discard_cache(struct nfs4_xattr_cache *cache)
{
	unsigned int i;
	struct nfs4_xattr_entry *entry;
	struct nfs4_xattr_bucket *bucket;
	struct hlist_node *n;

	nfs4_xattr_set_listcache(cache, ERR_PTR(-ESTALE));

	for (i = 0; i < NFS4_XATTR_HASH_SIZE; i++) {
		bucket = &cache->buckets[i];

		spin_lock(&bucket->lock);
		bucket->draining = true;
		hlist_for_each_entry_safe(entry, n, &bucket->hlist, hnode) {
			nfs4_xattr_entry_lru_del(entry);
			hlist_del_init(&entry->hnode);
			kref_put(&entry->ref, nfs4_xattr_free_entry_cb);
		}
		spin_unlock(&bucket->lock);
	}

	atomic_long_set(&cache->nent, 0);

	kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
}

/*
 * Get a referenced copy of the cache structure. Avoid doing allocs
 * while holding i_lock. Which means that we do some optimistic allocation,
 * and might have to free the result in rare cases.
 *
 * This function only checks the NFS_INO_INVALID_XATTR cache validity bit
 * and acts accordingly, replacing the cache when needed. For the read case
 * (!add), this means that the caller must make sure that the cache
 * is valid before calling this function. getxattr and listxattr call
 * revalidate_inode to do this. The attribute cache timeout (for the
 * non-delegated case) is expected to be dealt with in the revalidate
 * call.
 */
static struct nfs4_xattr_cache *
nfs4_xattr_get_cache(struct inode *inode, int add)
{
	struct nfs_inode *nfsi;
	struct nfs4_xattr_cache *cache, *oldcache, *newcache;

	nfsi = NFS_I(inode);

	cache = oldcache = NULL;

	spin_lock(&inode->i_lock);

	if (nfsi->cache_validity & NFS_INO_INVALID_XATTR)
		oldcache = nfs4_xattr_cache_unlink(inode);
	else
		cache = nfsi->xattr_cache;

	if (cache != NULL)
		kref_get(&cache->ref);

	spin_unlock(&inode->i_lock);

	if (add && cache == NULL) {
		newcache = NULL;

		cache = nfs4_xattr_alloc_cache();
		if (cache == NULL)
			goto out;

		spin_lock(&inode->i_lock);
		if (nfsi->cache_validity & NFS_INO_INVALID_XATTR) {
			/*
			 * The cache was invalidated again. Give up,
			 * since what we want to enter is now likely
			 * outdated anyway.
			 */
			spin_unlock(&inode->i_lock);
			kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
			cache = NULL;
			goto out;
		}

		/*
		 * Check if someone beat us to it.
		 */
		if (nfsi->xattr_cache != NULL) {
			newcache = nfsi->xattr_cache;
			kref_get(&newcache->ref);
		} else {
			kref_get(&cache->ref);
			nfsi->xattr_cache = cache;
			cache->inode = inode;
			list_lru_add(&nfs4_xattr_cache_lru, &cache->lru);
		}

		spin_unlock(&inode->i_lock);

		/*
		 * If there was a race, throw away the cache we just
		 * allocated, and use the new one allocated by someone
		 * else.
		 */
		if (newcache != NULL) {
			kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
			cache = newcache;
		}
	}

out:
	/*
	 * Discard the now orphaned old cache.
	 */
	if (oldcache != NULL)
		nfs4_xattr_discard_cache(oldcache);

	return cache;
}
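
/*
 * Map an xattr name to its hash bucket. NFS4_XATTR_HASH_SIZE is a
 * power of two, so masking the jhash value selects the bucket.
 */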
static inline struct nfs4_xattr_bucket *
nfs4_xattr_hash_bucket(struct nfs4_xattr_cache *cache, const char *name)
{
	return &cache->buckets[jhash(name, strlen(name), 0) &
	    (ARRAY_SIZE(cache->buckets) - 1)];
}
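
/*
 * Look up an entry by name in a bucket. The caller must hold the
 * bucket lock; no reference is taken on the returned entry.
 */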
static struct nfs4_xattr_entry *
nfs4_xattr_get_entry(struct nfs4_xattr_bucket *bucket, const char *name)
{
	struct nfs4_xattr_entry *entry;

	entry = NULL;
	hlist_for_each_entry(entry, &bucket->hlist, hnode) {
		if (!strcmp(entry->xattr_name, name))
			break;
	}

	return entry;
}
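
/*
 * Insert an entry into the hash table, replacing any existing entry
 * with the same name. Returns 1 on success, or 0 if the bucket is
 * draining because the cache is being discarded.
 */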
static int
nfs4_xattr_hash_add(struct nfs4_xattr_cache *cache,
		    struct nfs4_xattr_entry *entry)
{
	struct nfs4_xattr_bucket *bucket;
	struct nfs4_xattr_entry *oldentry = NULL;
	int ret = 1;

	bucket = nfs4_xattr_hash_bucket(cache, entry->xattr_name);
	entry->bucket = bucket;

	spin_lock(&bucket->lock);

	if (bucket->draining) {
		ret = 0;
		goto out;
	}

	oldentry = nfs4_xattr_get_entry(bucket, entry->xattr_name);
	if (oldentry != NULL) {
		hlist_del_init(&oldentry->hnode);
		nfs4_xattr_entry_lru_del(oldentry);
	} else {
		atomic_long_inc(&cache->nent);
	}

	hlist_add_head(&entry->hnode, &bucket->hlist);
	nfs4_xattr_entry_lru_add(entry);

out:
	spin_unlock(&bucket->lock);

	if (oldentry != NULL)
		kref_put(&oldentry->ref, nfs4_xattr_free_entry_cb);

	return ret;
}
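
/*
 * Remove the named entry from the hash table, if present, and drop
 * the reference that the hash table held on it.
 */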
static void
nfs4_xattr_hash_remove(struct nfs4_xattr_cache *cache, const char *name)
{
	struct nfs4_xattr_bucket *bucket;
	struct nfs4_xattr_entry *entry;

	bucket = nfs4_xattr_hash_bucket(cache, name);

	spin_lock(&bucket->lock);

	entry = nfs4_xattr_get_entry(bucket, name);
	if (entry != NULL) {
		hlist_del_init(&entry->hnode);
		nfs4_xattr_entry_lru_del(entry);
		atomic_long_dec(&cache->nent);
	}

	spin_unlock(&bucket->lock);

	if (entry != NULL)
		kref_put(&entry->ref, nfs4_xattr_free_entry_cb);
}
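
/*
 * Find the named entry and return it with an extra reference taken,
 * or NULL if it is not in the cache.
 */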
static struct nfs4_xattr_entry *
nfs4_xattr_hash_find(struct nfs4_xattr_cache *cache, const char *name)
{
	struct nfs4_xattr_bucket *bucket;
	struct nfs4_xattr_entry *entry;

	bucket = nfs4_xattr_hash_bucket(cache, name);

	spin_lock(&bucket->lock);

	entry = nfs4_xattr_get_entry(bucket, name);
	if (entry != NULL)
		kref_get(&entry->ref);

	spin_unlock(&bucket->lock);

	return entry;
}

/*
 * Entry point to retrieve an entry from the cache.
 */
ssize_t nfs4_xattr_cache_get(struct inode *inode, const char *name, char *buf,
			     ssize_t buflen)
{
	struct nfs4_xattr_cache *cache;
	struct nfs4_xattr_entry *entry;
	ssize_t ret;

	cache = nfs4_xattr_get_cache(inode, 0);
	if (cache == NULL)
		return -ENOENT;

	ret = 0;
	entry = nfs4_xattr_hash_find(cache, name);

	if (entry != NULL) {
		dprintk("%s: cache hit '%s', len %lu\n", __func__,
		    entry->xattr_name, (unsigned long)entry->xattr_size);
		if (buflen == 0) {
			/* Length probe only */
			ret = entry->xattr_size;
		} else if (buflen < entry->xattr_size)
			ret = -ERANGE;
		else {
			memcpy(buf, entry->xattr_value, entry->xattr_size);
			ret = entry->xattr_size;
		}
		kref_put(&entry->ref, nfs4_xattr_free_entry_cb);
	} else {
		dprintk("%s: cache miss '%s'\n", __func__, name);
		ret = -ENOENT;
	}

	kref_put(&cache->ref, nfs4_xattr_free_cache_cb);

	return ret;
}

/*
 * Retrieve a cached list of xattrs from the cache.
 */
ssize_t nfs4_xattr_cache_list(struct inode *inode, char *buf, ssize_t buflen)
{
	struct nfs4_xattr_cache *cache;
	struct nfs4_xattr_entry *entry;
	ssize_t ret;

	cache = nfs4_xattr_get_cache(inode, 0);
	if (cache == NULL)
		return -ENOENT;

	spin_lock(&cache->listxattr_lock);

	entry = cache->listxattr;

	if (entry != NULL && entry != ERR_PTR(-ESTALE)) {
		if (buflen == 0) {
			/* Length probe only */
			ret = entry->xattr_size;
		} else if (entry->xattr_size > buflen)
			ret = -ERANGE;
		else {
			memcpy(buf, entry->xattr_value, entry->xattr_size);
			ret = entry->xattr_size;
		}
	} else {
		ret = -ENOENT;
	}

	spin_unlock(&cache->listxattr_lock);

	kref_put(&cache->ref, nfs4_xattr_free_cache_cb);

	return ret;
}

/*
 * Add an xattr to the cache.
 *
 * This also invalidates the xattr list cache.
 */
void nfs4_xattr_cache_add(struct inode *inode, const char *name,
			  const char *buf, struct page **pages, ssize_t buflen)
{
	struct nfs4_xattr_cache *cache;
	struct nfs4_xattr_entry *entry;

	dprintk("%s: add '%s' len %lu\n", __func__,
	    name, (unsigned long)buflen);

	cache = nfs4_xattr_get_cache(inode, 1);
	if (cache == NULL)
		return;

	entry = nfs4_xattr_alloc_entry(name, buf, pages, buflen);
	if (entry == NULL)
		goto out;

	(void)nfs4_xattr_set_listcache(cache, NULL);

	if (!nfs4_xattr_hash_add(cache, entry))
		kref_put(&entry->ref, nfs4_xattr_free_entry_cb);

out:
	kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
}

/*
 * Remove an xattr from the cache.
 *
 * This also invalidates the xattr list cache.
 */
void nfs4_xattr_cache_remove(struct inode *inode, const char *name)
{
	struct nfs4_xattr_cache *cache;

	dprintk("%s: remove '%s'\n", __func__, name);

	cache = nfs4_xattr_get_cache(inode, 0);
	if (cache == NULL)
		return;

	(void)nfs4_xattr_set_listcache(cache, NULL);
	nfs4_xattr_hash_remove(cache, name);

	kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
}

/*
 * Cache listxattr output, replacing any possible old one.
 */
void nfs4_xattr_cache_set_list(struct inode *inode, const char *buf,
			       ssize_t buflen)
{
	struct nfs4_xattr_cache *cache;
	struct nfs4_xattr_entry *entry;

	cache = nfs4_xattr_get_cache(inode, 1);
	if (cache == NULL)
		return;

	entry = nfs4_xattr_alloc_entry(NULL, buf, NULL, buflen);
	if (entry == NULL)
		goto out;

	/*
	 * This is just there to be able to get to bucket->cache,
	 * which is obviously the same for all buckets, so just
	 * use bucket 0.
	 */
	entry->bucket = &cache->buckets[0];

	if (!nfs4_xattr_set_listcache(cache, entry))
		kref_put(&entry->ref, nfs4_xattr_free_entry_cb);

out:
	kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
}

/*
 * Zap the entire cache. Called when an inode is evicted.
 */
void nfs4_xattr_cache_zap(struct inode *inode)
{
	struct nfs4_xattr_cache *oldcache;

	spin_lock(&inode->i_lock);
	oldcache = nfs4_xattr_cache_unlink(inode);
	spin_unlock(&inode->i_lock);

	if (oldcache)
		nfs4_xattr_discard_cache(oldcache);
}

/*
 * The large-entry LRU is shrunk more aggressively than the other LRUs,
 * by setting @seeks to 1.
 *
 * Cache structures are freed only when they've become empty, after
 * pruning all but one entry.
 */
static unsigned long nfs4_xattr_cache_count(struct shrinker *shrink,
					    struct shrink_control *sc);
static unsigned long nfs4_xattr_entry_count(struct shrinker *shrink,
					    struct shrink_control *sc);
static unsigned long nfs4_xattr_cache_scan(struct shrinker *shrink,
					   struct shrink_control *sc);
static unsigned long nfs4_xattr_entry_scan(struct shrinker *shrink,
					   struct shrink_control *sc);

static struct shrinker nfs4_xattr_cache_shrinker = {
	.count_objects = nfs4_xattr_cache_count,
	.scan_objects = nfs4_xattr_cache_scan,
	.seeks = DEFAULT_SEEKS,
	.flags = SHRINKER_MEMCG_AWARE,
};

static struct shrinker nfs4_xattr_entry_shrinker = {
	.count_objects = nfs4_xattr_entry_count,
	.scan_objects = nfs4_xattr_entry_scan,
	.seeks = DEFAULT_SEEKS,
	.batch = 512,
	.flags = SHRINKER_MEMCG_AWARE,
};

static struct shrinker nfs4_xattr_large_entry_shrinker = {
	.count_objects = nfs4_xattr_entry_count,
	.scan_objects = nfs4_xattr_entry_scan,
	.seeks = 1,
	.batch = 512,
	.flags = SHRINKER_MEMCG_AWARE,
};
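
/*
 * LRU walk callback for the cache shrinker. Caches that still hold
 * more than one entry are skipped; otherwise the cache is unlinked
 * from its inode and queued on the dispose list for freeing.
 */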
static enum lru_status
cache_lru_isolate(struct list_head *item,
	struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *dispose = arg;
	struct inode *inode;
	struct nfs4_xattr_cache *cache = container_of(item,
	    struct nfs4_xattr_cache, lru);

	if (atomic_long_read(&cache->nent) > 1)
		return LRU_SKIP;

	/*
	 * If a cache structure is on the LRU list, we know that
	 * its inode is valid. Try to lock it to break the link.
	 * Since we're inverting the lock order here, only try.
	 */
	inode = cache->inode;

	if (!spin_trylock(&inode->i_lock))
		return LRU_SKIP;

	kref_get(&cache->ref);

	cache->inode = NULL;
	NFS_I(inode)->xattr_cache = NULL;
	NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_XATTR;
	list_lru_isolate(lru, &cache->lru);

	spin_unlock(&inode->i_lock);

	list_add_tail(&cache->dispose, dispose);

	return LRU_REMOVED;
}
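
/*
 * Shrinker scan callback for cache structures: walk the cache LRU,
 * then discard each isolated cache and drop the reference taken in
 * cache_lru_isolate().
 */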
static unsigned long
nfs4_xattr_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	LIST_HEAD(dispose);
	unsigned long freed;
	struct nfs4_xattr_cache *cache;

	freed = list_lru_shrink_walk(&nfs4_xattr_cache_lru, sc,
	    cache_lru_isolate, &dispose);
	while (!list_empty(&dispose)) {
		cache = list_first_entry(&dispose, struct nfs4_xattr_cache,
		    dispose);
		list_del_init(&cache->dispose);
		nfs4_xattr_discard_cache(cache);
		kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
	}

	return freed;
}

static unsigned long
nfs4_xattr_cache_count(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long count;

	count = list_lru_shrink_count(&nfs4_xattr_cache_lru, sc);
	return vfs_pressure_ratio(count);
}

static enum lru_status
entry_lru_isolate(struct list_head *item,
	struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *dispose = arg;
	struct nfs4_xattr_bucket *bucket;
	struct nfs4_xattr_cache *cache;
	struct nfs4_xattr_entry *entry = container_of(item,
	    struct nfs4_xattr_entry, lru);

	bucket = entry->bucket;
	cache = bucket->cache;

	/*
	 * Unhook the entry from its parent (either a cache bucket
	 * or a cache structure if it's a listxattr buf), so that
	 * it's no longer found. Then add it to the isolate list,
	 * to be freed later.
	 *
	 * In both cases, we're inverting the lock order, so use
	 * trylock and skip the entry if we can't get the lock.
	 */
	if (entry->xattr_name != NULL) {
		/* Regular cache entry */
		if (!spin_trylock(&bucket->lock))
			return LRU_SKIP;

		kref_get(&entry->ref);

		hlist_del_init(&entry->hnode);
		atomic_long_dec(&cache->nent);
		list_lru_isolate(lru, &entry->lru);

		spin_unlock(&bucket->lock);
	} else {
		/* Listxattr cache entry */
		if (!spin_trylock(&cache->listxattr_lock))
			return LRU_SKIP;

		kref_get(&entry->ref);

		cache->listxattr = NULL;
		list_lru_isolate(lru, &entry->lru);

		spin_unlock(&cache->listxattr_lock);
	}

	list_add_tail(&entry->dispose, dispose);
	return LRU_REMOVED;
}

static unsigned long
nfs4_xattr_entry_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	LIST_HEAD(dispose);
	unsigned long freed;
	struct nfs4_xattr_entry *entry;
	struct list_lru *lru;

	lru = (shrink == &nfs4_xattr_large_entry_shrinker) ?
	    &nfs4_xattr_large_entry_lru : &nfs4_xattr_entry_lru;

	freed = list_lru_shrink_walk(lru, sc, entry_lru_isolate, &dispose);

	while (!list_empty(&dispose)) {
		entry = list_first_entry(&dispose, struct nfs4_xattr_entry,
		    dispose);
		list_del_init(&entry->dispose);

		/*
		 * Drop two references: the one that we just grabbed
		 * in entry_lru_isolate, and the one that was set
		 * when the entry was first allocated.
		 */
		kref_put(&entry->ref, nfs4_xattr_free_entry_cb);
		kref_put(&entry->ref, nfs4_xattr_free_entry_cb);
	}

	return freed;
}

static unsigned long
nfs4_xattr_entry_count(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long count;
	struct list_lru *lru;

	lru = (shrink == &nfs4_xattr_large_entry_shrinker) ?
	    &nfs4_xattr_large_entry_lru : &nfs4_xattr_entry_lru;

	count = list_lru_shrink_count(lru, sc);
	return vfs_pressure_ratio(count);
}
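
/*
 * Slab object constructor. This runs when the slab allocator first
 * initializes the backing memory for a cache object, not on every
 * allocation, so the fields set up here must be returned to this
 * state before an object is freed back to the slab.
 */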
static void nfs4_xattr_cache_init_once(void *p)
{
	struct nfs4_xattr_cache *cache = (struct nfs4_xattr_cache *)p;

	spin_lock_init(&cache->listxattr_lock);
	atomic_long_set(&cache->nent, 0);
	nfs4_xattr_hash_init(cache);
	cache->listxattr = NULL;
	INIT_LIST_HEAD(&cache->lru);
	INIT_LIST_HEAD(&cache->dispose);
}

int __init nfs4_xattr_cache_init(void)
{
	int ret = 0;

	nfs4_xattr_cache_cachep = kmem_cache_create("nfs4_xattr_cache_cache",
	    sizeof(struct nfs4_xattr_cache), 0,
	    (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_ACCOUNT),
	    nfs4_xattr_cache_init_once);
	if (nfs4_xattr_cache_cachep == NULL)
		return -ENOMEM;

	ret = list_lru_init_memcg(&nfs4_xattr_large_entry_lru,
	    &nfs4_xattr_large_entry_shrinker);
	if (ret)
		goto out4;

	ret = list_lru_init_memcg(&nfs4_xattr_entry_lru,
	    &nfs4_xattr_entry_shrinker);
	if (ret)
		goto out3;

	ret = list_lru_init_memcg(&nfs4_xattr_cache_lru,
	    &nfs4_xattr_cache_shrinker);
	if (ret)
		goto out2;

	ret = register_shrinker(&nfs4_xattr_cache_shrinker);
	if (ret)
		goto out1;

	ret = register_shrinker(&nfs4_xattr_entry_shrinker);
	if (ret)
		goto out;

	ret = register_shrinker(&nfs4_xattr_large_entry_shrinker);
	if (!ret)
		return 0;

	unregister_shrinker(&nfs4_xattr_entry_shrinker);
out:
	unregister_shrinker(&nfs4_xattr_cache_shrinker);
out1:
	list_lru_destroy(&nfs4_xattr_cache_lru);
out2:
	list_lru_destroy(&nfs4_xattr_entry_lru);
out3:
	list_lru_destroy(&nfs4_xattr_large_entry_lru);
out4:
	kmem_cache_destroy(nfs4_xattr_cache_cachep);

	return ret;
}

void nfs4_xattr_cache_exit(void)
{
	unregister_shrinker(&nfs4_xattr_large_entry_shrinker);
	unregister_shrinker(&nfs4_xattr_entry_shrinker);
	unregister_shrinker(&nfs4_xattr_cache_shrinker);
	list_lru_destroy(&nfs4_xattr_large_entry_lru);
	list_lru_destroy(&nfs4_xattr_entry_lru);
	list_lru_destroy(&nfs4_xattr_cache_lru);
	kmem_cache_destroy(nfs4_xattr_cache_cachep);
}