pnfs_dev.c

/*
 *  Device operations for the pnfs client.
 *
 *  Copyright (c) 2002
 *  The Regents of the University of Michigan
 *  All Rights Reserved
 *
 *  Dean Hildebrand <dhildebz@umich.edu>
 *  Garth Goodson   <Garth.Goodson@netapp.com>
 *
 *  Permission is granted to use, copy, create derivative works, and
 *  redistribute this software and such derivative works for any purpose,
 *  so long as the name of the University of Michigan is not used in
 *  any advertising or publicity pertaining to the use or distribution
 *  of this software without specific, written prior authorization. If
 *  the above copyright notice or any other identification of the
 *  University of Michigan is included in any copy of any portion of
 *  this software, then the disclaimer below must also be included.
 *
 *  This software is provided as is, without representation or warranty
 *  of any kind either express or implied, including without limitation
 *  the implied warranties of merchantability, fitness for a particular
 *  purpose, or noninfringement.  The Regents of the University of
 *  Michigan shall not be liable for any damages, including special,
 *  indirect, incidental, or consequential damages, with respect to any
 *  claim arising out of or in connection with the use of the software,
 *  even if it has been or is hereafter advised of the possibility of
 *  such damages.
 */
#include <linux/export.h>
#include <linux/nfs_fs.h>
#include "nfs4session.h"
#include "internal.h"
#include "pnfs.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS
/*
 * Device ID RCU cache. A device ID is unique per server and layout type.
 */
#define NFS4_DEVICE_ID_HASH_BITS	5
#define NFS4_DEVICE_ID_HASH_SIZE	(1 << NFS4_DEVICE_ID_HASH_BITS)
#define NFS4_DEVICE_ID_HASH_MASK	(NFS4_DEVICE_ID_HASH_SIZE - 1)

static struct hlist_head nfs4_deviceid_cache[NFS4_DEVICE_ID_HASH_SIZE];
static DEFINE_SPINLOCK(nfs4_deviceid_lock);
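
/*
 * Note on locking (illustrative summary): lookups traverse the hash buckets
 * above under rcu_read_lock(), while insertion and removal are serialized by
 * nfs4_deviceid_lock; see _lookup_deviceid() and nfs4_delete_deviceid() below.
 */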

#ifdef NFS_DEBUG
void
nfs4_print_deviceid(const struct nfs4_deviceid *id)
{
	u32 *p = (u32 *)id;

	dprintk("%s: device id= [%x%x%x%x]\n", __func__,
		p[0], p[1], p[2], p[3]);
}
EXPORT_SYMBOL_GPL(nfs4_print_deviceid);
#endif

static inline u32
nfs4_deviceid_hash(const struct nfs4_deviceid *id)
{
	unsigned char *cptr = (unsigned char *)id->data;
	unsigned int nbytes = NFS4_DEVICEID4_SIZE;
	u32 x = 0;

	while (nbytes--) {
		x *= 37;
		x += *cptr++;
	}
	return x & NFS4_DEVICE_ID_HASH_MASK;
}
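
/*
 * Illustrative note: the hash above is a simple multiply-by-37 rolling hash
 * over the opaque NFS4_DEVICEID4_SIZE bytes of the device ID, masked down to
 * one of the hash buckets, so callers index the cache as
 * nfs4_deviceid_cache[nfs4_deviceid_hash(id)].
 */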

static struct nfs4_deviceid_node *
_lookup_deviceid(const struct pnfs_layoutdriver_type *ld,
		 const struct nfs_client *clp, const struct nfs4_deviceid *id,
		 long hash)
{
	struct nfs4_deviceid_node *d;

	hlist_for_each_entry_rcu(d, &nfs4_deviceid_cache[hash], node)
		if (d->ld == ld && d->nfs_client == clp &&
		    !memcmp(&d->deviceid, id, sizeof(*id))) {
			if (atomic_read(&d->ref))
				return d;
			else
				continue;
		}
	return NULL;
}

static struct nfs4_deviceid_node *
nfs4_get_device_info(struct nfs_server *server,
		const struct nfs4_deviceid *dev_id,
		const struct cred *cred, gfp_t gfp_flags)
{
	struct nfs4_deviceid_node *d = NULL;
	struct pnfs_device *pdev = NULL;
	struct page **pages = NULL;
	u32 max_resp_sz;
	int max_pages;
	int rc, i;

	/*
	 * Use the session max response size as the basis for setting
	 * GETDEVICEINFO's maxcount
	 */
	max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
	if (server->pnfs_curr_ld->max_deviceinfo_size &&
	    server->pnfs_curr_ld->max_deviceinfo_size < max_resp_sz)
		max_resp_sz = server->pnfs_curr_ld->max_deviceinfo_size;
	max_pages = nfs_page_array_len(0, max_resp_sz);
	dprintk("%s: server %p max_resp_sz %u max_pages %d\n",
		__func__, server, max_resp_sz, max_pages);

	pdev = kzalloc(sizeof(*pdev), gfp_flags);
	if (!pdev)
		return NULL;

	pages = kcalloc(max_pages, sizeof(struct page *), gfp_flags);
	if (!pages)
		goto out_free_pdev;

	for (i = 0; i < max_pages; i++) {
		pages[i] = alloc_page(gfp_flags);
		if (!pages[i])
			goto out_free_pages;
	}

	memcpy(&pdev->dev_id, dev_id, sizeof(*dev_id));
	pdev->layout_type = server->pnfs_curr_ld->id;
	pdev->pages = pages;
	pdev->pgbase = 0;
	pdev->pglen = max_resp_sz;
	pdev->mincount = 0;
	pdev->maxcount = max_resp_sz - nfs41_maxgetdevinfo_overhead;

	rc = nfs4_proc_getdeviceinfo(server, pdev, cred);
	dprintk("%s getdevice info returns %d\n", __func__, rc);
	if (rc)
		goto out_free_pages;

	/*
	 * Found new device, need to decode it and then add it to the
	 * list of known devices for this mountpoint.
	 */
	d = server->pnfs_curr_ld->alloc_deviceid_node(server, pdev,
			gfp_flags);
	if (d && pdev->nocache)
		set_bit(NFS_DEVICEID_NOCACHE, &d->flags);

out_free_pages:
	/* the page array is zero-filled, so stop at the first unallocated slot */
	for (i = 0; i < max_pages && pages[i]; i++)
		__free_page(pages[i]);
	kfree(pages);
out_free_pdev:
	kfree(pdev);
	dprintk("<-- %s d %p\n", __func__, d);
	return d;
}

/*
 * Look up a deviceid in the cache and take a reference on it if found.
 *
 * @server	nfs_server associated with the deviceid
 * @id		deviceid to look up
 * @hash	precomputed hash bucket for @id
 */
static struct nfs4_deviceid_node *
__nfs4_find_get_deviceid(struct nfs_server *server,
		const struct nfs4_deviceid *id, long hash)
{
	struct nfs4_deviceid_node *d;

	rcu_read_lock();
	d = _lookup_deviceid(server->pnfs_curr_ld, server->nfs_client, id,
			hash);
	if (d != NULL && !atomic_inc_not_zero(&d->ref))
		d = NULL;
	rcu_read_unlock();
	return d;
}

struct nfs4_deviceid_node *
nfs4_find_get_deviceid(struct nfs_server *server,
		const struct nfs4_deviceid *id, const struct cred *cred,
		gfp_t gfp_mask)
{
	long hash = nfs4_deviceid_hash(id);
	struct nfs4_deviceid_node *d, *new;

	d = __nfs4_find_get_deviceid(server, id, hash);
	if (d)
		return d;

	new = nfs4_get_device_info(server, id, cred, gfp_mask);
	if (!new)
		return new;

	spin_lock(&nfs4_deviceid_lock);
	d = __nfs4_find_get_deviceid(server, id, hash);
	if (d) {
		spin_unlock(&nfs4_deviceid_lock);
		server->pnfs_curr_ld->free_deviceid_node(new);
		return d;
	}
	hlist_add_head_rcu(&new->node, &nfs4_deviceid_cache[hash]);
	atomic_inc(&new->ref);
	spin_unlock(&nfs4_deviceid_lock);
	return new;
}
EXPORT_SYMBOL_GPL(nfs4_find_get_deviceid);
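
/*
 * Illustrative (hypothetical) call sequence for a layout driver; the exact
 * context and error handling vary per driver:
 *
 *	struct nfs4_deviceid_node *node;
 *
 *	node = nfs4_find_get_deviceid(server, &devid, cred, GFP_KERNEL);
 *	if (!node)
 *		return -ENODEV;
 *	// typically container_of(node, ...) to reach the driver structure
 *	// that embeds the nfs4_deviceid_node
 *	nfs4_put_deviceid_node(node);
 */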

/*
 * Remove a deviceid from the cache
 *
 * @ld	layout driver that owns the deviceid
 * @clp	nfs_client associated with the deviceid
 * @id	the deviceid to unhash
 *
 * If found, the node is unhashed and the reference it held as a cache
 * member is dropped; the node itself is freed once its last reference
 * is put.
 */
void
nfs4_delete_deviceid(const struct pnfs_layoutdriver_type *ld,
		const struct nfs_client *clp, const struct nfs4_deviceid *id)
{
	struct nfs4_deviceid_node *d;

	spin_lock(&nfs4_deviceid_lock);
	rcu_read_lock();
	d = _lookup_deviceid(ld, clp, id, nfs4_deviceid_hash(id));
	rcu_read_unlock();
	if (!d) {
		spin_unlock(&nfs4_deviceid_lock);
		return;
	}
	hlist_del_init_rcu(&d->node);
	clear_bit(NFS_DEVICEID_NOCACHE, &d->flags);
	spin_unlock(&nfs4_deviceid_lock);

	/* balance the initial reference set in nfs4_init_deviceid_node */
	nfs4_put_deviceid_node(d);
}
EXPORT_SYMBOL_GPL(nfs4_delete_deviceid);

void
nfs4_init_deviceid_node(struct nfs4_deviceid_node *d, struct nfs_server *server,
			const struct nfs4_deviceid *id)
{
	INIT_HLIST_NODE(&d->node);
	INIT_HLIST_NODE(&d->tmpnode);
	d->ld = server->pnfs_curr_ld;
	d->nfs_client = server->nfs_client;
	d->flags = 0;
	d->deviceid = *id;
	atomic_set(&d->ref, 1);
}
EXPORT_SYMBOL_GPL(nfs4_init_deviceid_node);

/*
 * Dereference a deviceid node and delete it when its reference count drops
 * to zero.
 *
 * @d deviceid node to put
 *
 * return true iff the node was deleted
 * Note that the test for d->ref == 0 is sufficient to establish that the
 * node is no longer hashed in the global device id cache.
 */
bool
nfs4_put_deviceid_node(struct nfs4_deviceid_node *d)
{
	if (test_bit(NFS_DEVICEID_NOCACHE, &d->flags)) {
		if (atomic_add_unless(&d->ref, -1, 2))
			return false;
		nfs4_delete_deviceid(d->ld, d->nfs_client, &d->deviceid);
	}
	if (!atomic_dec_and_test(&d->ref))
		return false;
	d->ld->free_deviceid_node(d);
	return true;
}
EXPORT_SYMBOL_GPL(nfs4_put_deviceid_node);
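
/*
 * Note on the NFS_DEVICEID_NOCACHE case above: a cached node carries the
 * initial reference from nfs4_init_deviceid_node() plus one taken when it
 * was hashed in nfs4_find_get_deviceid(). When the count reaches 2, only
 * the caller's and the cache's references remain, so the node is unhashed
 * immediately via nfs4_delete_deviceid() rather than kept for reuse.
 */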

void
nfs4_mark_deviceid_available(struct nfs4_deviceid_node *node)
{
	if (test_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags)) {
		clear_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags);
		smp_mb__after_atomic();
	}
}
EXPORT_SYMBOL_GPL(nfs4_mark_deviceid_available);

void
nfs4_mark_deviceid_unavailable(struct nfs4_deviceid_node *node)
{
	node->timestamp_unavailable = jiffies;
	smp_mb__before_atomic();
	set_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags);
	smp_mb__after_atomic();
}
EXPORT_SYMBOL_GPL(nfs4_mark_deviceid_unavailable);

bool
nfs4_test_deviceid_unavailable(struct nfs4_deviceid_node *node)
{
	if (test_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags)) {
		unsigned long start, end;

		end = jiffies;
		start = end - PNFS_DEVICE_RETRY_TIMEOUT;
		if (time_in_range(node->timestamp_unavailable, start, end))
			return true;
		clear_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags);
		smp_mb__after_atomic();
	}
	return false;
}
EXPORT_SYMBOL_GPL(nfs4_test_deviceid_unavailable);
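
/*
 * Purge strategy for the helper below: entries belonging to the client are
 * first unhashed from the RCU-protected cache under nfs4_deviceid_lock and
 * collected on a private list via their tmpnode member; their references
 * are then dropped only after the lock and the RCU read section have been
 * released.
 */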
static void
_deviceid_purge_client(const struct nfs_client *clp, long hash)
{
	struct nfs4_deviceid_node *d;
	HLIST_HEAD(tmp);

	spin_lock(&nfs4_deviceid_lock);
	rcu_read_lock();
	hlist_for_each_entry_rcu(d, &nfs4_deviceid_cache[hash], node)
		if (d->nfs_client == clp && atomic_read(&d->ref)) {
			hlist_del_init_rcu(&d->node);
			hlist_add_head(&d->tmpnode, &tmp);
			clear_bit(NFS_DEVICEID_NOCACHE, &d->flags);
		}
	rcu_read_unlock();
	spin_unlock(&nfs4_deviceid_lock);

	if (hlist_empty(&tmp))
		return;

	while (!hlist_empty(&tmp)) {
		d = hlist_entry(tmp.first, struct nfs4_deviceid_node, tmpnode);
		hlist_del(&d->tmpnode);
		nfs4_put_deviceid_node(d);
	}
}

void
nfs4_deviceid_purge_client(const struct nfs_client *clp)
{
	long h;

	if (!(clp->cl_exchange_flags & EXCHGID4_FLAG_USE_PNFS_MDS))
		return;
	for (h = 0; h < NFS4_DEVICE_ID_HASH_SIZE; h++)
		_deviceid_purge_client(clp, h);
}

/*
 * Stop use of all deviceids associated with an nfs_client
 */
void
nfs4_deviceid_mark_client_invalid(struct nfs_client *clp)
{
	struct nfs4_deviceid_node *d;
	int i;

	rcu_read_lock();
	for (i = 0; i < NFS4_DEVICE_ID_HASH_SIZE; i++) {
		hlist_for_each_entry_rcu(d, &nfs4_deviceid_cache[i], node)
			if (d->nfs_client == clp)
				set_bit(NFS_DEVICEID_INVALID, &d->flags);
	}
	rcu_read_unlock();
}