// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename: target_core_hba.c
 *
 * This file contains the TCM HBA Transport related functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/module.h>
#include <net/sock.h>
#include <net/tcp.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
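
/*
 * Registered backends live on backend_list under backend_mutex; attached
 * HBAs live on hba_list under hba_lock, which also serializes the
 * monotonically increasing hba_id_counter.
 */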
static LIST_HEAD(backend_list);
static DEFINE_MUTEX(backend_mutex);

static u32 hba_id_counter;

static DEFINE_SPINLOCK(hba_lock);
static LIST_HEAD(hba_list);
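
/*
 * Register a backend driver's target_backend_ops with the target core,
 * typically from the backend module's init routine. Backend names must be
 * unique; a duplicate registration fails with -EEXIST.
 */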
int transport_backend_register(const struct target_backend_ops *ops)
{
	struct target_backend *tb, *old;

	tb = kzalloc(sizeof(*tb), GFP_KERNEL);
	if (!tb)
		return -ENOMEM;
	tb->ops = ops;

	mutex_lock(&backend_mutex);
	list_for_each_entry(old, &backend_list, list) {
		if (!strcmp(old->ops->name, ops->name)) {
			pr_err("backend %s already registered.\n", ops->name);
			mutex_unlock(&backend_mutex);
			kfree(tb);
			return -EEXIST;
		}
	}
	target_setup_backend_cits(tb);
	list_add_tail(&tb->list, &backend_list);
	mutex_unlock(&backend_mutex);

	pr_debug("TCM: Registered subsystem plugin: %s struct module: %p\n",
		ops->name, ops->owner);
	return 0;
}
EXPORT_SYMBOL(transport_backend_register);
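
/*
 * Remove a previously registered backend from backend_list. Safe to call
 * with ops that were never registered; the list walk simply finds no match.
 */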
void target_backend_unregister(const struct target_backend_ops *ops)
{
	struct target_backend *tb;

	mutex_lock(&backend_mutex);
	list_for_each_entry(tb, &backend_list, list) {
		if (tb->ops == ops) {
			list_del(&tb->list);
			mutex_unlock(&backend_mutex);
			/*
			 * Wait for any outstanding backend driver ->rcu_head
			 * callbacks to complete post TBO->free_device() ->
			 * call_rcu(), before allowing backend driver module
			 * unload of target_backend_ops->owner to proceed.
			 */
			rcu_barrier();
			kfree(tb);
			return;
		}
	}
	mutex_unlock(&backend_mutex);
}
EXPORT_SYMBOL(target_backend_unregister);
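
/*
 * Look up a registered backend by name and take a reference on the module
 * that owns it. Returns NULL if no such backend exists or its module is
 * already being unloaded.
 */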
static struct target_backend *core_get_backend(const char *name)
{
	struct target_backend *tb;

	mutex_lock(&backend_mutex);
	list_for_each_entry(tb, &backend_list, list) {
		if (!strcmp(tb->ops->name, name))
			goto found;
	}
	mutex_unlock(&backend_mutex);
	return NULL;
found:
	if (tb->ops->owner && !try_module_get(tb->ops->owner))
		tb = NULL;
	mutex_unlock(&backend_mutex);
	return tb;
}
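
/*
 * Allocate a virtual HBA, bind it to the backend named by plugin_name, and
 * give the backend's ->attach_hba() callback a chance to set up per-HBA
 * state before the HBA goes onto hba_list. Returns an ERR_PTR() on failure.
 */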
struct se_hba *
core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
{
	struct se_hba *hba;
	int ret = 0;

	hba = kzalloc(sizeof(*hba), GFP_KERNEL);
	if (!hba) {
		pr_err("Unable to allocate struct se_hba\n");
		return ERR_PTR(-ENOMEM);
	}

	spin_lock_init(&hba->device_lock);
	mutex_init(&hba->hba_access_mutex);

	hba->hba_index = scsi_get_new_index(SCSI_INST_INDEX);
	hba->hba_flags |= hba_flags;

	hba->backend = core_get_backend(plugin_name);
	if (!hba->backend) {
		ret = -EINVAL;
		goto out_free_hba;
	}

	ret = hba->backend->ops->attach_hba(hba, plugin_dep_id);
	if (ret < 0)
		goto out_module_put;

	spin_lock(&hba_lock);
	hba->hba_id = hba_id_counter++;
	list_add_tail(&hba->hba_node, &hba_list);
	spin_unlock(&hba_lock);

	pr_debug("CORE_HBA[%d] - Attached HBA to Generic Target"
			" Core\n", hba->hba_id);

	return hba;

out_module_put:
	module_put(hba->backend->ops->owner);
	hba->backend = NULL;
out_free_hba:
	kfree(hba);
	return ERR_PTR(ret);
}
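
/*
 * Tear down an HBA that no longer has devices attached: let the backend's
 * ->detach_hba() release its state, then drop the module reference taken
 * in core_get_backend() and free the HBA itself.
 */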
int
core_delete_hba(struct se_hba *hba)
{
	WARN_ON(hba->dev_count);

	hba->backend->ops->detach_hba(hba);

	spin_lock(&hba_lock);
	list_del(&hba->hba_node);
	spin_unlock(&hba_lock);

	pr_debug("CORE_HBA[%d] - Detached HBA from Generic Target"
			" Core\n", hba->hba_id);

	module_put(hba->backend->ops->owner);
	hba->backend = NULL;
	kfree(hba);
	return 0;
}
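
/*
 * Devices spanning more than U32_MAX logical blocks need descriptor format
 * sense data, since only that format carries a 64-bit INFORMATION field.
 */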
bool target_sense_desc_format(struct se_device *dev)
{
	return (dev) ? dev->transport->get_blocks(dev) > U32_MAX : false;
}