// SPDX-License-Identifier: GPL-2.0
/*
 * Discovery service for the NVMe over Fabrics target.
 * Copyright (C) 2016 Intel Corporation. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/slab.h>
#include <generated/utsrelease.h>
#include "nvmet.h"

struct nvmet_subsys *nvmet_disc_subsys;

static u64 nvmet_genctr;

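/*
 * Queue a discovery log change AEN on @ctrl if it is connected through
 * @port and has not masked the discovery change AEN bit.
 */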
static void __nvmet_disc_changed(struct nvmet_port *port,
				 struct nvmet_ctrl *ctrl)
{
	if (ctrl->port != port)
		return;

	if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_DISC_CHANGE))
		return;

	nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
			      NVME_AER_NOTICE_DISC_CHANGED, NVME_LOG_DISC);
}

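/*
 * Bump the discovery generation counter and send a discovery log change
 * AEN to every discovery controller connected through @port. If @subsys
 * is non-NULL, only hosts allowed to access that subsystem are notified.
 * Must be called with nvmet_config_sem held.
 */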
void nvmet_port_disc_changed(struct nvmet_port *port,
			     struct nvmet_subsys *subsys)
{
	struct nvmet_ctrl *ctrl;

	lockdep_assert_held(&nvmet_config_sem);
	nvmet_genctr++;

	mutex_lock(&nvmet_disc_subsys->lock);
	list_for_each_entry(ctrl, &nvmet_disc_subsys->ctrls, subsys_entry) {
		if (subsys && !nvmet_host_allowed(subsys, ctrl->hostnqn))
			continue;

		__nvmet_disc_changed(port, ctrl);
	}
	mutex_unlock(&nvmet_disc_subsys->lock);

	/* If transport can signal change, notify transport */
	if (port->tr_ops && port->tr_ops->discovery_chg)
		port->tr_ops->discovery_chg(port);
}

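/*
 * Notify the discovery controllers on @port about a change to @subsys,
 * optionally restricted to controllers whose hostnqn matches @host.
 */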
static void __nvmet_subsys_disc_changed(struct nvmet_port *port,
					struct nvmet_subsys *subsys,
					struct nvmet_host *host)
{
	struct nvmet_ctrl *ctrl;

	mutex_lock(&nvmet_disc_subsys->lock);
	list_for_each_entry(ctrl, &nvmet_disc_subsys->ctrls, subsys_entry) {
		if (host && strcmp(nvmet_host_name(host), ctrl->hostnqn))
			continue;

		__nvmet_disc_changed(port, ctrl);
	}
	mutex_unlock(&nvmet_disc_subsys->lock);
}

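/*
 * Bump the discovery generation counter and send a discovery log change
 * AEN on every port that exports @subsys, optionally restricted to @host.
 */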
void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
			       struct nvmet_host *host)
{
	struct nvmet_port *port;
	struct nvmet_subsys_link *s;

	nvmet_genctr++;

	list_for_each_entry(port, nvmet_ports, global_entry)
		list_for_each_entry(s, &port->subsystems, entry) {
			if (s->subsys != subsys)
				continue;
			__nvmet_subsys_disc_changed(port, subsys, host);
		}
}

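/*
 * Add @port to @parent's referral list (unless already present) and
 * signal a discovery log change on the parent port.
 */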
void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port)
{
	down_write(&nvmet_config_sem);
	if (list_empty(&port->entry)) {
		list_add_tail(&port->entry, &parent->referrals);
		port->enabled = true;
		nvmet_port_disc_changed(parent, NULL);
	}
	up_write(&nvmet_config_sem);
}

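/*
 * Remove @port from @parent's referral list (if present) and signal a
 * discovery log change on the parent port.
 */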
void nvmet_referral_disable(struct nvmet_port *parent, struct nvmet_port *port)
{
	down_write(&nvmet_config_sem);
	if (!list_empty(&port->entry)) {
		port->enabled = false;
		list_del_init(&port->entry);
		nvmet_port_disc_changed(parent, NULL);
	}
	up_write(&nvmet_config_sem);
}

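/*
 * Fill discovery log page entry @numrec in @hdr with the transport
 * address of @port for the subsystem named @subsys_nqn.
 */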
static void nvmet_format_discovery_entry(struct nvmf_disc_rsp_page_hdr *hdr,
		struct nvmet_port *port, char *subsys_nqn, char *traddr,
		u8 type, u32 numrec)
{
	struct nvmf_disc_rsp_page_entry *e = &hdr->entries[numrec];

	e->trtype = port->disc_addr.trtype;
	e->adrfam = port->disc_addr.adrfam;
	e->treq = port->disc_addr.treq;
	e->portid = port->disc_addr.portid;
	/* we support only dynamic controllers */
	e->cntlid = cpu_to_le16(NVME_CNTLID_DYNAMIC);
	e->asqsz = cpu_to_le16(NVME_AQ_DEPTH);
	e->subtype = type;
	memcpy(e->trsvcid, port->disc_addr.trsvcid, NVMF_TRSVCID_SIZE);
	memcpy(e->traddr, traddr, NVMF_TRADDR_SIZE);
	memcpy(e->tsas.common, port->disc_addr.tsas.common, NVMF_TSAS_SIZE);
	strncpy(e->subnqn, subsys_nqn, NVMF_NQN_SIZE);
}

/*
 * nvmet_set_disc_traddr - set a correct discovery log entry traddr
 *
 * IP based transports (e.g. RDMA) can listen on "any" IPv4/IPv6 address
 * (INADDR_ANY or IN6ADDR_ANY_INIT). The discovery log page traddr reply
 * must not contain that "any" IP address. If the transport implements
 * .disc_traddr, use it: this callback will set the discovery traddr
 * from the req->port address in case the port in question listens on an
 * "any" IP address.
 */
static void nvmet_set_disc_traddr(struct nvmet_req *req, struct nvmet_port *port,
		char *traddr)
{
	if (req->ops->disc_traddr)
		req->ops->disc_traddr(req, port, traddr);
	else
		memcpy(traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE);
}

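/*
 * Count the discovery log page entries visible to the requesting host:
 * one per subsystem on the port the host is allowed to access, plus one
 * per referral. Called with nvmet_config_sem held.
 */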
static size_t discovery_log_entries(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_subsys_link *p;
	struct nvmet_port *r;
	size_t entries = 0;

	list_for_each_entry(p, &req->port->subsystems, entry) {
		if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
			continue;
		entries++;
	}
	list_for_each_entry(r, &req->port->referrals, entry)
		entries++;
	return entries;
}

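/*
 * Handle a Get Log Page command for the discovery log page: validate the
 * log identifier and offset, build the log page from the allowed
 * subsystems and referrals on the port, and copy the requested range
 * back to the host.
 */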
static void nvmet_execute_disc_get_log_page(struct nvmet_req *req)
{
	const int entry_size = sizeof(struct nvmf_disc_rsp_page_entry);
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmf_disc_rsp_page_hdr *hdr;
	u64 offset = nvmet_get_log_page_offset(req->cmd);
	size_t data_len = nvmet_get_log_page_len(req->cmd);
	size_t alloc_len;
	struct nvmet_subsys_link *p;
	struct nvmet_port *r;
	u32 numrec = 0;
	u16 status = 0;
	void *buffer;

	if (!nvmet_check_transfer_len(req, data_len))
		return;

	if (req->cmd->get_log_page.lid != NVME_LOG_DISC) {
		req->error_loc =
			offsetof(struct nvme_get_log_page_command, lid);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto out;
	}

	/* Spec requires dword aligned offsets */
	if (offset & 0x3) {
		req->error_loc =
			offsetof(struct nvme_get_log_page_command, lpo);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto out;
	}

	/*
	 * Make sure we allocate at least a buffer of response header size.
	 * If the host-provided data length is less than the header size,
	 * only the number of bytes requested by the host will be sent back.
	 */
	down_read(&nvmet_config_sem);
	alloc_len = sizeof(*hdr) + entry_size * discovery_log_entries(req);
	buffer = kzalloc(alloc_len, GFP_KERNEL);
	if (!buffer) {
		up_read(&nvmet_config_sem);
		status = NVME_SC_INTERNAL;
		goto out;
	}
	hdr = buffer;

	list_for_each_entry(p, &req->port->subsystems, entry) {
		char traddr[NVMF_TRADDR_SIZE];

		if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
			continue;

		nvmet_set_disc_traddr(req, req->port, traddr);
		nvmet_format_discovery_entry(hdr, req->port,
				p->subsys->subsysnqn, traddr,
				NVME_NQN_NVME, numrec);
		numrec++;
	}

	list_for_each_entry(r, &req->port->referrals, entry) {
		nvmet_format_discovery_entry(hdr, r,
				NVME_DISC_SUBSYS_NAME,
				r->disc_addr.traddr,
				NVME_NQN_DISC, numrec);
		numrec++;
	}

	hdr->genctr = cpu_to_le64(nvmet_genctr);
	hdr->numrec = cpu_to_le64(numrec);
	hdr->recfmt = cpu_to_le16(0);

	nvmet_clear_aen_bit(req, NVME_AEN_BIT_DISC_CHANGE);

	up_read(&nvmet_config_sem);

	status = nvmet_copy_to_sgl(req, 0, buffer + offset, data_len);
	kfree(buffer);
out:
	nvmet_req_complete(req, status);
}

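/*
 * Handle an Identify command for the discovery controller, reporting a
 * minimal Identify Controller data structure.
 */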
static void nvmet_execute_disc_identify(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_id_ctrl *id;
	const char model[] = "Linux";
	u16 status = 0;

	if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
		return;

	if (req->cmd->identify.cns != NVME_ID_CNS_CTRL) {
		req->error_loc = offsetof(struct nvme_identify, cns);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto out;
	}

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	memset(id->sn, ' ', sizeof(id->sn));
	bin2hex(id->sn, &ctrl->subsys->serial,
		min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
	memset(id->fr, ' ', sizeof(id->fr));
	memcpy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1, ' ');
	memcpy_and_pad(id->fr, sizeof(id->fr),
		       UTS_RELEASE, strlen(UTS_RELEASE), ' ');

	/* no limit on data transfer sizes for now */
	id->mdts = 0;
	id->cntlid = cpu_to_le16(ctrl->cntlid);
	id->ver = cpu_to_le32(ctrl->subsys->ver);
	id->lpa = (1 << 2);

	/* no enforcement soft-limit for maxcmd - pick arbitrary high value */
	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

	id->sgls = cpu_to_le32(1 << 0);	/* we always support SGLs */
	if (ctrl->ops->flags & NVMF_KEYED_SGLS)
		id->sgls |= cpu_to_le32(1 << 2);
	if (req->port->inline_data_size)
		id->sgls |= cpu_to_le32(1 << 20);

	id->oaes = cpu_to_le32(NVMET_DISC_AEN_CFG_OPTIONAL);

	strlcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

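/*
 * Set Features for discovery controllers; only the Keep Alive Timer and
 * Asynchronous Event Configuration features are supported.
 */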
static void nvmet_execute_disc_set_features(struct nvmet_req *req)
{
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u16 stat;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	switch (cdw10 & 0xff) {
	case NVME_FEAT_KATO:
		stat = nvmet_set_feat_kato(req);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		stat = nvmet_set_feat_async_event(req,
						  NVMET_DISC_AEN_CFG_OPTIONAL);
		break;
	default:
		req->error_loc =
			offsetof(struct nvme_common_command, cdw10);
		stat = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, stat);
}

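/*
 * Get Features for discovery controllers; mirrors the Set Features
 * handler in supporting only KATO and Asynchronous Event Configuration.
 */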
static void nvmet_execute_disc_get_features(struct nvmet_req *req)
{
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u16 stat = 0;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	switch (cdw10 & 0xff) {
	case NVME_FEAT_KATO:
		nvmet_get_feat_kato(req);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		nvmet_get_feat_async_event(req);
		break;
	default:
		req->error_loc =
			offsetof(struct nvme_common_command, cdw10);
		stat = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, stat);
}

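/*
 * Dispatch an admin command received on a discovery controller to its
 * handler, rejecting commands sent before the controller is ready as
 * well as opcodes the discovery service does not implement.
 */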
u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
		pr_err("got cmd %d while not ready\n",
		       cmd->common.opcode);
		req->error_loc =
			offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}

	switch (cmd->common.opcode) {
	case nvme_admin_set_features:
		req->execute = nvmet_execute_disc_set_features;
		return 0;
	case nvme_admin_get_features:
		req->execute = nvmet_execute_disc_get_features;
		return 0;
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		return 0;
	case nvme_admin_keep_alive:
		req->execute = nvmet_execute_keep_alive;
		return 0;
	case nvme_admin_get_log_page:
		req->execute = nvmet_execute_disc_get_log_page;
		return 0;
	case nvme_admin_identify:
		req->execute = nvmet_execute_disc_identify;
		return 0;
	default:
		pr_err("unhandled cmd %d\n", cmd->common.opcode);
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}
}

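/* Allocate the global discovery subsystem at initialization time. */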
int __init nvmet_init_discovery(void)
{
	nvmet_disc_subsys =
		nvmet_subsys_alloc(NVME_DISC_SUBSYS_NAME, NVME_NQN_DISC);
	return PTR_ERR_OR_ZERO(nvmet_disc_subsys);
}

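/* Drop the reference to the discovery subsystem taken at init time. */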
void nvmet_exit_discovery(void)
{
	nvmet_subsys_put(nvmet_disc_subsys);
}