// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe Over Fabrics Target Passthrough command implementation.
 *
 * Copyright (c) 2017-2018 Western Digital Corporation or its
 * affiliates.
 * Copyright (c) 2019-2020, Eideticom Inc.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>

#include "../host/nvme.h"
#include "nvmet.h"

MODULE_IMPORT_NS(NVME_TARGET_PASSTHRU);

/*
 * xarray to maintain one passthru subsystem per nvme controller.
 */
static DEFINE_XARRAY(passthru_subsystems);
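
/*
 * Rewrite the Identify Controller data returned by the passthru controller
 * so that fabrics-specific fields (cntlid, ver, mdts, kas, sgls, subnqn,
 * ioccsz, iorcsz, msdbd, cmic, oncs) reflect the nvmet controller rather
 * than the underlying PCIe device.
 */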
static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_ctrl *pctrl = ctrl->subsys->passthru_ctrl;
	u16 status = NVME_SC_SUCCESS;
	struct nvme_id_ctrl *id;
	int max_hw_sectors;
	int page_shift;

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id)
		return NVME_SC_INTERNAL;

	status = nvmet_copy_from_sgl(req, 0, id, sizeof(*id));
	if (status)
		goto out_free;

	id->cntlid = cpu_to_le16(ctrl->cntlid);
	id->ver = cpu_to_le32(ctrl->subsys->ver);

	/*
	 * The passthru NVMe driver may have a limit on the number of segments
	 * which depends on the host's memory fragmentation. To solve this,
	 * ensure mdts is limited to the number of pages equal to the number
	 * of segments.
	 */
	max_hw_sectors = min_not_zero(pctrl->max_segments << (PAGE_SHIFT - 9),
				      pctrl->max_hw_sectors);

	/*
	 * nvmet_passthru_map_sg is limited to using a single bio so limit
	 * the mdts based on BIO_MAX_PAGES as well
	 */
	max_hw_sectors = min_not_zero(BIO_MAX_PAGES << (PAGE_SHIFT - 9),
				      max_hw_sectors);

	page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12;

	id->mdts = ilog2(max_hw_sectors) + 9 - page_shift;

	id->acl = 3;
	/*
	 * We export the aerl limit of the fabrics controller; update this when
	 * passthru-based aerl support is added.
	 */
	id->aerl = NVMET_ASYNC_EVENTS - 1;

	/* emulate kas as most PCIe ctrls don't have support for kas */
	id->kas = cpu_to_le16(NVMET_KAS);

	/* don't support host memory buffer */
	id->hmpre = 0;
	id->hmmin = 0;

	id->sqes = min_t(__u8, ((0x6 << 4) | 0x6), id->sqes);
	id->cqes = min_t(__u8, ((0x4 << 4) | 0x4), id->cqes);
	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

	/* don't support fuse commands */
	id->fuses = 0;

	id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
	if (ctrl->ops->flags & NVMF_KEYED_SGLS)
		id->sgls |= cpu_to_le32(1 << 2);
	if (req->port->inline_data_size)
		id->sgls |= cpu_to_le32(1 << 20);

	/*
	 * When the passthru controller is set up using the nvme-loop transport
	 * it will export the passthru ctrl subsysnqn (PCIe NVMe ctrl) and will
	 * fail in nvme/host/core.c in the nvme_init_subsystem()->
	 * nvme_active_ctrls() code path with a duplicate ctrl subsysnqn. In
	 * order to prevent that we mask the passthru-ctrl subsysnqn with the
	 * target ctrl subsysnqn.
	 */
	memcpy(id->subnqn, ctrl->subsysnqn, sizeof(id->subnqn));

	/* use fabric id-ctrl values */
	id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
				req->port->inline_data_size) / 16);
	id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);

	id->msdbd = ctrl->ops->msdbd;

	/* Support multipath connections with fabrics */
	id->cmic |= 1 << 1;

	/* Disable reservations, see nvmet_parse_passthru_io_cmd() */
	id->oncs &= cpu_to_le16(~NVME_CTRL_ONCS_RESERVATIONS);

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(struct nvme_id_ctrl));

out_free:
	kfree(id);
	return status;
}
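
/*
 * Rewrite the Identify Namespace data returned by the passthru controller:
 * clear any LBA formats that use metadata and disable metadata support,
 * since the NVMe target does not pass metadata through.
 */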
static u16 nvmet_passthru_override_id_ns(struct nvmet_req *req)
{
	u16 status = NVME_SC_SUCCESS;
	struct nvme_id_ns *id;
	int i;

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id)
		return NVME_SC_INTERNAL;

	status = nvmet_copy_from_sgl(req, 0, id, sizeof(struct nvme_id_ns));
	if (status)
		goto out_free;

	for (i = 0; i < (id->nlbaf + 1); i++)
		if (id->lbaf[i].ms)
			memset(&id->lbaf[i], 0, sizeof(id->lbaf[i]));

	id->flbas = id->flbas & ~(1 << 4);

	/*
	 * Presently the NVMe-oF target code does not support sending
	 * metadata, so we must disable it here. This should be updated
	 * once the target starts supporting metadata.
	 */
	id->mc = 0;

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

out_free:
	kfree(id);
	return status;
}
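
/*
 * Execute the passthru request synchronously from a work item, apply the
 * Identify overrides above when needed, and complete the nvmet request.
 */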
static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, p.work);
	struct request *rq = req->p.rq;
	u16 status;

	nvme_execute_passthru_rq(rq);
	status = nvme_req(rq)->status;
	if (status == NVME_SC_SUCCESS &&
	    req->cmd->common.opcode == nvme_admin_identify) {
		switch (req->cmd->identify.cns) {
		case NVME_ID_CNS_CTRL:
			nvmet_passthru_override_id_ctrl(req);
			break;
		case NVME_ID_CNS_NS:
			nvmet_passthru_override_id_ns(req);
			break;
		}
	}

	req->cqe->result = nvme_req(rq)->result;
	nvmet_req_complete(req, status);
	blk_mq_free_request(rq);
}
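
/*
 * Completion callback for passthru requests executed asynchronously with
 * blk_execute_rq_nowait(); propagates the result and status to the host.
 */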
static void nvmet_passthru_req_done(struct request *rq,
				    blk_status_t blk_status)
{
	struct nvmet_req *req = rq->end_io_data;

	req->cqe->result = nvme_req(rq)->result;
	nvmet_req_complete(req, nvme_req(rq)->status);
	blk_mq_free_request(rq);
}
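
/*
 * Map the request's scatterlist onto a single bio and attach it to the
 * passthru request. Fails if the data does not fit in one bio.
 */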
static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
{
	struct scatterlist *sg;
	int op_flags = 0;
	struct bio *bio;
	int i, ret;

	if (req->sg_cnt > BIO_MAX_PAGES)
		return -EINVAL;

	if (req->cmd->common.opcode == nvme_cmd_flush)
		op_flags = REQ_FUA;
	else if (nvme_is_write(req->cmd))
		op_flags = REQ_SYNC | REQ_IDLE;

	bio = bio_alloc(GFP_KERNEL, req->sg_cnt);
	bio->bi_end_io = bio_put;
	bio->bi_opf = req_op(rq) | op_flags;

	for_each_sg(req->sg, sg, req->sg_cnt, i) {
		if (bio_add_pc_page(rq->q, bio, sg_page(sg), sg->length,
				    sg->offset) < sg->length) {
			bio_put(bio);
			return -EINVAL;
		}
	}

	ret = blk_rq_append_bio(rq, &bio);
	if (unlikely(ret)) {
		bio_put(bio);
		return ret;
	}

	return 0;
}
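
/*
 * Issue a command to the passthru controller: look up the target namespace
 * for I/O queue commands, map the data, and either run the request in a work
 * item (when command effects or Identify overrides require process context)
 * or execute it asynchronously.
 */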
static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
{
	struct nvme_ctrl *ctrl = nvmet_req_passthru_ctrl(req);
	struct request_queue *q = ctrl->admin_q;
	struct nvme_ns *ns = NULL;
	struct request *rq = NULL;
	u32 effects;
	u16 status;
	int ret;

	if (likely(req->sq->qid != 0)) {
		u32 nsid = le32_to_cpu(req->cmd->common.nsid);

		ns = nvme_find_get_ns(ctrl, nsid);
		if (unlikely(!ns)) {
			pr_err("failed to get passthru ns nsid:%u\n", nsid);
			status = NVME_SC_INVALID_NS | NVME_SC_DNR;
			goto out;
		}

		q = ns->queue;
	}

	rq = nvme_alloc_request(q, req->cmd, 0, NVME_QID_ANY);
	if (IS_ERR(rq)) {
		status = NVME_SC_INTERNAL;
		goto out_put_ns;
	}

	if (req->sg_cnt) {
		ret = nvmet_passthru_map_sg(req, rq);
		if (unlikely(ret)) {
			status = NVME_SC_INTERNAL;
			goto out_put_req;
		}
	}

	/*
	 * If there are effects for the command we are about to execute, or
	 * an end_req function, we need to use nvme_execute_passthru_rq()
	 * synchronously in a work item, since the end_req function and
	 * nvme_passthru_end() can't be called in the request done callback,
	 * which is typically in interrupt context.
	 */
	effects = nvme_command_effects(ctrl, ns, req->cmd->common.opcode);
	if (req->p.use_workqueue || effects) {
		INIT_WORK(&req->p.work, nvmet_passthru_execute_cmd_work);
		req->p.rq = rq;
		schedule_work(&req->p.work);
	} else {
		rq->end_io_data = req;
		blk_execute_rq_nowait(rq->q, ns ? ns->disk : NULL, rq, 0,
				      nvmet_passthru_req_done);
	}

	if (ns)
		nvme_put_ns(ns);

	return;

out_put_req:
	blk_mq_free_request(rq);
out_put_ns:
	if (ns)
		nvme_put_ns(ns);
out:
	nvmet_req_complete(req, status);
}

/*
 * We need to emulate set host behaviour to ensure that any requested
 * behaviour of the target's host matches the requested behaviour
 * of the device's host and fail otherwise.
 */
static void nvmet_passthru_set_host_behaviour(struct nvmet_req *req)
{
	struct nvme_ctrl *ctrl = nvmet_req_passthru_ctrl(req);
	struct nvme_feat_host_behavior *host;
	u16 status = NVME_SC_INTERNAL;
	int ret;

	host = kzalloc(sizeof(*host) * 2, GFP_KERNEL);
	if (!host)
		goto out_complete_req;

	ret = nvme_get_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0,
				host, sizeof(*host), NULL);
	if (ret)
		goto out_free_host;

	status = nvmet_copy_from_sgl(req, 0, &host[1], sizeof(*host));
	if (status)
		goto out_free_host;

	if (memcmp(&host[0], &host[1], sizeof(host[0]))) {
		pr_warn("target host has requested different behaviour from the local host\n");
		status = NVME_SC_INTERNAL;
	}

out_free_host:
	kfree(host);
out_complete_req:
	nvmet_req_complete(req, status);
}

static u16 nvmet_setup_passthru_command(struct nvmet_req *req)
{
	req->p.use_workqueue = false;
	req->execute = nvmet_passthru_execute_cmd;
	return NVME_SC_SUCCESS;
}
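
/*
 * Parse an I/O command destined for the passthru controller. Everything
 * except reservation commands is passed through; see the comment below on
 * why reservations are rejected.
 */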
u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req)
{
	/* Reject any commands with non-sgl flags set (i.e. fused commands) */
	if (req->cmd->common.flags & ~NVME_CMD_SGL_ALL)
		return NVME_SC_INVALID_FIELD;

	switch (req->cmd->common.opcode) {
	case nvme_cmd_resv_register:
	case nvme_cmd_resv_report:
	case nvme_cmd_resv_acquire:
	case nvme_cmd_resv_release:
		/*
		 * Reservations cannot be supported properly because the
		 * underlying device has no way of differentiating different
		 * hosts that connect via fabrics. This could potentially be
		 * emulated in the future if regular targets grow support for
		 * this feature.
		 */
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}

	return nvmet_setup_passthru_command(req);
}

/*
 * Only features that are emulated or specifically allowed in the list are
 * passed down to the controller. This function implements the allow list for
 * both get and set features.
 */
static u16 nvmet_passthru_get_set_features(struct nvmet_req *req)
{
	switch (le32_to_cpu(req->cmd->features.fid)) {
	case NVME_FEAT_ARBITRATION:
	case NVME_FEAT_POWER_MGMT:
	case NVME_FEAT_LBA_RANGE:
	case NVME_FEAT_TEMP_THRESH:
	case NVME_FEAT_ERR_RECOVERY:
	case NVME_FEAT_VOLATILE_WC:
	case NVME_FEAT_WRITE_ATOMIC:
	case NVME_FEAT_AUTO_PST:
	case NVME_FEAT_TIMESTAMP:
	case NVME_FEAT_HCTM:
	case NVME_FEAT_NOPSC:
	case NVME_FEAT_RRL:
	case NVME_FEAT_PLM_CONFIG:
	case NVME_FEAT_PLM_WINDOW:
	case NVME_FEAT_HOST_BEHAVIOR:
	case NVME_FEAT_SANITIZE:
	case NVME_FEAT_VENDOR_START ... NVME_FEAT_VENDOR_END:
		return nvmet_setup_passthru_command(req);

	case NVME_FEAT_ASYNC_EVENT:
		/* There is no support for forwarding ASYNC events */
	case NVME_FEAT_IRQ_COALESCE:
	case NVME_FEAT_IRQ_CONFIG:
		/* The IRQ settings will not apply to the target controller */
	case NVME_FEAT_HOST_MEM_BUF:
		/*
		 * Any HMB that's set will not be passed through and will
		 * not work as expected
		 */
	case NVME_FEAT_SW_PROGRESS:
		/*
		 * The Pre-Boot Software Load Count doesn't make much
		 * sense for a target to export
		 */
	case NVME_FEAT_RESV_MASK:
	case NVME_FEAT_RESV_PERSIST:
		/* No reservations, see nvmet_parse_passthru_io_cmd() */
	default:
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}
}
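
/*
 * Parse an admin command destined for the passthru controller. Commands that
 * only make sense on the fabrics controller (async events, keep alive and a
 * handful of features) are emulated locally; everything else on the allow
 * list is passed through, and the rest is rejected.
 */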
u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
{
	/* Reject any commands with non-sgl flags set (i.e. fused commands) */
	if (req->cmd->common.flags & ~NVME_CMD_SGL_ALL)
		return NVME_SC_INVALID_FIELD;

	/*
	 * Passthru all vendor specific commands
	 */
	if (req->cmd->common.opcode >= nvme_admin_vendor_start)
		return nvmet_setup_passthru_command(req);

	switch (req->cmd->common.opcode) {
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		return NVME_SC_SUCCESS;
	case nvme_admin_keep_alive:
		/*
		 * Most PCIe ctrls don't support the keep alive cmd, so we
		 * route keep alive to the non-passthru mode. Change this
		 * once PCIe ctrls with keep alive support become available.
		 */
		req->execute = nvmet_execute_keep_alive;
		return NVME_SC_SUCCESS;
	case nvme_admin_set_features:
		switch (le32_to_cpu(req->cmd->features.fid)) {
		case NVME_FEAT_ASYNC_EVENT:
		case NVME_FEAT_KATO:
		case NVME_FEAT_NUM_QUEUES:
		case NVME_FEAT_HOST_ID:
			req->execute = nvmet_execute_set_features;
			return NVME_SC_SUCCESS;
		case NVME_FEAT_HOST_BEHAVIOR:
			req->execute = nvmet_passthru_set_host_behaviour;
			return NVME_SC_SUCCESS;
		default:
			return nvmet_passthru_get_set_features(req);
		}
		break;
	case nvme_admin_get_features:
		switch (le32_to_cpu(req->cmd->features.fid)) {
		case NVME_FEAT_ASYNC_EVENT:
		case NVME_FEAT_KATO:
		case NVME_FEAT_NUM_QUEUES:
		case NVME_FEAT_HOST_ID:
			req->execute = nvmet_execute_get_features;
			return NVME_SC_SUCCESS;
		default:
			return nvmet_passthru_get_set_features(req);
		}
		break;
	case nvme_admin_identify:
		switch (req->cmd->identify.cns) {
		case NVME_ID_CNS_CTRL:
			req->execute = nvmet_passthru_execute_cmd;
			req->p.use_workqueue = true;
			return NVME_SC_SUCCESS;
		case NVME_ID_CNS_CS_CTRL:
			switch (req->cmd->identify.csi) {
			case NVME_CSI_ZNS:
				req->execute = nvmet_passthru_execute_cmd;
				req->p.use_workqueue = true;
				return NVME_SC_SUCCESS;
			}
			return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
		case NVME_ID_CNS_NS:
			req->execute = nvmet_passthru_execute_cmd;
			req->p.use_workqueue = true;
			return NVME_SC_SUCCESS;
		case NVME_ID_CNS_CS_NS:
			switch (req->cmd->identify.csi) {
			case NVME_CSI_ZNS:
				req->execute = nvmet_passthru_execute_cmd;
				req->p.use_workqueue = true;
				return NVME_SC_SUCCESS;
			}
			return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
		default:
			return nvmet_setup_passthru_command(req);
		}
	case nvme_admin_get_log_page:
		return nvmet_setup_passthru_command(req);
	default:
		/* Reject commands not in the allowlist above */
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}
}
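
/*
 * Bind a subsystem to the NVMe controller named by passthru_ctrl_path and
 * register it in the passthru_subsystems xarray, so that a controller can
 * only be passed through by one subsystem at a time.
 */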
int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys)
{
	struct nvme_ctrl *ctrl;
	struct file *file;
	int ret = -EINVAL;
	void *old;

	mutex_lock(&subsys->lock);
	if (!subsys->passthru_ctrl_path)
		goto out_unlock;
	if (subsys->passthru_ctrl)
		goto out_unlock;

	if (subsys->nr_namespaces) {
		pr_info("cannot enable both passthru and regular namespaces for a single subsystem\n");
		goto out_unlock;
	}

	file = filp_open(subsys->passthru_ctrl_path, O_RDWR, 0);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto out_unlock;
	}

	ctrl = nvme_ctrl_from_file(file);
	if (!ctrl) {
		pr_err("failed to open nvme controller %s\n",
		       subsys->passthru_ctrl_path);
		goto out_put_file;
	}

	old = xa_cmpxchg(&passthru_subsystems, ctrl->cntlid, NULL,
			 subsys, GFP_KERNEL);
	if (xa_is_err(old)) {
		ret = xa_err(old);
		goto out_put_file;
	}

	if (old)
		goto out_put_file;

	subsys->passthru_ctrl = ctrl;
	subsys->ver = ctrl->vs;

	if (subsys->ver < NVME_VS(1, 2, 1)) {
		pr_warn("nvme controller version is too old: %llu.%llu.%llu, advertising 1.2.1\n",
			NVME_MAJOR(subsys->ver),
			NVME_MINOR(subsys->ver),
			NVME_TERTIARY(subsys->ver));
		subsys->ver = NVME_VS(1, 2, 1);
	}

	nvme_get_ctrl(ctrl);
	__module_get(subsys->passthru_ctrl->ops->module);
	ret = 0;

out_put_file:
	filp_close(file, NULL);
out_unlock:
	mutex_unlock(&subsys->lock);
	return ret;
}
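
/*
 * Drop the references taken by nvmet_passthru_ctrl_enable(); the caller
 * must hold subsys->lock.
 */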
static void __nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
{
	if (subsys->passthru_ctrl) {
		xa_erase(&passthru_subsystems, subsys->passthru_ctrl->cntlid);
		module_put(subsys->passthru_ctrl->ops->module);
		nvme_put_ctrl(subsys->passthru_ctrl);
	}
	subsys->passthru_ctrl = NULL;
	subsys->ver = NVMET_DEFAULT_VS;
}

void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
{
	mutex_lock(&subsys->lock);
	__nvmet_passthru_ctrl_disable(subsys);
	mutex_unlock(&subsys->lock);
}

void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys)
{
	mutex_lock(&subsys->lock);
	__nvmet_passthru_ctrl_disable(subsys);
	mutex_unlock(&subsys->lock);
	kfree(subsys->passthru_ctrl_path);
}