admin-cmd.c

// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe admin command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/rculist.h>
#include <linux/part_stat.h>

#include <generated/utsrelease.h>
#include <asm/unaligned.h>
#include "nvmet.h"

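/*
 * Get Log Page length helper: NUMDU/NUMDL in the command hold a 0's based
 * count of dwords, so the transfer length in bytes is (NUMD + 1) * 4.
 * For example, NUMDU = 0 and NUMDL = 1023 describe 1024 dwords, i.e. a
 * 4096 byte log page.
 */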
u32 nvmet_get_log_page_len(struct nvme_command *cmd)
{
	u32 len = le16_to_cpu(cmd->get_log_page.numdu);

	len <<= 16;
	len += le16_to_cpu(cmd->get_log_page.numdl);
	/* NUMD is a 0's based value */
	len += 1;
	len *= sizeof(u32);

	return len;
}

static u32 nvmet_feat_data_len(struct nvmet_req *req, u32 cdw10)
{
	switch (cdw10 & 0xff) {
	case NVME_FEAT_HOST_ID:
		return sizeof(req->sq->ctrl->hostid);
	default:
		return 0;
	}
}

u64 nvmet_get_log_page_offset(struct nvme_command *cmd)
{
	return le64_to_cpu(cmd->get_log_page.lpo);
}

static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
{
	nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->transfer_len));
}

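/*
 * Error Information log page: return NVMET_ERROR_LOG_SLOTS entries from the
 * controller's error ring buffer, starting at the slot indexed by the current
 * error counter (the most recently filled slot) and walking backwards with
 * wrap-around, under error_lock so concurrent error updates can't tear the
 * copy.
 */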
static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	unsigned long flags;
	off_t offset = 0;
	u64 slot;
	u64 i;

	spin_lock_irqsave(&ctrl->error_lock, flags);
	slot = ctrl->err_counter % NVMET_ERROR_LOG_SLOTS;

	for (i = 0; i < NVMET_ERROR_LOG_SLOTS; i++) {
		if (nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot],
				sizeof(struct nvme_error_slot)))
			break;

		if (slot == 0)
			slot = NVMET_ERROR_LOG_SLOTS - 1;
		else
			slot--;
		offset += sizeof(struct nvme_error_slot);
	}
	spin_unlock_irqrestore(&ctrl->error_lock, flags);
	nvmet_req_complete(req, 0);
}

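/*
 * Per-namespace SMART / Health statistics.  Read and write counts come from
 * the block layer's partition statistics of the backing block device; data
 * units are reported in the NVMe unit of 1000 512-byte sectors, rounded up.
 * File-backed namespaces have no such counters and report zeroes.
 */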
static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	struct nvmet_ns *ns;
	u64 host_reads, host_writes, data_units_read, data_units_written;

	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->get_log_page.nsid);
	if (!ns) {
		pr_err("Could not find namespace id : %d\n",
				le32_to_cpu(req->cmd->get_log_page.nsid));
		req->error_loc = offsetof(struct nvme_rw_command, nsid);
		return NVME_SC_INVALID_NS;
	}

	/* we don't have the right data for file backed ns */
	if (!ns->bdev)
		goto out;

	host_reads = part_stat_read(ns->bdev->bd_part, ios[READ]);
	data_units_read = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part,
		sectors[READ]), 1000);
	host_writes = part_stat_read(ns->bdev->bd_part, ios[WRITE]);
	data_units_written = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part,
		sectors[WRITE]), 1000);

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
out:
	nvmet_put_namespace(ns);
	return NVME_SC_SUCCESS;
}

static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	u64 host_reads = 0, host_writes = 0;
	u64 data_units_read = 0, data_units_written = 0;
	struct nvmet_ns *ns;
	struct nvmet_ctrl *ctrl;
	unsigned long idx;

	ctrl = req->sq->ctrl;
	xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
		/* we don't have the right data for file backed ns */
		if (!ns->bdev)
			continue;
		host_reads += part_stat_read(ns->bdev->bd_part, ios[READ]);
		data_units_read += DIV_ROUND_UP(
			part_stat_read(ns->bdev->bd_part, sectors[READ]), 1000);
		host_writes += part_stat_read(ns->bdev->bd_part, ios[WRITE]);
		data_units_written += DIV_ROUND_UP(
			part_stat_read(ns->bdev->bd_part, sectors[WRITE]), 1000);
	}

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);

	return NVME_SC_SUCCESS;
}

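/*
 * SMART / Health Information log page handler.  NSID 0xffffffff selects the
 * controller-wide log aggregated over all namespaces, any other NSID the
 * per-namespace variant.  The number of error log entries is copied from the
 * controller's error counter under error_lock.
 */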
static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
{
	struct nvme_smart_log *log;
	u16 status = NVME_SC_INTERNAL;
	unsigned long flags;

	if (req->transfer_len != sizeof(*log))
		goto out;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		goto out;

	if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL))
		status = nvmet_get_smart_log_all(req, log);
	else
		status = nvmet_get_smart_log_nsid(req, log);
	if (status)
		goto out_free_log;

	spin_lock_irqsave(&req->sq->ctrl->error_lock, flags);
	put_unaligned_le64(req->sq->ctrl->err_counter,
			&log->num_err_log_entries);
	spin_unlock_irqrestore(&req->sq->ctrl->error_lock, flags);

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
out_free_log:
	kfree(log);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
{
	u16 status = NVME_SC_INTERNAL;
	struct nvme_effects_log *log;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		goto out;

	log->acs[nvme_admin_get_log_page] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_identify] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_abort_cmd] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_set_features] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_get_features] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_async_event] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_keep_alive] = cpu_to_le32(1 << 0);

	log->iocs[nvme_cmd_read] = cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_write] = cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_flush] = cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_dsm] = cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_write_zeroes] = cpu_to_le32(1 << 0);

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));

	kfree(log);
out:
	nvmet_req_complete(req, status);
}

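/*
 * Changed Namespace List log page: return the NSIDs recorded since the last
 * read.  If the list overflowed (nr_changed_ns was set to U32_MAX), only the
 * first entry is returned; the rest of the buffer is zero-filled.  Reading
 * the log clears the list and re-arms the namespace attribute changed AEN.
 */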
static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u16 status = NVME_SC_INTERNAL;
	size_t len;

	if (req->transfer_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
		goto out;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_changed_ns == U32_MAX)
		len = sizeof(__le32);
	else
		len = ctrl->nr_changed_ns * sizeof(__le32);
	status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
	if (!status)
		status = nvmet_zero_sgl(req, len, req->transfer_len - len);
	ctrl->nr_changed_ns = 0;
	nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR);
	mutex_unlock(&ctrl->lock);
out:
	nvmet_req_complete(req, status);
}

static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
		struct nvme_ana_group_desc *desc)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	unsigned long idx;
	u32 count = 0;

	if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
		xa_for_each(&ctrl->subsys->namespaces, idx, ns)
			if (ns->anagrpid == grpid)
				desc->nsids[count++] = cpu_to_le32(ns->nsid);
	}

	desc->grpid = cpu_to_le32(grpid);
	desc->nnsids = cpu_to_le32(count);
	desc->chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	desc->state = req->port->ana_state[grpid];
	memset(desc->rsvd17, 0, sizeof(desc->rsvd17));
	return sizeof(struct nvme_ana_group_desc) + count * sizeof(__le32);
}

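/*
 * Asymmetric Namespace Access log page: emit one descriptor per enabled ANA
 * group, including member NSIDs unless the host set the RGO (groups only)
 * bit in LSP.  The header is written last, once the number of groups is
 * known.  nvmet_ana_sem serializes this against ANA state and group
 * membership changes.
 */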
static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
{
	struct nvme_ana_rsp_hdr hdr = { 0, };
	struct nvme_ana_group_desc *desc;
	size_t offset = sizeof(struct nvme_ana_rsp_hdr); /* start beyond hdr */
	size_t len;
	u32 grpid;
	u16 ngrps = 0;
	u16 status;

	status = NVME_SC_INTERNAL;
	desc = kmalloc(sizeof(struct nvme_ana_group_desc) +
			NVMET_MAX_NAMESPACES * sizeof(__le32), GFP_KERNEL);
	if (!desc)
		goto out;

	down_read(&nvmet_ana_sem);
	for (grpid = 1; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
		if (!nvmet_ana_group_enabled[grpid])
			continue;
		len = nvmet_format_ana_group(req, grpid, desc);
		status = nvmet_copy_to_sgl(req, offset, desc, len);
		if (status)
			break;
		offset += len;
		ngrps++;
	}
	for ( ; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
		if (nvmet_ana_group_enabled[grpid])
			ngrps++;
	}

	hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	hdr.ngrps = cpu_to_le16(ngrps);
	nvmet_clear_aen_bit(req, NVME_AEN_BIT_ANA_CHANGE);
	up_read(&nvmet_ana_sem);

	kfree(desc);

	/* copy the header last once we know the number of groups */
	status = nvmet_copy_to_sgl(req, 0, &hdr, sizeof(hdr));
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_get_log_page(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, nvmet_get_log_page_len(req->cmd)))
		return;

	switch (req->cmd->get_log_page.lid) {
	case NVME_LOG_ERROR:
		return nvmet_execute_get_log_page_error(req);
	case NVME_LOG_SMART:
		return nvmet_execute_get_log_page_smart(req);
	case NVME_LOG_FW_SLOT:
		/*
		 * We only support a single firmware slot which always is
		 * active, so we can zero out the whole firmware slot log and
		 * still claim to fully implement this mandatory log page.
		 */
		return nvmet_execute_get_log_page_noop(req);
	case NVME_LOG_CHANGED_NS:
		return nvmet_execute_get_log_changed_ns(req);
	case NVME_LOG_CMD_EFFECTS:
		return nvmet_execute_get_log_cmd_effects_ns(req);
	case NVME_LOG_ANA:
		return nvmet_execute_get_log_page_ana(req);
	}
	pr_debug("unhandled lid %d on qid %d\n",
			req->cmd->get_log_page.lid, req->sq->qid);
	req->error_loc = offsetof(struct nvme_get_log_page_command, lid);
	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
}

static void nvmet_id_set_model_number(struct nvme_id_ctrl *id,
		struct nvmet_subsys *subsys)
{
	const char *model = NVMET_DEFAULT_CTRL_MODEL;
	struct nvmet_subsys_model *subsys_model;

	rcu_read_lock();
	subsys_model = rcu_dereference(subsys->model);
	if (subsys_model)
		model = subsys_model->number;
	memcpy_and_pad(id->mn, sizeof(id->mn), model, strlen(model), ' ');
	rcu_read_unlock();
}

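/*
 * Identify Controller.  Most fields are fixed for the fabrics target: MDTS is
 * taken from the transport if it reports a limit, SQ/CQ entry sizes are the
 * standard 64/16 bytes, SGLs are always supported (keyed SGLs and in-capsule
 * data depending on transport and port configuration), and the I/O command
 * capsule size includes the port's inline data size unless the controller is
 * metadata (PI) capable.
 */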
static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_id_ctrl *id;
	u32 cmd_capsule_size;
	u16 status = 0;

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* XXX: figure out how to assign real vendor IDs. */
	id->vid = 0;
	id->ssvid = 0;

	memset(id->sn, ' ', sizeof(id->sn));
	bin2hex(id->sn, &ctrl->subsys->serial,
		min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
	nvmet_id_set_model_number(id, ctrl->subsys);
	memcpy_and_pad(id->fr, sizeof(id->fr),
		       UTS_RELEASE, strlen(UTS_RELEASE), ' ');

	id->rab = 6;

	/*
	 * XXX: figure out how we can assign an IEEE OUI, but until then
	 * the safest is to leave it as zeroes.
	 */

	/* we support multiple ports, multiple hosts and ANA: */
	id->cmic = (1 << 0) | (1 << 1) | (1 << 3);

	/* Limit MDTS according to transport capability */
	if (ctrl->ops->get_mdts)
		id->mdts = ctrl->ops->get_mdts(ctrl);
	else
		id->mdts = 0;

	id->cntlid = cpu_to_le16(ctrl->cntlid);
	id->ver = cpu_to_le32(ctrl->subsys->ver);

	/* XXX: figure out what to do about RTD3R/RTD3 */
	id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL);
	id->ctratt = cpu_to_le32(NVME_CTRL_ATTR_HID_128_BIT |
		NVME_CTRL_ATTR_TBKAS);

	id->oacs = 0;

	/*
	 * We don't really have a practical limit on the number of abort
	 * commands.  But we don't do anything useful for abort either, so
	 * no point in allowing more abort commands than the spec requires.
	 */
	id->acl = 3;

	id->aerl = NVMET_ASYNC_EVENTS - 1;

	/* first slot is read-only, only one slot supported */
	id->frmw = (1 << 0) | (1 << 1);
	id->lpa = (1 << 0) | (1 << 1) | (1 << 2);
	id->elpe = NVMET_ERROR_LOG_SLOTS - 1;
	id->npss = 0;

	/* We support keep-alive timeout in granularity of seconds */
	id->kas = cpu_to_le16(NVMET_KAS);

	id->sqes = (0x6 << 4) | 0x6;
	id->cqes = (0x4 << 4) | 0x4;

	/* no enforcement soft-limit for maxcmd - pick arbitrary high value */
	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

	id->nn = cpu_to_le32(ctrl->subsys->max_nsid);
	id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
	id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
			NVME_CTRL_ONCS_WRITE_ZEROES);

	/* XXX: don't report vwc if the underlying device is write through */
	id->vwc = NVME_CTRL_VWC_PRESENT;

	/*
	 * We can't support atomic writes bigger than an LBA without support
	 * from the backend device.
	 */
	id->awun = 0;
	id->awupf = 0;

	id->sgls = cpu_to_le32(1 << 0);	/* we always support SGLs */
	if (ctrl->ops->flags & NVMF_KEYED_SGLS)
		id->sgls |= cpu_to_le32(1 << 2);
	if (req->port->inline_data_size)
		id->sgls |= cpu_to_le32(1 << 20);

	strlcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));

	/*
	 * Max command capsule size is sqe + in-capsule data size.
	 * Disable in-capsule data for Metadata capable controllers.
	 */
	cmd_capsule_size = sizeof(struct nvme_command);
	if (!ctrl->pi_support)
		cmd_capsule_size += req->port->inline_data_size;
	id->ioccsz = cpu_to_le32(cmd_capsule_size / 16);

	/* Max response capsule size is cqe */
	id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);

	id->msdbd = ctrl->ops->msdbd;

	id->anacap = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4);
	id->anatt = 10; /* random value */
	id->anagrpmax = cpu_to_le32(NVMET_MAX_ANAGRPS);
	id->nanagrpid = cpu_to_le32(NVMET_MAX_ANAGRPS);

	/*
	 * Meh, we don't really support any power state.  Fake up the same
	 * values that qemu does.
	 */
	id->psd[0].max_power = cpu_to_le16(0x9c4);
	id->psd[0].entry_lat = cpu_to_le32(0x10);
	id->psd[0].exit_lat = cpu_to_le32(0x4);

	id->nwpc = 1 << 0; /* write protect and no write protect */

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

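/*
 * Identify Namespace.  An inactive NSID returns an all-zeroes structure
 * rather than an error; NSID 0xffffffff is rejected.  Capacity and size come
 * from the backing device, NUSE is only reported when the namespace is
 * reachable through this port's ANA state, and a single LBA format mirroring
 * the backing device's block size (plus metadata when PI is enabled) is
 * exposed.
 */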
static void nvmet_execute_identify_ns(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_id_ns *id;
	u16 status = 0;

	if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
		req->error_loc = offsetof(struct nvme_identify, nsid);
		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
		goto out;
	}

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* return an all zeroed buffer if we can't find an active namespace */
	req->ns = nvmet_find_namespace(ctrl, req->cmd->identify.nsid);
	if (!req->ns) {
		status = 0;
		goto done;
	}

	nvmet_ns_revalidate(req->ns);

	/*
	 * nuse = ncap = nsze isn't always true, but we have no way to find
	 * that out from the underlying device.
	 */
	id->ncap = id->nsze =
		cpu_to_le64(req->ns->size >> req->ns->blksize_shift);
	switch (req->port->ana_state[req->ns->anagrpid]) {
	case NVME_ANA_INACCESSIBLE:
	case NVME_ANA_PERSISTENT_LOSS:
		break;
	default:
		id->nuse = id->nsze;
		break;
	}

	if (req->ns->bdev)
		nvmet_bdev_set_limits(req->ns->bdev, id);

	/*
	 * We just provide a single LBA format that matches what the
	 * underlying device reports.
	 */
	id->nlbaf = 0;
	id->flbas = 0;

	/*
	 * Our namespace might always be shared.  Not just with other
	 * controllers, but also with any other user of the block device.
	 */
	id->nmic = (1 << 0);
	id->anagrpid = cpu_to_le32(req->ns->anagrpid);

	memcpy(&id->nguid, &req->ns->nguid, sizeof(id->nguid));

	id->lbaf[0].ds = req->ns->blksize_shift;

	if (ctrl->pi_support && nvmet_ns_has_pi(req->ns)) {
		id->dpc = NVME_NS_DPC_PI_FIRST | NVME_NS_DPC_PI_LAST |
			  NVME_NS_DPC_PI_TYPE1 | NVME_NS_DPC_PI_TYPE2 |
			  NVME_NS_DPC_PI_TYPE3;
		id->mc = NVME_MC_EXTENDED_LBA;
		id->dps = req->ns->pi_type;
		id->flbas = NVME_NS_FLBAS_META_EXT;
		id->lbaf[0].ms = cpu_to_le16(req->ns->metadata_size);
	}

	if (req->ns->readonly)
		id->nsattr |= (1 << 0);
done:
	if (!status)
		status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

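/*
 * Identify Active Namespace ID list: return up to 1024 active NSIDs greater
 * than the NSID given in the command, in ascending order, with the remainder
 * of the 4KB buffer zero-filled.
 */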
static void nvmet_execute_identify_nslist(struct nvmet_req *req)
{
	static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	unsigned long idx;
	u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
	__le32 *list;
	u16 status = 0;
	int i = 0;

	list = kzalloc(buf_size, GFP_KERNEL);
	if (!list) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
		if (ns->nsid <= min_nsid)
			continue;
		list[i++] = cpu_to_le32(ns->nsid);
		if (i == buf_size / sizeof(__le32))
			break;
	}

	status = nvmet_copy_to_sgl(req, 0, list, buf_size);

	kfree(list);
out:
	nvmet_req_complete(req, status);
}

static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
		void *id, off_t *off)
{
	struct nvme_ns_id_desc desc = {
		.nidt = type,
		.nidl = len,
	};
	u16 status;

	status = nvmet_copy_to_sgl(req, *off, &desc, sizeof(desc));
	if (status)
		return status;
	*off += sizeof(desc);

	status = nvmet_copy_to_sgl(req, *off, id, len);
	if (status)
		return status;
	*off += len;

	return 0;
}

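/*
 * Identify Namespace Identification Descriptor list: emit a UUID and/or
 * NGUID descriptor for the namespace when they are non-zero, then zero-fill
 * the rest of the 4KB buffer to terminate the list.
 */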
static void nvmet_execute_identify_desclist(struct nvmet_req *req)
{
	struct nvmet_ns *ns;
	u16 status = 0;
	off_t off = 0;

	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
	if (!ns) {
		req->error_loc = offsetof(struct nvme_identify, nsid);
		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
		goto out;
	}

	if (memchr_inv(&ns->uuid, 0, sizeof(ns->uuid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
						  NVME_NIDT_UUID_LEN,
						  &ns->uuid, &off);
		if (status)
			goto out_put_ns;
	}
	if (memchr_inv(ns->nguid, 0, sizeof(ns->nguid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
						  NVME_NIDT_NGUID_LEN,
						  &ns->nguid, &off);
		if (status)
			goto out_put_ns;
	}

	if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
			off) != NVME_IDENTIFY_DATA_SIZE - off)
		status = NVME_SC_INTERNAL | NVME_SC_DNR;
out_put_ns:
	nvmet_put_namespace(ns);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_identify(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
		return;

	switch (req->cmd->identify.cns) {
	case NVME_ID_CNS_NS:
		return nvmet_execute_identify_ns(req);
	case NVME_ID_CNS_CTRL:
		return nvmet_execute_identify_ctrl(req);
	case NVME_ID_CNS_NS_ACTIVE_LIST:
		return nvmet_execute_identify_nslist(req);
	case NVME_ID_CNS_NS_DESC_LIST:
		return nvmet_execute_identify_desclist(req);
	}

	pr_debug("unhandled identify cns %d on qid %d\n",
			req->cmd->identify.cns, req->sq->qid);
	req->error_loc = offsetof(struct nvme_identify, cns);
	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
}

/*
 * A "minimum viable" abort implementation: the command is mandatory in the
 * spec, but we are not required to do any useful work.  We couldn't really
 * do a useful abort, so don't bother even waiting for the command to be
 * executed; return immediately, reporting that the command to abort wasn't
 * found.
 */
static void nvmet_execute_abort(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, 0))
		return;
	nvmet_set_result(req, 1);
	nvmet_req_complete(req, 0);
}

static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req)
{
	u16 status;

	if (req->ns->file)
		status = nvmet_file_flush(req);
	else
		status = nvmet_bdev_flush(req);

	if (status)
		pr_err("write protect flush failed nsid: %u\n", req->ns->nsid);
	return status;
}

static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
{
	u32 write_protect = le32_to_cpu(req->cmd->common.cdw11);
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u16 status = NVME_SC_FEATURE_NOT_CHANGEABLE;

	req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->rw.nsid);
	if (unlikely(!req->ns)) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return status;
	}

	mutex_lock(&subsys->lock);
	switch (write_protect) {
	case NVME_NS_WRITE_PROTECT:
		req->ns->readonly = true;
		status = nvmet_write_protect_flush_sync(req);
		if (status)
			req->ns->readonly = false;
		break;
	case NVME_NS_NO_WRITE_PROTECT:
		req->ns->readonly = false;
		status = 0;
		break;
	default:
		break;
	}

	if (!status)
		nvmet_ns_changed(subsys, req->ns->nsid);
	mutex_unlock(&subsys->lock);
	return status;
}

u16 nvmet_set_feat_kato(struct nvmet_req *req)
{
	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

	nvmet_stop_keep_alive_timer(req->sq->ctrl);
	req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
	nvmet_start_keep_alive_timer(req->sq->ctrl);

	nvmet_set_result(req, req->sq->ctrl->kato);

	return 0;
}

u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)
{
	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

	if (val32 & ~mask) {
		req->error_loc = offsetof(struct nvme_common_command, cdw11);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
	nvmet_set_result(req, val32);

	return 0;
}

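/*
 * Set Features.  Number of Queues is not actually changeable: the handler
 * only rejects the reserved value 0xffff and echoes back the fixed queue
 * count (max_qid - 1, 0's based).  KATO is given in milliseconds and rounded
 * up to the controller's one-second granularity.  Host Identifier cannot be
 * changed here (a command sequence error is returned; the host ID is
 * established when the association is created), and Write Protect is applied
 * per namespace.
 */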
void nvmet_execute_set_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
	u16 status = 0;
	u16 nsqr;
	u16 ncqr;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	switch (cdw10 & 0xff) {
	case NVME_FEAT_NUM_QUEUES:
		ncqr = (cdw11 >> 16) & 0xffff;
		nsqr = cdw11 & 0xffff;
		if (ncqr == 0xffff || nsqr == 0xffff) {
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}
		nvmet_set_result(req,
			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
		break;
	case NVME_FEAT_KATO:
		status = nvmet_set_feat_kato(req);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		status = nvmet_set_feat_async_event(req, NVMET_AEN_CFG_ALL);
		break;
	case NVME_FEAT_HOST_ID:
		status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
		break;
	case NVME_FEAT_WRITE_PROTECT:
		status = nvmet_set_feat_write_protect(req);
		break;
	default:
		req->error_loc = offsetof(struct nvme_common_command, cdw10);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}

static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u32 result;

	req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->common.nsid);
	if (!req->ns) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return NVME_SC_INVALID_NS | NVME_SC_DNR;
	}

	mutex_lock(&subsys->lock);
	if (req->ns->readonly == true)
		result = NVME_NS_WRITE_PROTECT;
	else
		result = NVME_NS_NO_WRITE_PROTECT;
	nvmet_set_result(req, result);
	mutex_unlock(&subsys->lock);

	return 0;
}

void nvmet_get_feat_kato(struct nvmet_req *req)
{
	nvmet_set_result(req, req->sq->ctrl->kato * 1000);
}

void nvmet_get_feat_async_event(struct nvmet_req *req)
{
	nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
}

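/*
 * Get Features.  Only the 128-bit Host Identifier format is supported, so
 * CDW11 bit 0 must be set for NVME_FEAT_HOST_ID; the value is returned in
 * the data buffer rather than the completion result.  Several mandatory but
 * unimplemented features remain stubbed out in the #if 0 block inside the
 * switch.
 */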
void nvmet_execute_get_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u16 status = 0;

	if (!nvmet_check_transfer_len(req, nvmet_feat_data_len(req, cdw10)))
		return;

	switch (cdw10 & 0xff) {
	/*
	 * These features are mandatory in the spec, but we don't
	 * have a useful way to implement them.  We'll eventually
	 * need to come up with some fake values for these.
	 */
#if 0
	case NVME_FEAT_ARBITRATION:
		break;
	case NVME_FEAT_POWER_MGMT:
		break;
	case NVME_FEAT_TEMP_THRESH:
		break;
	case NVME_FEAT_ERR_RECOVERY:
		break;
	case NVME_FEAT_IRQ_COALESCE:
		break;
	case NVME_FEAT_IRQ_CONFIG:
		break;
	case NVME_FEAT_WRITE_ATOMIC:
		break;
#endif
	case NVME_FEAT_ASYNC_EVENT:
		nvmet_get_feat_async_event(req);
		break;
	case NVME_FEAT_VOLATILE_WC:
		nvmet_set_result(req, 1);
		break;
	case NVME_FEAT_NUM_QUEUES:
		nvmet_set_result(req,
			(subsys->max_qid-1) | ((subsys->max_qid-1) << 16));
		break;
	case NVME_FEAT_KATO:
		nvmet_get_feat_kato(req);
		break;
	case NVME_FEAT_HOST_ID:
		/* need 128-bit host identifier flag */
		if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
			req->error_loc =
				offsetof(struct nvme_common_command, cdw11);
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}

		status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
				sizeof(req->sq->ctrl->hostid));
		break;
	case NVME_FEAT_WRITE_PROTECT:
		status = nvmet_get_feat_write_protect(req);
		break;
	default:
		req->error_loc =
			offsetof(struct nvme_common_command, cdw10);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}

void nvmet_execute_async_event(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_SC_DNR);
		return;
	}
	ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
	mutex_unlock(&ctrl->lock);

	schedule_work(&ctrl->async_event_work);
}

void nvmet_execute_keep_alive(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	pr_debug("ctrl %d update keep-alive timer for %d secs\n",
		ctrl->cntlid, ctrl->kato);

	mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
	nvmet_req_complete(req, 0);
}

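/*
 * Admin command parser: fabrics commands and discovery subsystems are routed
 * to their own parsers, passthru controllers get the passthru admin parser,
 * and everything else is dispatched to the handlers above by setting
 * req->execute.  Unknown opcodes fail with Invalid Opcode and Do Not Retry.
 */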
u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;
	u16 ret;

	if (nvme_is_fabrics(cmd))
		return nvmet_parse_fabrics_cmd(req);
	if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
		return nvmet_parse_discovery_cmd(req);

	ret = nvmet_check_ctrl_status(req, cmd);
	if (unlikely(ret))
		return ret;

	if (nvmet_req_passthru_ctrl(req))
		return nvmet_parse_passthru_admin_cmd(req);

	switch (cmd->common.opcode) {
	case nvme_admin_get_log_page:
		req->execute = nvmet_execute_get_log_page;
		return 0;
	case nvme_admin_identify:
		req->execute = nvmet_execute_identify;
		return 0;
	case nvme_admin_abort_cmd:
		req->execute = nvmet_execute_abort;
		return 0;
	case nvme_admin_set_features:
		req->execute = nvmet_execute_set_features;
		return 0;
	case nvme_admin_get_features:
		req->execute = nvmet_execute_get_features;
		return 0;
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		return 0;
	case nvme_admin_keep_alive:
		req->execute = nvmet_execute_keep_alive;
		return 0;
	}

	pr_debug("unhandled cmd %d on qid %d\n", cmd->common.opcode,
		 req->sq->qid);
	req->error_loc = offsetof(struct nvme_common_command, opcode);
	return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}