core.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Common code for the NVMe target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/pci-p2pdma.h>
#include <linux/scatterlist.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include "nvmet.h"

struct workqueue_struct *buffered_io_wq;
static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
static DEFINE_IDA(cntlid_ida);
/*
 * This read/write semaphore is used to synchronize access to configuration
 * information on a target system that will result in discovery log page
 * information change for at least one host.
 * The full list of resources to be protected by this semaphore is:
 *
 *  - subsystems list
 *  - per-subsystem allowed hosts list
 *  - allow_any_host subsystem attribute
 *  - nvmet_genctr
 *  - the nvmet_transports array
 *
 * When updating any of those lists/structures write lock should be obtained,
 * while when reading (populating discovery log page or checking host-subsystem
 * link) read lock is obtained to allow concurrent reads.
 */
DECLARE_RWSEM(nvmet_config_sem);

u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
u64 nvmet_ana_chgcnt;
DECLARE_RWSEM(nvmet_ana_sem);
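
/*
 * Translate a backend errno into an NVMe status code and, where it is
 * meaningful, record the byte offset of the offending command field in
 * req->error_loc for the error log page.
 */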
inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
{
        u16 status;

        switch (errno) {
        case 0:
                status = NVME_SC_SUCCESS;
                break;
        case -ENOSPC:
                req->error_loc = offsetof(struct nvme_rw_command, length);
                status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
                break;
        case -EREMOTEIO:
                req->error_loc = offsetof(struct nvme_rw_command, slba);
                status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
                break;
        case -EOPNOTSUPP:
                req->error_loc = offsetof(struct nvme_common_command, opcode);
                switch (req->cmd->common.opcode) {
                case nvme_cmd_dsm:
                case nvme_cmd_write_zeroes:
                        status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
                        break;
                default:
                        status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
                }
                break;
        case -ENODATA:
                req->error_loc = offsetof(struct nvme_rw_command, nsid);
                status = NVME_SC_ACCESS_DENIED;
                break;
        case -EIO:
                fallthrough;
        default:
                req->error_loc = offsetof(struct nvme_common_command, opcode);
                status = NVME_SC_INTERNAL | NVME_SC_DNR;
        }

        return status;
}

static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
                const char *subsysnqn);

u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
                size_t len)
{
        if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
                req->error_loc = offsetof(struct nvme_common_command, dptr);
                return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
        }
        return 0;
}

u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
{
        if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
                req->error_loc = offsetof(struct nvme_common_command, dptr);
                return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
        }
        return 0;
}

u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len)
{
        if (sg_zero_buffer(req->sg, req->sg_cnt, len, off) != len) {
                req->error_loc = offsetof(struct nvme_common_command, dptr);
                return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
        }
        return 0;
}
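
/*
 * Walk the subsystem's namespace XArray, which is indexed by NSID, and
 * return the highest NSID currently present (0 when no namespaces are
 * enabled).
 */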
static unsigned int nvmet_max_nsid(struct nvmet_subsys *subsys)
{
        unsigned long nsid = 0;
        struct nvmet_ns *cur;
        unsigned long idx;

        xa_for_each(&subsys->namespaces, idx, cur)
                nsid = cur->nsid;

        return nsid;
}

static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
{
        return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
}

static void nvmet_async_events_failall(struct nvmet_ctrl *ctrl)
{
        u16 status = NVME_SC_INTERNAL | NVME_SC_DNR;
        struct nvmet_req *req;

        mutex_lock(&ctrl->lock);
        while (ctrl->nr_async_event_cmds) {
                req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
                mutex_unlock(&ctrl->lock);
                nvmet_req_complete(req, status);
                mutex_lock(&ctrl->lock);
        }
        mutex_unlock(&ctrl->lock);
}
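
/*
 * Pair queued async events with outstanding AER commands from the host.
 * ctrl->lock is dropped around nvmet_req_complete() so the completion
 * path never runs under the controller lock.
 */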
static void nvmet_async_events_process(struct nvmet_ctrl *ctrl)
{
        struct nvmet_async_event *aen;
        struct nvmet_req *req;

        mutex_lock(&ctrl->lock);
        while (ctrl->nr_async_event_cmds && !list_empty(&ctrl->async_events)) {
                aen = list_first_entry(&ctrl->async_events,
                                       struct nvmet_async_event, entry);
                req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
                nvmet_set_result(req, nvmet_async_event_result(aen));

                list_del(&aen->entry);
                kfree(aen);

                mutex_unlock(&ctrl->lock);
                trace_nvmet_async_event(ctrl, req->cqe->result.u32);
                nvmet_req_complete(req, 0);
                mutex_lock(&ctrl->lock);
        }
        mutex_unlock(&ctrl->lock);
}

static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
{
        struct nvmet_async_event *aen, *tmp;

        mutex_lock(&ctrl->lock);
        list_for_each_entry_safe(aen, tmp, &ctrl->async_events, entry) {
                list_del(&aen->entry);
                kfree(aen);
        }
        mutex_unlock(&ctrl->lock);
}

static void nvmet_async_event_work(struct work_struct *work)
{
        struct nvmet_ctrl *ctrl =
                container_of(work, struct nvmet_ctrl, async_event_work);

        nvmet_async_events_process(ctrl);
}

void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
                u8 event_info, u8 log_page)
{
        struct nvmet_async_event *aen;

        aen = kmalloc(sizeof(*aen), GFP_KERNEL);
        if (!aen)
                return;

        aen->event_type = event_type;
        aen->event_info = event_info;
        aen->log_page = log_page;

        mutex_lock(&ctrl->lock);
        list_add_tail(&aen->entry, &ctrl->async_events);
        mutex_unlock(&ctrl->lock);

        schedule_work(&ctrl->async_event_work);
}
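
/*
 * Record nsid in the controller's Changed Namespace List log.  Once the
 * list is full, it is replaced by the single entry 0xffffffff and
 * nr_changed_ns is pinned to U32_MAX so later calls become no-ops.
 */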
static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid)
{
        u32 i;

        mutex_lock(&ctrl->lock);
        if (ctrl->nr_changed_ns > NVME_MAX_CHANGED_NAMESPACES)
                goto out_unlock;

        for (i = 0; i < ctrl->nr_changed_ns; i++) {
                if (ctrl->changed_ns_list[i] == nsid)
                        goto out_unlock;
        }

        if (ctrl->nr_changed_ns == NVME_MAX_CHANGED_NAMESPACES) {
                ctrl->changed_ns_list[0] = cpu_to_le32(0xffffffff);
                ctrl->nr_changed_ns = U32_MAX;
                goto out_unlock;
        }

        ctrl->changed_ns_list[ctrl->nr_changed_ns++] = nsid;
out_unlock:
        mutex_unlock(&ctrl->lock);
}

void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid)
{
        struct nvmet_ctrl *ctrl;

        lockdep_assert_held(&subsys->lock);

        list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
                nvmet_add_to_changed_ns_log(ctrl, cpu_to_le32(nsid));
                if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_NS_ATTR))
                        continue;
                nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
                                NVME_AER_NOTICE_NS_CHANGED,
                                NVME_LOG_CHANGED_NS);
        }
}

void nvmet_send_ana_event(struct nvmet_subsys *subsys,
                struct nvmet_port *port)
{
        struct nvmet_ctrl *ctrl;

        mutex_lock(&subsys->lock);
        list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
                if (port && ctrl->port != port)
                        continue;
                if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_ANA_CHANGE))
                        continue;
                nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
                                NVME_AER_NOTICE_ANA, NVME_LOG_ANA);
        }
        mutex_unlock(&subsys->lock);
}

void nvmet_port_send_ana_event(struct nvmet_port *port)
{
        struct nvmet_subsys_link *p;

        down_read(&nvmet_config_sem);
        list_for_each_entry(p, &port->subsystems, entry)
                nvmet_send_ana_event(p->subsys, port);
        up_read(&nvmet_config_sem);
}

int nvmet_register_transport(const struct nvmet_fabrics_ops *ops)
{
        int ret = 0;

        down_write(&nvmet_config_sem);
        if (nvmet_transports[ops->type])
                ret = -EINVAL;
        else
                nvmet_transports[ops->type] = ops;
        up_write(&nvmet_config_sem);

        return ret;
}
EXPORT_SYMBOL_GPL(nvmet_register_transport);

void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops)
{
        down_write(&nvmet_config_sem);
        nvmet_transports[ops->type] = NULL;
        up_write(&nvmet_config_sem);
}
EXPORT_SYMBOL_GPL(nvmet_unregister_transport);

void nvmet_port_del_ctrls(struct nvmet_port *port, struct nvmet_subsys *subsys)
{
        struct nvmet_ctrl *ctrl;

        mutex_lock(&subsys->lock);
        list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
                if (ctrl->port == port)
                        ctrl->ops->delete_ctrl(ctrl);
        }
        mutex_unlock(&subsys->lock);
}

int nvmet_enable_port(struct nvmet_port *port)
{
        const struct nvmet_fabrics_ops *ops;
        int ret;

        lockdep_assert_held(&nvmet_config_sem);

        ops = nvmet_transports[port->disc_addr.trtype];
        if (!ops) {
                up_write(&nvmet_config_sem);
                request_module("nvmet-transport-%d", port->disc_addr.trtype);
                down_write(&nvmet_config_sem);
                ops = nvmet_transports[port->disc_addr.trtype];
                if (!ops) {
                        pr_err("transport type %d not supported\n",
                                port->disc_addr.trtype);
                        return -EINVAL;
                }
        }

        if (!try_module_get(ops->owner))
                return -EINVAL;

        /*
         * If the user requested PI support and the transport isn't pi capable,
         * don't enable the port.
         */
        if (port->pi_enable && !(ops->flags & NVMF_METADATA_SUPPORTED)) {
                pr_err("T10-PI is not supported by transport type %d\n",
                        port->disc_addr.trtype);
                ret = -EINVAL;
                goto out_put;
        }

        ret = ops->add_port(port);
        if (ret)
                goto out_put;

        /* If the transport didn't set inline_data_size, then disable it. */
        if (port->inline_data_size < 0)
                port->inline_data_size = 0;

        port->enabled = true;
        port->tr_ops = ops;
        return 0;

out_put:
        module_put(ops->owner);
        return ret;
}

void nvmet_disable_port(struct nvmet_port *port)
{
        const struct nvmet_fabrics_ops *ops;

        lockdep_assert_held(&nvmet_config_sem);

        port->enabled = false;
        port->tr_ops = NULL;

        ops = nvmet_transports[port->disc_addr.trtype];
        ops->remove_port(port);
        module_put(ops->owner);
}

static void nvmet_keep_alive_timer(struct work_struct *work)
{
        struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
                        struct nvmet_ctrl, ka_work);
        bool reset_tbkas = ctrl->reset_tbkas;

        ctrl->reset_tbkas = false;
        if (reset_tbkas) {
                pr_debug("ctrl %d reschedule traffic based keep-alive timer\n",
                        ctrl->cntlid);
                schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
                return;
        }

        pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
                ctrl->cntlid, ctrl->kato);

        nvmet_ctrl_fatal_error(ctrl);
}

void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
        if (unlikely(ctrl->kato == 0))
                return;

        pr_debug("ctrl %d start keep-alive timer for %d secs\n",
                ctrl->cntlid, ctrl->kato);

        INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
        schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}

void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
        if (unlikely(ctrl->kato == 0))
                return;

        pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid);

        cancel_delayed_work_sync(&ctrl->ka_work);
}

struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid)
{
        struct nvmet_ns *ns;

        ns = xa_load(&ctrl->subsys->namespaces, le32_to_cpu(nsid));
        if (ns)
                percpu_ref_get(&ns->ref);

        return ns;
}

static void nvmet_destroy_namespace(struct percpu_ref *ref)
{
        struct nvmet_ns *ns = container_of(ref, struct nvmet_ns, ref);

        complete(&ns->disable_done);
}

void nvmet_put_namespace(struct nvmet_ns *ns)
{
        percpu_ref_put(&ns->ref);
}

static void nvmet_ns_dev_disable(struct nvmet_ns *ns)
{
        nvmet_bdev_ns_disable(ns);
        nvmet_file_ns_disable(ns);
}

static int nvmet_p2pmem_ns_enable(struct nvmet_ns *ns)
{
        int ret;
        struct pci_dev *p2p_dev;

        if (!ns->use_p2pmem)
                return 0;

        if (!ns->bdev) {
                pr_err("peer-to-peer DMA is not supported by non-block device namespaces\n");
                return -EINVAL;
        }

        if (!blk_queue_pci_p2pdma(ns->bdev->bd_disk->queue)) {
                pr_err("peer-to-peer DMA is not supported by the driver of %s\n",
                        ns->device_path);
                return -EINVAL;
        }

        if (ns->p2p_dev) {
                ret = pci_p2pdma_distance(ns->p2p_dev, nvmet_ns_dev(ns), true);
                if (ret < 0)
                        return -EINVAL;
        } else {
                /*
                 * Right now we just check that there is p2pmem available so
                 * we can report an error to the user right away if there
                 * is not. We'll find the actual device to use once we
                 * setup the controller when the port's device is available.
                 */
                p2p_dev = pci_p2pmem_find(nvmet_ns_dev(ns));
                if (!p2p_dev) {
                        pr_err("no peer-to-peer memory is available for %s\n",
                                ns->device_path);
                        return -EINVAL;
                }

                pci_dev_put(p2p_dev);
        }

        return 0;
}

/*
 * Note: ctrl->subsys->lock should be held when calling this function
 */
static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
                                    struct nvmet_ns *ns)
{
        struct device *clients[2];
        struct pci_dev *p2p_dev;
        int ret;

        if (!ctrl->p2p_client || !ns->use_p2pmem)
                return;

        if (ns->p2p_dev) {
                ret = pci_p2pdma_distance(ns->p2p_dev, ctrl->p2p_client, true);
                if (ret < 0)
                        return;

                p2p_dev = pci_dev_get(ns->p2p_dev);
        } else {
                clients[0] = ctrl->p2p_client;
                clients[1] = nvmet_ns_dev(ns);

                p2p_dev = pci_p2pmem_find_many(clients, ARRAY_SIZE(clients));
                if (!p2p_dev) {
                        pr_err("no peer-to-peer memory is available that's supported by %s and %s\n",
                                dev_name(ctrl->p2p_client), ns->device_path);
                        return;
                }
        }

        ret = radix_tree_insert(&ctrl->p2p_ns_map, ns->nsid, p2p_dev);
        if (ret < 0)
                pci_dev_put(p2p_dev);

        pr_info("using p2pmem on %s for nsid %d\n", pci_name(p2p_dev),
                ns->nsid);
}
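
/*
 * Re-read the size of the backing block device or file and emit a
 * namespace-changed event if it differs from what was last reported.
 */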
void nvmet_ns_revalidate(struct nvmet_ns *ns)
{
        loff_t oldsize = ns->size;

        if (ns->bdev)
                nvmet_bdev_ns_revalidate(ns);
        else
                nvmet_file_ns_revalidate(ns);

        if (oldsize != ns->size)
                nvmet_ns_changed(ns->subsys, ns->nsid);
}

int nvmet_ns_enable(struct nvmet_ns *ns)
{
        struct nvmet_subsys *subsys = ns->subsys;
        struct nvmet_ctrl *ctrl;
        int ret;

        mutex_lock(&subsys->lock);
        ret = 0;

        if (nvmet_passthru_ctrl(subsys)) {
                pr_info("cannot enable both passthru and regular namespaces for a single subsystem");
                goto out_unlock;
        }

        if (ns->enabled)
                goto out_unlock;

        ret = -EMFILE;
        if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES)
                goto out_unlock;

        ret = nvmet_bdev_ns_enable(ns);
        if (ret == -ENOTBLK)
                ret = nvmet_file_ns_enable(ns);
        if (ret)
                goto out_unlock;

        ret = nvmet_p2pmem_ns_enable(ns);
        if (ret)
                goto out_dev_disable;

        list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
                nvmet_p2pmem_ns_add_p2p(ctrl, ns);

        ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
                                0, GFP_KERNEL);
        if (ret)
                goto out_dev_put;

        if (ns->nsid > subsys->max_nsid)
                subsys->max_nsid = ns->nsid;

        ret = xa_insert(&subsys->namespaces, ns->nsid, ns, GFP_KERNEL);
        if (ret)
                goto out_restore_subsys_maxnsid;

        subsys->nr_namespaces++;

        nvmet_ns_changed(subsys, ns->nsid);
        ns->enabled = true;
        ret = 0;
out_unlock:
        mutex_unlock(&subsys->lock);
        return ret;

out_restore_subsys_maxnsid:
        subsys->max_nsid = nvmet_max_nsid(subsys);
        percpu_ref_exit(&ns->ref);
out_dev_put:
        list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
                pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
out_dev_disable:
        nvmet_ns_dev_disable(ns);
        goto out_unlock;
}

void nvmet_ns_disable(struct nvmet_ns *ns)
{
        struct nvmet_subsys *subsys = ns->subsys;
        struct nvmet_ctrl *ctrl;

        mutex_lock(&subsys->lock);
        if (!ns->enabled)
                goto out_unlock;

        ns->enabled = false;
        xa_erase(&ns->subsys->namespaces, ns->nsid);
        if (ns->nsid == subsys->max_nsid)
                subsys->max_nsid = nvmet_max_nsid(subsys);

        list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
                pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));

        mutex_unlock(&subsys->lock);

        /*
         * Now that we removed the namespaces from the lookup list, we
         * can kill the percpu ref and wait for any remaining references
         * to be dropped, as well as an RCU grace period for anyone only
         * using the namespace under rcu_read_lock().  Note that we can't
         * use call_rcu here as we need to ensure the namespaces have
         * been fully destroyed before unloading the module.
         */
        percpu_ref_kill(&ns->ref);
        synchronize_rcu();
        wait_for_completion(&ns->disable_done);
        percpu_ref_exit(&ns->ref);

        mutex_lock(&subsys->lock);

        subsys->nr_namespaces--;
        nvmet_ns_changed(subsys, ns->nsid);
        nvmet_ns_dev_disable(ns);
out_unlock:
        mutex_unlock(&subsys->lock);
}

void nvmet_ns_free(struct nvmet_ns *ns)
{
        nvmet_ns_disable(ns);

        down_write(&nvmet_ana_sem);
        nvmet_ana_group_enabled[ns->anagrpid]--;
        up_write(&nvmet_ana_sem);

        kfree(ns->device_path);
        kfree(ns);
}

struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
{
        struct nvmet_ns *ns;

        ns = kzalloc(sizeof(*ns), GFP_KERNEL);
        if (!ns)
                return NULL;

        init_completion(&ns->disable_done);

        ns->nsid = nsid;
        ns->subsys = subsys;

        down_write(&nvmet_ana_sem);
        ns->anagrpid = NVMET_DEFAULT_ANA_GRPID;
        nvmet_ana_group_enabled[ns->anagrpid]++;
        up_write(&nvmet_ana_sem);

        uuid_gen(&ns->uuid);
        ns->buffered_io = false;

        return ns;
}
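
/*
 * Advance the submission queue head with a lockless cmpxchg loop so that
 * completions racing on the same queue each account for exactly one entry.
 */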
static void nvmet_update_sq_head(struct nvmet_req *req)
{
        if (req->sq->size) {
                u32 old_sqhd, new_sqhd;

                do {
                        old_sqhd = req->sq->sqhd;
                        new_sqhd = (old_sqhd + 1) % req->sq->size;
                } while (cmpxchg(&req->sq->sqhd, old_sqhd, new_sqhd) !=
                                        old_sqhd);
        }
        req->cqe->sq_head = cpu_to_le16(req->sq->sqhd & 0x0000FFFF);
}

static void nvmet_set_error(struct nvmet_req *req, u16 status)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        struct nvme_error_slot *new_error_slot;
        unsigned long flags;

        req->cqe->status = cpu_to_le16(status << 1);

        if (!ctrl || req->error_loc == NVMET_NO_ERROR_LOC)
                return;

        spin_lock_irqsave(&ctrl->error_lock, flags);
        ctrl->err_counter++;
        new_error_slot =
                &ctrl->slots[ctrl->err_counter % NVMET_ERROR_LOG_SLOTS];

        new_error_slot->error_count = cpu_to_le64(ctrl->err_counter);
        new_error_slot->sqid = cpu_to_le16(req->sq->qid);
        new_error_slot->cmdid = cpu_to_le16(req->cmd->common.command_id);
        new_error_slot->status_field = cpu_to_le16(status << 1);
        new_error_slot->param_error_location = cpu_to_le16(req->error_loc);
        new_error_slot->lba = cpu_to_le64(req->error_slba);
        new_error_slot->nsid = req->cmd->common.nsid;
        spin_unlock_irqrestore(&ctrl->error_lock, flags);

        /* set the more bit for this request */
        req->cqe->status |= cpu_to_le16(1 << 14);
}

static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
{
        if (!req->sq->sqhd_disabled)
                nvmet_update_sq_head(req);
        req->cqe->sq_id = cpu_to_le16(req->sq->qid);
        req->cqe->command_id = req->cmd->common.command_id;

        if (unlikely(status))
                nvmet_set_error(req, status);

        trace_nvmet_req_complete(req);

        if (req->ns)
                nvmet_put_namespace(req->ns);
        req->ops->queue_response(req);
}

void nvmet_req_complete(struct nvmet_req *req, u16 status)
{
        __nvmet_req_complete(req, status);
        percpu_ref_put(&req->sq->ref);
}
EXPORT_SYMBOL_GPL(nvmet_req_complete);

void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
                u16 qid, u16 size)
{
        cq->qid = qid;
        cq->size = size;
}

void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
                u16 qid, u16 size)
{
        sq->sqhd = 0;
        sq->qid = qid;
        sq->size = size;

        ctrl->sqs[qid] = sq;
}

static void nvmet_confirm_sq(struct percpu_ref *ref)
{
        struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);

        complete(&sq->confirm_done);
}

void nvmet_sq_destroy(struct nvmet_sq *sq)
{
        struct nvmet_ctrl *ctrl = sq->ctrl;

        /*
         * If this is the admin queue, complete all AERs so that our
         * queue doesn't have outstanding requests on it.
         */
        if (ctrl && ctrl->sqs && ctrl->sqs[0] == sq)
                nvmet_async_events_failall(ctrl);
        percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
        wait_for_completion(&sq->confirm_done);
        wait_for_completion(&sq->free_done);
        percpu_ref_exit(&sq->ref);

        if (ctrl) {
                /*
                 * The teardown flow may take some time, and the host may not
                 * send us keep-alive during this period, hence reset the
                 * traffic based keep-alive timer so we don't trigger a
                 * controller teardown as a result of a keep-alive expiration.
                 */
                ctrl->reset_tbkas = true;
                nvmet_ctrl_put(ctrl);
                sq->ctrl = NULL; /* allows reusing the queue later */
        }
}
EXPORT_SYMBOL_GPL(nvmet_sq_destroy);

static void nvmet_sq_free(struct percpu_ref *ref)
{
        struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);

        complete(&sq->free_done);
}

int nvmet_sq_init(struct nvmet_sq *sq)
{
        int ret;

        ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL);
        if (ret) {
                pr_err("percpu_ref init failed!\n");
                return ret;
        }
        init_completion(&sq->free_done);
        init_completion(&sq->confirm_done);

        return 0;
}
EXPORT_SYMBOL_GPL(nvmet_sq_init);
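
/*
 * Map the port's ANA state for the namespace's ANA group to an NVMe path
 * status code; 0 means the path is usable for I/O.
 */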
static inline u16 nvmet_check_ana_state(struct nvmet_port *port,
                struct nvmet_ns *ns)
{
        enum nvme_ana_state state = port->ana_state[ns->anagrpid];

        if (unlikely(state == NVME_ANA_INACCESSIBLE))
                return NVME_SC_ANA_INACCESSIBLE;
        if (unlikely(state == NVME_ANA_PERSISTENT_LOSS))
                return NVME_SC_ANA_PERSISTENT_LOSS;
        if (unlikely(state == NVME_ANA_CHANGE))
                return NVME_SC_ANA_TRANSITION;
        return 0;
}

static inline u16 nvmet_io_cmd_check_access(struct nvmet_req *req)
{
        if (unlikely(req->ns->readonly)) {
                switch (req->cmd->common.opcode) {
                case nvme_cmd_read:
                case nvme_cmd_flush:
                        break;
                default:
                        return NVME_SC_NS_WRITE_PROTECTED;
                }
        }

        return 0;
}

static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
{
        struct nvme_command *cmd = req->cmd;
        u16 ret;

        ret = nvmet_check_ctrl_status(req, cmd);
        if (unlikely(ret))
                return ret;

        if (nvmet_req_passthru_ctrl(req))
                return nvmet_parse_passthru_io_cmd(req);

        req->ns = nvmet_find_namespace(req->sq->ctrl, cmd->rw.nsid);
        if (unlikely(!req->ns)) {
                req->error_loc = offsetof(struct nvme_common_command, nsid);
                return NVME_SC_INVALID_NS | NVME_SC_DNR;
        }
        ret = nvmet_check_ana_state(req->port, req->ns);
        if (unlikely(ret)) {
                req->error_loc = offsetof(struct nvme_common_command, nsid);
                return ret;
        }
        ret = nvmet_io_cmd_check_access(req);
        if (unlikely(ret)) {
                req->error_loc = offsetof(struct nvme_common_command, nsid);
                return ret;
        }

        if (req->ns->file)
                return nvmet_file_parse_io_cmd(req);
        else
                return nvmet_bdev_parse_io_cmd(req);
}

bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
                struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops)
{
        u8 flags = req->cmd->common.flags;
        u16 status;

        req->cq = cq;
        req->sq = sq;
        req->ops = ops;
        req->sg = NULL;
        req->metadata_sg = NULL;
        req->sg_cnt = 0;
        req->metadata_sg_cnt = 0;
        req->transfer_len = 0;
        req->metadata_len = 0;
        req->cqe->status = 0;
        req->cqe->sq_head = 0;
        req->ns = NULL;
        req->error_loc = NVMET_NO_ERROR_LOC;
        req->error_slba = 0;

        /* no support for fused commands yet */
        if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
                req->error_loc = offsetof(struct nvme_common_command, flags);
                status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                goto fail;
        }

        /*
         * For fabrics, PSDT field shall describe metadata pointer (MPTR) that
         * contains an address of a single contiguous physical buffer that is
         * byte aligned.
         */
        if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) {
                req->error_loc = offsetof(struct nvme_common_command, flags);
                status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                goto fail;
        }

        if (unlikely(!req->sq->ctrl))
                /* will return an error for any non-connect command: */
                status = nvmet_parse_connect_cmd(req);
        else if (likely(req->sq->qid != 0))
                status = nvmet_parse_io_cmd(req);
        else
                status = nvmet_parse_admin_cmd(req);

        if (status)
                goto fail;

        trace_nvmet_req_init(req, req->cmd);

        if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
                status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                goto fail;
        }

        if (sq->ctrl)
                sq->ctrl->reset_tbkas = true;

        return true;

fail:
        __nvmet_req_complete(req, status);
        return false;
}
EXPORT_SYMBOL_GPL(nvmet_req_init);

void nvmet_req_uninit(struct nvmet_req *req)
{
        percpu_ref_put(&req->sq->ref);
        if (req->ns)
                nvmet_put_namespace(req->ns);
}
EXPORT_SYMBOL_GPL(nvmet_req_uninit);

bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len)
{
        if (unlikely(len != req->transfer_len)) {
                req->error_loc = offsetof(struct nvme_common_command, dptr);
                nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
                return false;
        }

        return true;
}
EXPORT_SYMBOL_GPL(nvmet_check_transfer_len);

bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len)
{
        if (unlikely(data_len > req->transfer_len)) {
                req->error_loc = offsetof(struct nvme_common_command, dptr);
                nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
                return false;
        }

        return true;
}

static unsigned int nvmet_data_transfer_len(struct nvmet_req *req)
{
        return req->transfer_len - req->metadata_len;
}

static int nvmet_req_alloc_p2pmem_sgls(struct pci_dev *p2p_dev,
                struct nvmet_req *req)
{
        req->sg = pci_p2pmem_alloc_sgl(p2p_dev, &req->sg_cnt,
                        nvmet_data_transfer_len(req));
        if (!req->sg)
                goto out_err;

        if (req->metadata_len) {
                req->metadata_sg = pci_p2pmem_alloc_sgl(p2p_dev,
                                &req->metadata_sg_cnt, req->metadata_len);
                if (!req->metadata_sg)
                        goto out_free_sg;
        }

        req->p2p_dev = p2p_dev;

        return 0;
out_free_sg:
        pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
out_err:
        return -ENOMEM;
}
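
/*
 * Return the P2P memory device selected for this namespace when the
 * controller was set up, or NULL when P2P DMA cannot be used for this
 * request (no P2PDMA support, no controller, admin queue, or no namespace).
 */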
static struct pci_dev *nvmet_req_find_p2p_dev(struct nvmet_req *req)
{
        if (!IS_ENABLED(CONFIG_PCI_P2PDMA) ||
            !req->sq->ctrl || !req->sq->qid || !req->ns)
                return NULL;
        return radix_tree_lookup(&req->sq->ctrl->p2p_ns_map, req->ns->nsid);
}

int nvmet_req_alloc_sgls(struct nvmet_req *req)
{
        struct pci_dev *p2p_dev = nvmet_req_find_p2p_dev(req);

        if (p2p_dev && !nvmet_req_alloc_p2pmem_sgls(p2p_dev, req))
                return 0;

        req->sg = sgl_alloc(nvmet_data_transfer_len(req), GFP_KERNEL,
                            &req->sg_cnt);
        if (unlikely(!req->sg))
                goto out;

        if (req->metadata_len) {
                req->metadata_sg = sgl_alloc(req->metadata_len, GFP_KERNEL,
                                             &req->metadata_sg_cnt);
                if (unlikely(!req->metadata_sg))
                        goto out_free;
        }

        return 0;
out_free:
        sgl_free(req->sg);
out:
        return -ENOMEM;
}
EXPORT_SYMBOL_GPL(nvmet_req_alloc_sgls);

void nvmet_req_free_sgls(struct nvmet_req *req)
{
        if (req->p2p_dev) {
                pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
                if (req->metadata_sg)
                        pci_p2pmem_free_sgl(req->p2p_dev, req->metadata_sg);
                req->p2p_dev = NULL;
        } else {
                sgl_free(req->sg);
                if (req->metadata_sg)
                        sgl_free(req->metadata_sg);
        }

        req->sg = NULL;
        req->metadata_sg = NULL;
        req->sg_cnt = 0;
        req->metadata_sg_cnt = 0;
}
EXPORT_SYMBOL_GPL(nvmet_req_free_sgls);
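
/*
 * Helpers extracting individual fields from the Controller Configuration
 * (CC) register value written by the host.
 */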
static inline bool nvmet_cc_en(u32 cc)
{
        return (cc >> NVME_CC_EN_SHIFT) & 0x1;
}

static inline u8 nvmet_cc_css(u32 cc)
{
        return (cc >> NVME_CC_CSS_SHIFT) & 0x7;
}

static inline u8 nvmet_cc_mps(u32 cc)
{
        return (cc >> NVME_CC_MPS_SHIFT) & 0xf;
}

static inline u8 nvmet_cc_ams(u32 cc)
{
        return (cc >> NVME_CC_AMS_SHIFT) & 0x7;
}

static inline u8 nvmet_cc_shn(u32 cc)
{
        return (cc >> NVME_CC_SHN_SHIFT) & 0x3;
}

static inline u8 nvmet_cc_iosqes(u32 cc)
{
        return (cc >> NVME_CC_IOSQES_SHIFT) & 0xf;
}

static inline u8 nvmet_cc_iocqes(u32 cc)
{
        return (cc >> NVME_CC_IOCQES_SHIFT) & 0xf;
}

static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
{
        lockdep_assert_held(&ctrl->lock);

        /*
         * Only I/O controllers should verify iosqes,iocqes.
         * Strictly speaking, the spec says a discovery controller
         * should verify iosqes,iocqes are zeroed, however that
         * would break backwards compatibility, so don't enforce it.
         */
        if (ctrl->subsys->type != NVME_NQN_DISC &&
            (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
             nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES)) {
                ctrl->csts = NVME_CSTS_CFS;
                return;
        }

        if (nvmet_cc_mps(ctrl->cc) != 0 ||
            nvmet_cc_ams(ctrl->cc) != 0 ||
            nvmet_cc_css(ctrl->cc) != 0) {
                ctrl->csts = NVME_CSTS_CFS;
                return;
        }

        ctrl->csts = NVME_CSTS_RDY;

        /*
         * Controllers that are not yet enabled should not really enforce the
         * keep alive timeout, but we still want to track a timeout and cleanup
         * in case a host died before it enabled the controller.  Hence, simply
         * reset the keep alive timer when the controller is enabled.
         */
        if (ctrl->kato)
                mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
}

static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
{
        lockdep_assert_held(&ctrl->lock);

        /* XXX: tear down queues? */
        ctrl->csts &= ~NVME_CSTS_RDY;
        ctrl->cc = 0;
}
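
/*
 * Apply a host write to the CC register: handle enable/disable transitions
 * and the shutdown notification bits, updating CSTS accordingly.
 */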
void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new)
{
        u32 old;

        mutex_lock(&ctrl->lock);
        old = ctrl->cc;
        ctrl->cc = new;

        if (nvmet_cc_en(new) && !nvmet_cc_en(old))
                nvmet_start_ctrl(ctrl);
        if (!nvmet_cc_en(new) && nvmet_cc_en(old))
                nvmet_clear_ctrl(ctrl);
        if (nvmet_cc_shn(new) && !nvmet_cc_shn(old)) {
                nvmet_clear_ctrl(ctrl);
                ctrl->csts |= NVME_CSTS_SHST_CMPLT;
        }
        if (!nvmet_cc_shn(new) && nvmet_cc_shn(old))
                ctrl->csts &= ~NVME_CSTS_SHST_CMPLT;
        mutex_unlock(&ctrl->lock);
}

static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
{
        /* command sets supported: NVMe command set: */
        ctrl->cap = (1ULL << 37);
        /* CC.EN timeout in 500msec units: */
        ctrl->cap |= (15ULL << 24);
        /* maximum queue entries supported: */
        ctrl->cap |= NVMET_QUEUE_SIZE - 1;
}
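
/*
 * Look up an existing controller on the subsystem by cntlid, verify that
 * the connecting host's NQN matches, and take a reference on it.
 */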
u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
                struct nvmet_req *req, struct nvmet_ctrl **ret)
{
        struct nvmet_subsys *subsys;
        struct nvmet_ctrl *ctrl;
        u16 status = 0;

        subsys = nvmet_find_get_subsys(req->port, subsysnqn);
        if (!subsys) {
                pr_warn("connect request for invalid subsystem %s!\n",
                        subsysnqn);
                req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
                return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
        }

        mutex_lock(&subsys->lock);
        list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
                if (ctrl->cntlid == cntlid) {
                        if (strncmp(hostnqn, ctrl->hostnqn, NVMF_NQN_SIZE)) {
                                pr_warn("hostnqn mismatch.\n");
                                continue;
                        }
                        if (!kref_get_unless_zero(&ctrl->ref))
                                continue;

                        *ret = ctrl;
                        goto out;
                }
        }

        pr_warn("could not find controller %d for subsys %s / host %s\n",
                cntlid, subsysnqn, hostnqn);
        req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
        status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;

out:
        mutex_unlock(&subsys->lock);
        nvmet_subsys_put(subsys);
        return status;
}

u16 nvmet_check_ctrl_status(struct nvmet_req *req, struct nvme_command *cmd)
{
        if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
                pr_err("got cmd %d while CC.EN == 0 on qid = %d\n",
                        cmd->common.opcode, req->sq->qid);
                return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
        }

        if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
                pr_err("got cmd %d while CSTS.RDY == 0 on qid = %d\n",
                        cmd->common.opcode, req->sq->qid);
                return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
        }
        return 0;
}

bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn)
{
        struct nvmet_host_link *p;

        lockdep_assert_held(&nvmet_config_sem);

        if (subsys->allow_any_host)
                return true;

        if (subsys->type == NVME_NQN_DISC) /* allow all access to disc subsys */
                return true;

        list_for_each_entry(p, &subsys->hosts, entry) {
                if (!strcmp(nvmet_host_name(p->host), hostnqn))
                        return true;
        }

        return false;
}

/*
 * Note: ctrl->subsys->lock should be held when calling this function
 */
static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
                struct nvmet_req *req)
{
        struct nvmet_ns *ns;
        unsigned long idx;

        if (!req->p2p_client)
                return;

        ctrl->p2p_client = get_device(req->p2p_client);

        xa_for_each(&ctrl->subsys->namespaces, idx, ns)
                nvmet_p2pmem_ns_add_p2p(ctrl, ns);
}

/*
 * Note: ctrl->subsys->lock should be held when calling this function
 */
static void nvmet_release_p2p_ns_map(struct nvmet_ctrl *ctrl)
{
        struct radix_tree_iter iter;
        void __rcu **slot;

        radix_tree_for_each_slot(slot, &ctrl->p2p_ns_map, &iter, 0)
                pci_dev_put(radix_tree_deref_slot(slot));

        put_device(ctrl->p2p_client);
}

static void nvmet_fatal_error_handler(struct work_struct *work)
{
        struct nvmet_ctrl *ctrl =
                        container_of(work, struct nvmet_ctrl, fatal_err_work);

        pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
        ctrl->ops->delete_ctrl(ctrl);
}

u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
                struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
{
        struct nvmet_subsys *subsys;
        struct nvmet_ctrl *ctrl;
        int ret;
        u16 status;

        status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
        subsys = nvmet_find_get_subsys(req->port, subsysnqn);
        if (!subsys) {
                pr_warn("connect request for invalid subsystem %s!\n",
                        subsysnqn);
                req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
                goto out;
        }

        status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
        down_read(&nvmet_config_sem);
        if (!nvmet_host_allowed(subsys, hostnqn)) {
                pr_info("connect by host %s for subsystem %s not allowed\n",
                        hostnqn, subsysnqn);
                req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
                up_read(&nvmet_config_sem);
                status = NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR;
                goto out_put_subsystem;
        }
        up_read(&nvmet_config_sem);

        status = NVME_SC_INTERNAL;
        ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
        if (!ctrl)
                goto out_put_subsystem;
        mutex_init(&ctrl->lock);

        nvmet_init_cap(ctrl);

        ctrl->port = req->port;

        INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
        INIT_LIST_HEAD(&ctrl->async_events);
        INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL);
        INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);

        memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
        memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);

        kref_init(&ctrl->ref);
        ctrl->subsys = subsys;
        WRITE_ONCE(ctrl->aen_enabled, NVMET_AEN_CFG_OPTIONAL);

        ctrl->changed_ns_list = kmalloc_array(NVME_MAX_CHANGED_NAMESPACES,
                        sizeof(__le32), GFP_KERNEL);
        if (!ctrl->changed_ns_list)
                goto out_free_ctrl;

        ctrl->sqs = kcalloc(subsys->max_qid + 1,
                        sizeof(struct nvmet_sq *),
                        GFP_KERNEL);
        if (!ctrl->sqs)
                goto out_free_changed_ns_list;

        if (subsys->cntlid_min > subsys->cntlid_max)
                goto out_free_sqs;

        ret = ida_simple_get(&cntlid_ida,
                             subsys->cntlid_min, subsys->cntlid_max,
                             GFP_KERNEL);
        if (ret < 0) {
                status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
                goto out_free_sqs;
        }
        ctrl->cntlid = ret;

        ctrl->ops = req->ops;

        /*
         * Discovery controllers may use some arbitrary high value
         * in order to cleanup stale discovery sessions
         */
        if ((ctrl->subsys->type == NVME_NQN_DISC) && !kato)
                kato = NVMET_DISC_KATO_MS;

        /* keep-alive timeout in seconds */
        ctrl->kato = DIV_ROUND_UP(kato, 1000);

        ctrl->err_counter = 0;
        spin_lock_init(&ctrl->error_lock);

        nvmet_start_keep_alive_timer(ctrl);

        mutex_lock(&subsys->lock);
        list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
        nvmet_setup_p2p_ns_map(ctrl, req);
        mutex_unlock(&subsys->lock);

        *ctrlp = ctrl;
        return 0;

out_free_sqs:
        kfree(ctrl->sqs);
out_free_changed_ns_list:
        kfree(ctrl->changed_ns_list);
out_free_ctrl:
        kfree(ctrl);
out_put_subsystem:
        nvmet_subsys_put(subsys);
out:
        return status;
}

static void nvmet_ctrl_free(struct kref *ref)
{
        struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref);
        struct nvmet_subsys *subsys = ctrl->subsys;

        mutex_lock(&subsys->lock);
        nvmet_release_p2p_ns_map(ctrl);
        list_del(&ctrl->subsys_entry);
        mutex_unlock(&subsys->lock);

        nvmet_stop_keep_alive_timer(ctrl);

        flush_work(&ctrl->async_event_work);
        cancel_work_sync(&ctrl->fatal_err_work);

        ida_simple_remove(&cntlid_ida, ctrl->cntlid);

        nvmet_async_events_free(ctrl);
        kfree(ctrl->sqs);
        kfree(ctrl->changed_ns_list);
        kfree(ctrl);

        nvmet_subsys_put(subsys);
}

void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
{
        kref_put(&ctrl->ref, nvmet_ctrl_free);
}

void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
{
        mutex_lock(&ctrl->lock);
        if (!(ctrl->csts & NVME_CSTS_CFS)) {
                ctrl->csts |= NVME_CSTS_CFS;
                schedule_work(&ctrl->fatal_err_work);
        }
        mutex_unlock(&ctrl->lock);
}
EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error);
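
/*
 * Look up a subsystem by NQN among the subsystems bound to the given port
 * and take a reference on it; the well-known discovery NQN maps to the
 * discovery subsystem.
 */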
static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
                const char *subsysnqn)
{
        struct nvmet_subsys_link *p;

        if (!port)
                return NULL;

        if (!strcmp(NVME_DISC_SUBSYS_NAME, subsysnqn)) {
                if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
                        return NULL;
                return nvmet_disc_subsys;
        }

        down_read(&nvmet_config_sem);
        list_for_each_entry(p, &port->subsystems, entry) {
                if (!strncmp(p->subsys->subsysnqn, subsysnqn,
                                NVMF_NQN_SIZE)) {
                        if (!kref_get_unless_zero(&p->subsys->ref))
                                break;
                        up_read(&nvmet_config_sem);
                        return p->subsys;
                }
        }
        up_read(&nvmet_config_sem);
        return NULL;
}

struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
                enum nvme_subsys_type type)
{
        struct nvmet_subsys *subsys;

        subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
        if (!subsys)
                return ERR_PTR(-ENOMEM);

        subsys->ver = NVMET_DEFAULT_VS;
        /* generate a random serial number as our controllers are ephemeral: */
        get_random_bytes(&subsys->serial, sizeof(subsys->serial));

        switch (type) {
        case NVME_NQN_NVME:
                subsys->max_qid = NVMET_NR_QUEUES;
                break;
        case NVME_NQN_DISC:
                subsys->max_qid = 0;
                break;
        default:
                pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
                kfree(subsys);
                return ERR_PTR(-EINVAL);
        }
        subsys->type = type;
        subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
                        GFP_KERNEL);
        if (!subsys->subsysnqn) {
                kfree(subsys);
                return ERR_PTR(-ENOMEM);
        }
        subsys->cntlid_min = NVME_CNTLID_MIN;
        subsys->cntlid_max = NVME_CNTLID_MAX;
        kref_init(&subsys->ref);

        mutex_init(&subsys->lock);
        xa_init(&subsys->namespaces);
        INIT_LIST_HEAD(&subsys->ctrls);
        INIT_LIST_HEAD(&subsys->hosts);

        return subsys;
}

static void nvmet_subsys_free(struct kref *ref)
{
        struct nvmet_subsys *subsys =
                container_of(ref, struct nvmet_subsys, ref);

        WARN_ON_ONCE(!xa_empty(&subsys->namespaces));

        xa_destroy(&subsys->namespaces);
        nvmet_passthru_subsys_free(subsys);

        kfree(subsys->subsysnqn);
        kfree_rcu(subsys->model, rcuhead);
        kfree(subsys);
}

void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys)
{
        struct nvmet_ctrl *ctrl;

        mutex_lock(&subsys->lock);
        list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
                ctrl->ops->delete_ctrl(ctrl);
        mutex_unlock(&subsys->lock);
}

void nvmet_subsys_put(struct nvmet_subsys *subsys)
{
        kref_put(&subsys->ref, nvmet_subsys_free);
}

static int __init nvmet_init(void)
{
        int error;

        nvmet_ana_group_enabled[NVMET_DEFAULT_ANA_GRPID] = 1;

        buffered_io_wq = alloc_workqueue("nvmet-buffered-io-wq",
                        WQ_MEM_RECLAIM, 0);
        if (!buffered_io_wq) {
                error = -ENOMEM;
                goto out;
        }

        error = nvmet_init_discovery();
        if (error)
                goto out_free_work_queue;

        error = nvmet_init_configfs();
        if (error)
                goto out_exit_discovery;
        return 0;

out_exit_discovery:
        nvmet_exit_discovery();
out_free_work_queue:
        destroy_workqueue(buffered_io_wq);
out:
        return error;
}

static void __exit nvmet_exit(void)
{
        nvmet_exit_configfs();
        nvmet_exit_discovery();
        ida_destroy(&cntlid_ida);
        destroy_workqueue(buffered_io_wq);

        BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
        BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
}

module_init(nvmet_init);
module_exit(nvmet_exit);

MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver);