io-cmd-bdev.c

// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe I/O command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/blkdev.h>
#include <linux/module.h>
#include "nvmet.h"

void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
{
        const struct queue_limits *ql = &bdev_get_queue(bdev)->limits;
        /* Number of logical blocks per physical block. */
        const u32 lpp = ql->physical_block_size / ql->logical_block_size;
        /* Logical blocks per physical block, 0's based. */
        const __le16 lpp0b = to0based(lpp);
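        /*
         * Worked example (assuming a 512e device with a 4096 B physical and
         * a 512 B logical block size): lpp = 4096 / 512 = 8, and to0based()
         * turns that into the 0's based, little-endian value 7 that is
         * reported to the host in the fields below.
         */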

        /*
         * For NVMe 1.2 and later, bit 1 indicates that the fields NAWUN,
         * NAWUPF, and NACWU are defined for this namespace and should be
         * used by the host for this namespace instead of the AWUN, AWUPF,
         * and ACWU fields in the Identify Controller data structure. If
         * any of these fields are zero that means that the corresponding
         * field from the identify controller data structure should be used.
         */
        id->nsfeat |= 1 << 1;
        id->nawun = lpp0b;
        id->nawupf = lpp0b;
        id->nacwu = lpp0b;

        /*
         * Bit 4 indicates that the fields NPWG, NPWA, NPDG, NPDA, and
         * NOWS are defined for this namespace and should be used by
         * the host for I/O optimization.
         */
        id->nsfeat |= 1 << 4;
        /* NPWG = Namespace Preferred Write Granularity. 0's based */
        id->npwg = lpp0b;
        /* NPWA = Namespace Preferred Write Alignment. 0's based */
        id->npwa = id->npwg;
        /* NPDG = Namespace Preferred Deallocate Granularity. 0's based */
        id->npdg = to0based(ql->discard_granularity / ql->logical_block_size);
        /* NPDA = Namespace Preferred Deallocate Alignment */
        id->npda = id->npdg;
        /* NOWS = Namespace Optimal Write Size */
        id->nows = to0based(ql->io_opt / ql->logical_block_size);
}

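/*
 * Derive the namespace's protection information settings from the backing
 * block device's registered blk_integrity profile.  Only T10 DIF Type 1 and
 * Type 3 CRC profiles are mapped onto NVMe PI types here; any other profile
 * leaves the namespace without metadata.
 */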
static void nvmet_bdev_ns_enable_integrity(struct nvmet_ns *ns)
{
        struct blk_integrity *bi = bdev_get_integrity(ns->bdev);

        if (bi) {
                ns->metadata_size = bi->tuple_size;
                if (bi->profile == &t10_pi_type1_crc)
                        ns->pi_type = NVME_NS_DPS_PI_TYPE1;
                else if (bi->profile == &t10_pi_type3_crc)
                        ns->pi_type = NVME_NS_DPS_PI_TYPE3;
                else
                        /* Unsupported metadata type */
                        ns->metadata_size = 0;
        }
}

int nvmet_bdev_ns_enable(struct nvmet_ns *ns)
{
        int ret;

        ns->bdev = blkdev_get_by_path(ns->device_path,
                        FMODE_READ | FMODE_WRITE, NULL);
        if (IS_ERR(ns->bdev)) {
                ret = PTR_ERR(ns->bdev);
                if (ret != -ENOTBLK) {
                        pr_err("failed to open block device %s: (%ld)\n",
                               ns->device_path, PTR_ERR(ns->bdev));
                }
                ns->bdev = NULL;
                return ret;
        }
        ns->size = i_size_read(ns->bdev->bd_inode);
        ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));

        ns->pi_type = 0;
        ns->metadata_size = 0;
        if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY_T10))
                nvmet_bdev_ns_enable_integrity(ns);

        return 0;
}

void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
{
        if (ns->bdev) {
                blkdev_put(ns->bdev, FMODE_WRITE | FMODE_READ);
                ns->bdev = NULL;
        }
}

void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns)
{
        ns->size = i_size_read(ns->bdev->bd_inode);
}

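/*
 * Map a block layer completion status onto the NVMe status code reported to
 * the host, and record the failing command field and LBA for the error log
 * page (for example BLK_STS_NOSPC becomes NVME_SC_CAP_EXCEEDED and
 * BLK_STS_TARGET becomes NVME_SC_LBA_RANGE).
 */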
static u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
{
        u16 status = NVME_SC_SUCCESS;

        if (likely(blk_sts == BLK_STS_OK))
                return status;
        /*
         * Right now there exists an M : 1 mapping between block layer errors
         * and NVMe status codes (see nvme_error_status()). For consistency,
         * when we reverse-map we use the most appropriate NVMe status code
         * from the group of NVMe status codes used in nvme_error_status().
         */
        switch (blk_sts) {
        case BLK_STS_NOSPC:
                status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
                req->error_loc = offsetof(struct nvme_rw_command, length);
                break;
        case BLK_STS_TARGET:
                status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
                req->error_loc = offsetof(struct nvme_rw_command, slba);
                break;
        case BLK_STS_NOTSUPP:
                req->error_loc = offsetof(struct nvme_common_command, opcode);
                switch (req->cmd->common.opcode) {
                case nvme_cmd_dsm:
                case nvme_cmd_write_zeroes:
                        status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
                        break;
                default:
                        status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
                }
                break;
        case BLK_STS_MEDIUM:
                status = NVME_SC_ACCESS_DENIED;
                req->error_loc = offsetof(struct nvme_rw_command, nsid);
                break;
        case BLK_STS_IOERR:
        default:
                status = NVME_SC_INTERNAL | NVME_SC_DNR;
                req->error_loc = offsetof(struct nvme_common_command, opcode);
        }

        switch (req->cmd->common.opcode) {
        case nvme_cmd_read:
        case nvme_cmd_write:
                req->error_slba = le64_to_cpu(req->cmd->rw.slba);
                break;
        case nvme_cmd_write_zeroes:
                req->error_slba =
                        le64_to_cpu(req->cmd->write_zeroes.slba);
                break;
        default:
                req->error_slba = 0;
        }
        return status;
}

static void nvmet_bio_done(struct bio *bio)
{
        struct nvmet_req *req = bio->bi_private;

        nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status));
        if (bio != &req->b.inline_bio)
                bio_put(bio);
}

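/*
 * Attach a bio_integrity_payload to @bio and populate it with the protection
 * information from the request's metadata scatterlist, so the block layer
 * submits the host-supplied PI alongside the data.  The seed is the first
 * data sector expressed in integrity-interval units.
 */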
#ifdef CONFIG_BLK_DEV_INTEGRITY
static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio,
                                struct sg_mapping_iter *miter)
{
        struct blk_integrity *bi;
        struct bio_integrity_payload *bip;
        struct block_device *bdev = req->ns->bdev;
        int rc;
        size_t resid, len;

        bi = bdev_get_integrity(bdev);
        if (unlikely(!bi)) {
                pr_err("Unable to locate bio_integrity\n");
                return -ENODEV;
        }

        bip = bio_integrity_alloc(bio, GFP_NOIO,
                min_t(unsigned int, req->metadata_sg_cnt, BIO_MAX_PAGES));
        if (IS_ERR(bip)) {
                pr_err("Unable to allocate bio_integrity_payload\n");
                return PTR_ERR(bip);
        }

        bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
        /* virtual start sector must be in integrity interval units */
        bip_set_seed(bip, bio->bi_iter.bi_sector >>
                     (bi->interval_exp - SECTOR_SHIFT));

        resid = bip->bip_iter.bi_size;
        while (resid > 0 && sg_miter_next(miter)) {
                len = min_t(size_t, miter->length, resid);
                rc = bio_integrity_add_page(bio, miter->page, len,
                                            offset_in_page(miter->addr));
                if (unlikely(rc != len)) {
                        pr_err("bio_integrity_add_page() failed; %d\n", rc);
                        sg_miter_stop(miter);
                        return -ENOMEM;
                }

                resid -= len;
                if (len < miter->length)
                        miter->consumed -= miter->length - len;
        }
        sg_miter_stop(miter);

        return 0;
}
#else
static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio,
                                struct sg_mapping_iter *miter)
{
        return -EINVAL;
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

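/*
 * Build one or more bios from the request's data scatterlist.  Whenever a
 * bio fills up, it is chained to a freshly allocated successor and submitted;
 * the surrounding plug lets the block layer batch the resulting submissions.
 * When protection information is in use, a matching integrity payload is
 * attached to every bio before it is submitted.
 */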
static void nvmet_bdev_execute_rw(struct nvmet_req *req)
{
        int sg_cnt = req->sg_cnt;
        struct bio *bio;
        struct scatterlist *sg;
        struct blk_plug plug;
        sector_t sector;
        int op, i, rc;
        struct sg_mapping_iter prot_miter;
        unsigned int iter_flags;
        unsigned int total_len = nvmet_rw_data_len(req) + req->metadata_len;

        if (!nvmet_check_transfer_len(req, total_len))
                return;

        if (!req->sg_cnt) {
                nvmet_req_complete(req, 0);
                return;
        }

        if (req->cmd->rw.opcode == nvme_cmd_write) {
                op = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
                if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
                        op |= REQ_FUA;
                iter_flags = SG_MITER_TO_SG;
        } else {
                op = REQ_OP_READ;
                iter_flags = SG_MITER_FROM_SG;
        }

        if (is_pci_p2pdma_page(sg_page(req->sg)))
                op |= REQ_NOMERGE;

        sector = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);

        if (nvmet_use_inline_bvec(req)) {
                bio = &req->b.inline_bio;
                bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
        } else {
                bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
        }
        bio_set_dev(bio, req->ns->bdev);
        bio->bi_iter.bi_sector = sector;
        bio->bi_private = req;
        bio->bi_end_io = nvmet_bio_done;
        bio->bi_opf = op;

        blk_start_plug(&plug);
        if (req->metadata_len)
                sg_miter_start(&prot_miter, req->metadata_sg,
                               req->metadata_sg_cnt, iter_flags);

        for_each_sg(req->sg, sg, req->sg_cnt, i) {
                while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
                                != sg->length) {
                        struct bio *prev = bio;

                        if (req->metadata_len) {
                                rc = nvmet_bdev_alloc_bip(req, bio,
                                                          &prot_miter);
                                if (unlikely(rc)) {
                                        bio_io_error(bio);
                                        return;
                                }
                        }

                        bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
                        bio_set_dev(bio, req->ns->bdev);
                        bio->bi_iter.bi_sector = sector;
                        bio->bi_opf = op;

                        bio_chain(bio, prev);
                        submit_bio(prev);
                }

                sector += sg->length >> 9;
                sg_cnt--;
        }

        if (req->metadata_len) {
                rc = nvmet_bdev_alloc_bip(req, bio, &prot_miter);
                if (unlikely(rc)) {
                        bio_io_error(bio);
                        return;
                }
        }

        submit_bio(bio);
        blk_finish_plug(&plug);
}

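/*
 * A zero-length bio with REQ_PREFLUSH set asks the device to flush its
 * volatile write cache; no data is transferred for an NVMe Flush command.
 */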
static void nvmet_bdev_execute_flush(struct nvmet_req *req)
{
        struct bio *bio = &req->b.inline_bio;

        if (!nvmet_check_transfer_len(req, 0))
                return;

        bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
        bio_set_dev(bio, req->ns->bdev);
        bio->bi_private = req;
        bio->bi_end_io = nvmet_bio_done;
        bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

        submit_bio(bio);
}

u16 nvmet_bdev_flush(struct nvmet_req *req)
{
        if (blkdev_issue_flush(req->ns->bdev, GFP_KERNEL))
                return NVME_SC_INTERNAL | NVME_SC_DNR;
        return 0;
}

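/*
 * Issue a discard for a single DSM range.  The starting LBA and block count
 * are converted from namespace logical blocks to 512-byte sectors before
 * being handed to __blkdev_issue_discard(), which chains the work onto the
 * bio returned through @bio.  -EOPNOTSUPP is not treated as an error here.
 */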
static u16 nvmet_bdev_discard_range(struct nvmet_req *req,
                struct nvme_dsm_range *range, struct bio **bio)
{
        struct nvmet_ns *ns = req->ns;
        int ret;

        ret = __blkdev_issue_discard(ns->bdev,
                        nvmet_lba_to_sect(ns, range->slba),
                        le32_to_cpu(range->nlb) << (ns->blksize_shift - 9),
                        GFP_KERNEL, 0, bio);
        if (ret && ret != -EOPNOTSUPP) {
                req->error_slba = le64_to_cpu(range->slba);
                return errno_to_nvme_status(req, ret);
        }
        return NVME_SC_SUCCESS;
}

static void nvmet_bdev_execute_discard(struct nvmet_req *req)
{
        struct nvme_dsm_range range;
        struct bio *bio = NULL;
        int i;
        u16 status;

        for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
                status = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
                                sizeof(range));
                if (status)
                        break;

                status = nvmet_bdev_discard_range(req, &range, &bio);
                if (status)
                        break;
        }

        if (bio) {
                bio->bi_private = req;
                bio->bi_end_io = nvmet_bio_done;
                if (status)
                        bio_io_error(bio);
                else
                        submit_bio(bio);
        } else {
                nvmet_req_complete(req, status);
        }
}

static void nvmet_bdev_execute_dsm(struct nvmet_req *req)
{
        if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req)))
                return;

        switch (le32_to_cpu(req->cmd->dsm.attributes)) {
        case NVME_DSMGMT_AD:
                nvmet_bdev_execute_discard(req);
                return;
        case NVME_DSMGMT_IDR:
        case NVME_DSMGMT_IDW:
        default:
                /* Not supported yet */
                nvmet_req_complete(req, 0);
                return;
        }
}

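/*
 * NVMe Write Zeroes carries a 0's based block count, hence the "+ 1" below;
 * the shift by (blksize_shift - 9) converts namespace logical blocks to
 * 512-byte sectors for __blkdev_issue_zeroout().
 */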
static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req)
{
        struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes;
        struct bio *bio = NULL;
        sector_t sector;
        sector_t nr_sector;
        int ret;

        if (!nvmet_check_transfer_len(req, 0))
                return;

        sector = nvmet_lba_to_sect(req->ns, write_zeroes->slba);
        nr_sector = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
                        (req->ns->blksize_shift - 9));

        ret = __blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector,
                        GFP_KERNEL, &bio, 0);
        if (bio) {
                bio->bi_private = req;
                bio->bi_end_io = nvmet_bio_done;
                submit_bio(bio);
        } else {
                nvmet_req_complete(req, errno_to_nvme_status(req, ret));
        }
}

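/*
 * Select the execute handler for a block-device backed I/O command.  For
 * reads and writes, metadata_len is only set when both the controller and
 * the namespace support protection information, which is what later drives
 * the integrity handling in nvmet_bdev_execute_rw().
 */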
u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
{
        struct nvme_command *cmd = req->cmd;

        switch (cmd->common.opcode) {
        case nvme_cmd_read:
        case nvme_cmd_write:
                req->execute = nvmet_bdev_execute_rw;
                if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns))
                        req->metadata_len = nvmet_rw_metadata_len(req);
                return 0;
        case nvme_cmd_flush:
                req->execute = nvmet_bdev_execute_flush;
                return 0;
        case nvme_cmd_dsm:
                req->execute = nvmet_bdev_execute_dsm;
                return 0;
        case nvme_cmd_write_zeroes:
                req->execute = nvmet_bdev_execute_write_zeroes;
                return 0;
        default:
                pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
                       req->sq->qid);
                req->error_loc = offsetof(struct nvme_common_command, opcode);
                return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
        }
}