nvme.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 NXP Semiconductors
 * Copyright (C) 2017 Bin Meng <bmeng.cn@gmail.com>
 */

#include <common.h>
#include <dm.h>
#include <errno.h>
#include <memalign.h>
#include <pci.h>
#include <time.h>
#include <dm/device-internal.h>
#include "nvme.h"

#define NVME_Q_DEPTH		2
#define NVME_AQ_DEPTH		2
#define NVME_SQ_SIZE(depth)	(depth * sizeof(struct nvme_command))
#define NVME_CQ_SIZE(depth)	(depth * sizeof(struct nvme_completion))
#define ADMIN_TIMEOUT		60
#define IO_TIMEOUT		30
#define MAX_PRP_POOL		512

enum nvme_queue_id {
	NVME_ADMIN_Q,
	NVME_IO_Q,
	NVME_Q_NUM,
};

/*
 * An NVM Express queue. Each device has at least two (one for admin
 * commands and one for I/O commands).
 */
struct nvme_queue {
	struct nvme_dev *dev;
	struct nvme_command *sq_cmds;
	struct nvme_completion *cqes;
	wait_queue_head_t sq_full;
	u32 __iomem *q_db;
	u16 q_depth;
	s16 cq_vector;
	u16 sq_head;
	u16 sq_tail;
	u16 cq_head;
	u16 qid;
	u8 cq_phase;
	u8 cqe_seen;
	unsigned long cmdid_data[];
};

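/**
 * nvme_wait_ready() - wait for the controller ready status to match
 *
 * Polls CSTS.RDY until it reflects the requested state or until the
 * timeout advertised in the CAP register expires.
 *
 * @dev:	NVMe controller
 * @enabled:	true to wait for RDY to be set, false to wait for it to clear
 * Return: 0 on success, -ETIME on timeout
 */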
static int nvme_wait_ready(struct nvme_dev *dev, bool enabled)
{
	u32 bit = enabled ? NVME_CSTS_RDY : 0;
	int timeout;
	ulong start;

	/* Timeout field in the CAP register is in 500 millisecond units */
	timeout = NVME_CAP_TIMEOUT(dev->cap) * 500;

	start = get_timer(0);
	while (get_timer(start) < timeout) {
		if ((readl(&dev->bar->csts) & NVME_CSTS_RDY) == bit)
			return 0;
	}

	return -ETIME;
}

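/**
 * nvme_setup_prps() - set up the PRP2 entry or PRP list for a transfer
 *
 * PRP1 always covers the (possibly unaligned) start of the buffer. If the
 * remainder fits within one more page, PRP2 points at it directly;
 * otherwise a PRP list is built in dev->prp_pool and PRP2 points at that
 * list. The pool is grown in whole pages when the transfer needs more
 * entries than are currently available.
 *
 * @dev:	NVMe controller
 * @prp2:	returns the value to place in the command's PRP2 field
 * @total_len:	transfer length in bytes
 * @dma_addr:	buffer address that the caller passes as PRP1
 * Return: 0 on success, -ENOMEM if the PRP pool cannot be grown
 */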
static int nvme_setup_prps(struct nvme_dev *dev, u64 *prp2,
			   int total_len, u64 dma_addr)
{
	u32 page_size = dev->page_size;
	int offset = dma_addr & (page_size - 1);
	u64 *prp_pool;
	int length = total_len;
	int i, nprps;
	u32 prps_per_page = (page_size >> 3) - 1;
	u32 num_pages;

	length -= (page_size - offset);

	if (length <= 0) {
		*prp2 = 0;
		return 0;
	}

	if (length)
		dma_addr += (page_size - offset);

	if (length <= page_size) {
		*prp2 = dma_addr;
		return 0;
	}

	nprps = DIV_ROUND_UP(length, page_size);
	num_pages = DIV_ROUND_UP(nprps, prps_per_page);

	if (nprps > dev->prp_entry_num) {
		free(dev->prp_pool);
		/*
		 * Always increase in increments of pages. It doesn't waste
		 * much memory and reduces the number of allocations.
		 */
		dev->prp_pool = memalign(page_size, num_pages * page_size);
		if (!dev->prp_pool) {
			printf("Error: malloc prp_pool fail\n");
			return -ENOMEM;
		}
		dev->prp_entry_num = prps_per_page * num_pages;
	}

	prp_pool = dev->prp_pool;
	i = 0;
	while (nprps) {
		if (i == ((page_size >> 3) - 1)) {
			*(prp_pool + i) = cpu_to_le64((ulong)prp_pool +
					page_size);
			i = 0;
			prp_pool += page_size;
		}
		*(prp_pool + i++) = cpu_to_le64(dma_addr);
		dma_addr += page_size;
		nprps--;
	}
	*prp2 = (ulong)dev->prp_pool;

	flush_dcache_range((ulong)dev->prp_pool, (ulong)dev->prp_pool +
			   dev->prp_entry_num * sizeof(u64));

	return 0;
}

static __le16 nvme_get_cmd_id(void)
{
	static unsigned short cmdid;

	return cpu_to_le16((cmdid < USHRT_MAX) ? cmdid++ : 0);
}

static u16 nvme_read_completion_status(struct nvme_queue *nvmeq, u16 index)
{
	u64 start = (ulong)&nvmeq->cqes[index];
	u64 stop = start + sizeof(struct nvme_completion);

	invalidate_dcache_range(start, stop);

	return le16_to_cpu(readw(&(nvmeq->cqes[index].status)));
}

/**
 * nvme_submit_cmd() - copy a command into a queue and ring the doorbell
 *
 * @nvmeq:	The queue to use
 * @cmd:	The command to send
 */
static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
{
	u16 tail = nvmeq->sq_tail;

	memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
	flush_dcache_range((ulong)&nvmeq->sq_cmds[tail],
			   (ulong)&nvmeq->sq_cmds[tail] + sizeof(*cmd));

	if (++tail == nvmeq->q_depth)
		tail = 0;
	writel(tail, nvmeq->q_db);
	nvmeq->sq_tail = tail;
}

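/**
 * nvme_submit_sync_cmd() - submit a command and poll for its completion
 *
 * Assigns a command ID, rings the submission doorbell, then busy-waits on
 * the completion queue until the phase bit of the expected entry flips,
 * and finally advances the completion queue head doorbell.
 *
 * @nvmeq:	the queue to submit on
 * @cmd:	the command to send
 * @result:	if non-NULL, receives the completion's result field
 * @timeout:	value used to bound the polling loop
 * Return: 0 on success, -ETIMEDOUT on timeout, -EIO if the controller
 *	   reported an error status
 */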
static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
				struct nvme_command *cmd,
				u32 *result, unsigned timeout)
{
	u16 head = nvmeq->cq_head;
	u16 phase = nvmeq->cq_phase;
	u16 status;
	ulong start_time;
	ulong timeout_us = timeout * 100000;

	cmd->common.command_id = nvme_get_cmd_id();
	nvme_submit_cmd(nvmeq, cmd);

	start_time = timer_get_us();

	for (;;) {
		status = nvme_read_completion_status(nvmeq, head);
		if ((status & 0x01) == phase)
			break;
		if (timeout_us > 0 && (timer_get_us() - start_time)
		    >= timeout_us)
			return -ETIMEDOUT;
	}

	status >>= 1;
	if (status) {
		printf("ERROR: status = %x, phase = %d, head = %d\n",
		       status, phase, head);
		status = 0;
		if (++head == nvmeq->q_depth) {
			head = 0;
			phase = !phase;
		}
		writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
		nvmeq->cq_head = head;
		nvmeq->cq_phase = phase;
		return -EIO;
	}

	if (result)
		*result = le32_to_cpu(readl(&(nvmeq->cqes[head].result)));

	if (++head == nvmeq->q_depth) {
		head = 0;
		phase = !phase;
	}
	writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
	nvmeq->cq_head = head;
	nvmeq->cq_phase = phase;

	return status;
}

static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
				 u32 *result)
{
	return nvme_submit_sync_cmd(dev->queues[NVME_ADMIN_Q], cmd,
				    result, ADMIN_TIMEOUT);
}

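/**
 * nvme_alloc_queue() - allocate the host memory for a queue pair
 *
 * Allocates zeroed, 4 KB aligned submission and completion rings, points
 * the queue at its doorbell register and registers it in dev->queues[].
 *
 * @dev:	NVMe controller
 * @qid:	queue identifier (0 is the admin queue)
 * @depth:	number of entries in each ring
 * Return: the new queue, or NULL on allocation failure
 */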
static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev,
					   int qid, int depth)
{
	struct nvme_queue *nvmeq = malloc(sizeof(*nvmeq));
	if (!nvmeq)
		return NULL;
	memset(nvmeq, 0, sizeof(*nvmeq));

	nvmeq->cqes = (void *)memalign(4096, NVME_CQ_SIZE(depth));
	if (!nvmeq->cqes)
		goto free_nvmeq;
	memset((void *)nvmeq->cqes, 0, NVME_CQ_SIZE(depth));

	nvmeq->sq_cmds = (void *)memalign(4096, NVME_SQ_SIZE(depth));
	if (!nvmeq->sq_cmds)
		goto free_queue;
	memset((void *)nvmeq->sq_cmds, 0, NVME_SQ_SIZE(depth));

	nvmeq->dev = dev;

	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
	nvmeq->q_depth = depth;
	nvmeq->qid = qid;
	dev->queue_count++;
	dev->queues[qid] = nvmeq;

	return nvmeq;

free_queue:
	free((void *)nvmeq->cqes);
free_nvmeq:
	free(nvmeq);

	return NULL;
}

static int nvme_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.delete_queue.opcode = opcode;
	c.delete_queue.qid = cpu_to_le16(id);

	return nvme_submit_admin_cmd(dev, &c, NULL);
}

static int nvme_delete_sq(struct nvme_dev *dev, u16 sqid)
{
	return nvme_delete_queue(dev, nvme_admin_delete_sq, sqid);
}

static int nvme_delete_cq(struct nvme_dev *dev, u16 cqid)
{
	return nvme_delete_queue(dev, nvme_admin_delete_cq, cqid);
}

static int nvme_enable_ctrl(struct nvme_dev *dev)
{
	dev->ctrl_config &= ~NVME_CC_SHN_MASK;
	dev->ctrl_config |= NVME_CC_ENABLE;
	writel(cpu_to_le32(dev->ctrl_config), &dev->bar->cc);

	return nvme_wait_ready(dev, true);
}

static int nvme_disable_ctrl(struct nvme_dev *dev)
{
	dev->ctrl_config &= ~NVME_CC_SHN_MASK;
	dev->ctrl_config &= ~NVME_CC_ENABLE;
	writel(cpu_to_le32(dev->ctrl_config), &dev->bar->cc);

	return nvme_wait_ready(dev, false);
}

static void nvme_free_queue(struct nvme_queue *nvmeq)
{
	free((void *)nvmeq->cqes);
	free(nvmeq->sq_cmds);
	free(nvmeq);
}

static void nvme_free_queues(struct nvme_dev *dev, int lowest)
{
	int i;

	for (i = dev->queue_count - 1; i >= lowest; i--) {
		struct nvme_queue *nvmeq = dev->queues[i];
		dev->queue_count--;
		dev->queues[i] = NULL;
		nvme_free_queue(nvmeq);
	}
}

static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
{
	struct nvme_dev *dev = nvmeq->dev;

	nvmeq->sq_tail = 0;
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
	memset((void *)nvmeq->cqes, 0, NVME_CQ_SIZE(nvmeq->q_depth));
	flush_dcache_range((ulong)nvmeq->cqes,
			   (ulong)nvmeq->cqes + NVME_CQ_SIZE(nvmeq->q_depth));
	dev->online_queues++;
}

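/**
 * nvme_configure_admin_queue() - set up the admin queue and enable the device
 *
 * Negotiates the memory page size against CAP.MPSMIN/MPSMAX, disables the
 * controller, programs AQA/ASQ/ACQ, builds the controller configuration
 * (CC) value and re-enables the controller.
 *
 * @dev:	NVMe controller
 * Return: 0 on success, negative error code otherwise
 */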
static int nvme_configure_admin_queue(struct nvme_dev *dev)
{
	int result;
	u32 aqa;
	u64 cap = dev->cap;
	struct nvme_queue *nvmeq;
	/* most architectures use 4KB as the page size */
	unsigned page_shift = 12;
	unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12;
	unsigned dev_page_max = NVME_CAP_MPSMAX(cap) + 12;

	if (page_shift < dev_page_min) {
		debug("Device minimum page size (%u) too large for host (%u)\n",
		      1 << dev_page_min, 1 << page_shift);
		return -ENODEV;
	}

	if (page_shift > dev_page_max) {
		debug("Device maximum page size (%u) smaller than host (%u)\n",
		      1 << dev_page_max, 1 << page_shift);
		page_shift = dev_page_max;
	}

	result = nvme_disable_ctrl(dev);
	if (result < 0)
		return result;

	nvmeq = dev->queues[NVME_ADMIN_Q];
	if (!nvmeq) {
		nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH);
		if (!nvmeq)
			return -ENOMEM;
	}

	aqa = nvmeq->q_depth - 1;
	aqa |= aqa << 16;

	dev->page_size = 1 << page_shift;

	dev->ctrl_config = NVME_CC_CSS_NVM;
	dev->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
	dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
	dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;

	writel(aqa, &dev->bar->aqa);
	nvme_writeq((ulong)nvmeq->sq_cmds, &dev->bar->asq);
	nvme_writeq((ulong)nvmeq->cqes, &dev->bar->acq);

	result = nvme_enable_ctrl(dev);
	if (result)
		goto free_nvmeq;

	nvmeq->cq_vector = 0;

	nvme_init_queue(dev->queues[NVME_ADMIN_Q], 0);

	return result;

free_nvmeq:
	nvme_free_queues(dev, 0);

	return result;
}

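/*
 * nvme_alloc_cq()/nvme_alloc_sq() issue the Create I/O Completion Queue and
 * Create I/O Submission Queue admin commands for a queue pair that has
 * already been allocated in host memory by nvme_alloc_queue().
 */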
static int nvme_alloc_cq(struct nvme_dev *dev, u16 qid,
			 struct nvme_queue *nvmeq)
{
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;

	memset(&c, 0, sizeof(c));
	c.create_cq.opcode = nvme_admin_create_cq;
	c.create_cq.prp1 = cpu_to_le64((ulong)nvmeq->cqes);
	c.create_cq.cqid = cpu_to_le16(qid);
	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_cq.cq_flags = cpu_to_le16(flags);
	c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);

	return nvme_submit_admin_cmd(dev, &c, NULL);
}

static int nvme_alloc_sq(struct nvme_dev *dev, u16 qid,
			 struct nvme_queue *nvmeq)
{
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;

	memset(&c, 0, sizeof(c));
	c.create_sq.opcode = nvme_admin_create_sq;
	c.create_sq.prp1 = cpu_to_le64((ulong)nvmeq->sq_cmds);
	c.create_sq.sqid = cpu_to_le16(qid);
	c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_sq.sq_flags = cpu_to_le16(flags);
	c.create_sq.cqid = cpu_to_le16(qid);

	return nvme_submit_admin_cmd(dev, &c, NULL);
}

int nvme_identify(struct nvme_dev *dev, unsigned nsid,
		  unsigned cns, dma_addr_t dma_addr)
{
	struct nvme_command c;
	u32 page_size = dev->page_size;
	int offset = dma_addr & (page_size - 1);
	int length = sizeof(struct nvme_id_ctrl);
	int ret;

	memset(&c, 0, sizeof(c));
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.prp1 = cpu_to_le64(dma_addr);

	length -= (page_size - offset);
	if (length <= 0) {
		c.identify.prp2 = 0;
	} else {
		dma_addr += (page_size - offset);
		c.identify.prp2 = cpu_to_le64(dma_addr);
	}

	c.identify.cns = cpu_to_le32(cns);

	ret = nvme_submit_admin_cmd(dev, &c, NULL);
	if (!ret)
		invalidate_dcache_range(dma_addr,
					dma_addr + sizeof(struct nvme_id_ctrl));

	return ret;
}

int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
		      dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_get_features;
	c.features.nsid = cpu_to_le32(nsid);
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);

	/*
	 * TODO: add cache invalidate operation when the size of
	 * the DMA buffer is known
	 */

	return nvme_submit_admin_cmd(dev, &c, result);
}

int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
		      dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_set_features;
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

	/*
	 * TODO: add cache flush operation when the size of
	 * the DMA buffer is known
	 */

	return nvme_submit_admin_cmd(dev, &c, result);
}

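/**
 * nvme_create_queue() - register an I/O queue pair with the controller
 *
 * Creates the completion queue first, then the matching submission queue,
 * issuing delete commands to roll back if either step fails.
 *
 * @nvmeq:	host-side queue allocated by nvme_alloc_queue()
 * @qid:	queue identifier to assign
 * Return: 0 on success, negative error code otherwise
 */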
static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
{
	struct nvme_dev *dev = nvmeq->dev;
	int result;

	nvmeq->cq_vector = qid - 1;
	result = nvme_alloc_cq(dev, qid, nvmeq);
	if (result < 0)
		goto release_cq;

	result = nvme_alloc_sq(dev, qid, nvmeq);
	if (result < 0)
		goto release_sq;

	nvme_init_queue(nvmeq, qid);

	return result;

release_sq:
	nvme_delete_sq(dev, qid);
release_cq:
	nvme_delete_cq(dev, qid);

	return result;
}

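/**
 * nvme_set_queue_count() - request a number of I/O queues from the controller
 *
 * Uses the Number of Queues feature (Set Features) and reports how many
 * submission/completion queues the controller actually granted.
 *
 * @dev:	NVMe controller
 * @count:	number of I/O queues requested
 * Return: number of granted queues, 0, or a negative error code
 */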
static int nvme_set_queue_count(struct nvme_dev *dev, int count)
{
	int status;
	u32 result;
	u32 q_count = (count - 1) | ((count - 1) << 16);

	status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES,
			q_count, 0, &result);

	if (status < 0)
		return status;
	if (status > 1)
		return 0;

	return min(result & 0xffff, result >> 16) + 1;
}

static void nvme_create_io_queues(struct nvme_dev *dev)
{
	unsigned int i;

	for (i = dev->queue_count; i <= dev->max_qid; i++)
		if (!nvme_alloc_queue(dev, i, dev->q_depth))
			break;

	for (i = dev->online_queues; i <= dev->queue_count - 1; i++)
		if (nvme_create_queue(dev->queues[i], i))
			break;
}

static int nvme_setup_io_queues(struct nvme_dev *dev)
{
	int nr_io_queues;
	int result;

	nr_io_queues = 1;
	result = nvme_set_queue_count(dev, nr_io_queues);
	if (result <= 0)
		return result;

	dev->max_qid = nr_io_queues;

	/* Free previously allocated queues */
	nvme_free_queues(dev, nr_io_queues + 1);
	nvme_create_io_queues(dev);

	return 0;
}

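/**
 * nvme_get_info_from_identify() - cache controller data from Identify
 *
 * Issues an Identify Controller command and stores the namespace count,
 * serial, model and firmware revision strings, deriving max_transfer_shift
 * from the MDTS field.
 *
 * @dev:	NVMe controller
 * Return: 0 on success, negative error code otherwise
 */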
static int nvme_get_info_from_identify(struct nvme_dev *dev)
{
	struct nvme_id_ctrl *ctrl;
	int ret;
	int shift = NVME_CAP_MPSMIN(dev->cap) + 12;

	ctrl = memalign(dev->page_size, sizeof(struct nvme_id_ctrl));
	if (!ctrl)
		return -ENOMEM;

	ret = nvme_identify(dev, 0, 1, (dma_addr_t)(long)ctrl);
	if (ret) {
		free(ctrl);
		return -EIO;
	}

	dev->nn = le32_to_cpu(ctrl->nn);
	dev->vwc = ctrl->vwc;
	memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
	memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
	memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
	if (ctrl->mdts)
		dev->max_transfer_shift = (ctrl->mdts + shift);
	else {
		/*
		 * Maximum Data Transfer Size (MDTS) field indicates the maximum
		 * data transfer size between the host and the controller. The
		 * host should not submit a command that exceeds this transfer
		 * size. The value is in units of the minimum memory page size
		 * and is reported as a power of two (2^n).
		 *
		 * The spec also says: a value of 0h indicates no restrictions
		 * on transfer size. But in nvme_blk_read/write() below we have
		 * the following algorithm for maximum number of logic blocks
		 * per transfer:
		 *
		 * u16 lbas = 1 << (dev->max_transfer_shift - ns->lba_shift);
		 *
		 * In order for lbas not to overflow, the maximum number is 15
		 * which means dev->max_transfer_shift = 15 + 9 (ns->lba_shift).
		 * Let's use 20 which provides 1MB size.
		 */
		dev->max_transfer_shift = 20;
	}

	free(ctrl);
	return 0;
}

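/**
 * nvme_get_namespace_id() - return the namespace ID and EUI-64 of a device
 *
 * @udev:	NVMe block device
 * @ns_id:	if non-NULL, receives the namespace ID
 * @eui64:	if non-NULL, receives the IEEE Extended Unique Identifier
 * Return: 0
 */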
int nvme_get_namespace_id(struct udevice *udev, u32 *ns_id, u8 *eui64)
{
	struct nvme_ns *ns = dev_get_priv(udev);

	if (ns_id)
		*ns_id = ns->ns_id;
	if (eui64)
		memcpy(eui64, ns->eui64, sizeof(ns->eui64));

	return 0;
}

int nvme_scan_namespace(void)
{
	struct uclass *uc;
	struct udevice *dev;
	int ret;

	ret = uclass_get(UCLASS_NVME, &uc);
	if (ret)
		return ret;

	uclass_foreach_dev(dev, uc) {
		ret = device_probe(dev);
		if (ret)
			return ret;
	}

	return 0;
}

static int nvme_blk_probe(struct udevice *udev)
{
	struct nvme_dev *ndev = dev_get_priv(udev->parent);
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
	struct nvme_ns *ns = dev_get_priv(udev);
	u8 flbas;
	struct pci_child_platdata *pplat;
	struct nvme_id_ns *id;

	id = memalign(ndev->page_size, sizeof(struct nvme_id_ns));
	if (!id)
		return -ENOMEM;

	memset(ns, 0, sizeof(*ns));
	ns->dev = ndev;
	/* extract the namespace id from the block device name */
	ns->ns_id = trailing_strtol(udev->name) + 1;
	if (nvme_identify(ndev, ns->ns_id, 0, (dma_addr_t)(long)id)) {
		free(id);
		return -EIO;
	}

	memcpy(&ns->eui64, &id->eui64, sizeof(id->eui64));
	flbas = id->flbas & NVME_NS_FLBAS_LBA_MASK;
	ns->flbas = flbas;
	ns->lba_shift = id->lbaf[flbas].ds;
	ns->mode_select_num_blocks = le64_to_cpu(id->nsze);
	ns->mode_select_block_len = 1 << ns->lba_shift;
	list_add(&ns->list, &ndev->namespaces);

	desc->lba = ns->mode_select_num_blocks;
	desc->log2blksz = ns->lba_shift;
	desc->blksz = 1 << ns->lba_shift;
	desc->bdev = udev;
	pplat = dev_get_parent_platdata(udev->parent);
	sprintf(desc->vendor, "0x%.4x", pplat->vendor);
	memcpy(desc->product, ndev->serial, sizeof(ndev->serial));
	memcpy(desc->revision, ndev->firmware_rev, sizeof(ndev->firmware_rev));

	free(id);
	return 0;
}

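/**
 * nvme_blk_rw() - common path for block reads and writes
 *
 * Splits the request into chunks of at most
 * 1 << (dev->max_transfer_shift - ns->lba_shift) blocks, sets up PRPs for
 * each chunk and issues NVMe read/write commands on the I/O queue.
 *
 * @udev:	the nvme-blk device
 * @blknr:	start LBA
 * @blkcnt:	number of blocks to transfer
 * @buffer:	data buffer
 * @read:	true for a read, false for a write
 * Return: number of blocks actually transferred
 */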
static ulong nvme_blk_rw(struct udevice *udev, lbaint_t blknr,
			 lbaint_t blkcnt, void *buffer, bool read)
{
	struct nvme_ns *ns = dev_get_priv(udev);
	struct nvme_dev *dev = ns->dev;
	struct nvme_command c;
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
	int status;
	u64 prp2;
	u64 total_len = blkcnt << desc->log2blksz;
	u64 temp_len = total_len;
	u64 slba = blknr;
	u16 lbas = 1 << (dev->max_transfer_shift - ns->lba_shift);
	u64 total_lbas = blkcnt;
	/* keep the buffer start; 'buffer' is advanced in the loop below */
	void *start_buffer = buffer;

	flush_dcache_range((unsigned long)buffer,
			   (unsigned long)buffer + total_len);

	c.rw.opcode = read ? nvme_cmd_read : nvme_cmd_write;
	c.rw.flags = 0;
	c.rw.nsid = cpu_to_le32(ns->ns_id);
	c.rw.control = 0;
	c.rw.dsmgmt = 0;
	c.rw.reftag = 0;
	c.rw.apptag = 0;
	c.rw.appmask = 0;
	c.rw.metadata = 0;

	while (total_lbas) {
		if (total_lbas < lbas) {
			lbas = (u16)total_lbas;
			total_lbas = 0;
		} else {
			total_lbas -= lbas;
		}

		if (nvme_setup_prps(dev, &prp2,
				    lbas << ns->lba_shift, (ulong)buffer))
			return -EIO;
		c.rw.slba = cpu_to_le64(slba);
		slba += lbas;
		c.rw.length = cpu_to_le16(lbas - 1);
		c.rw.prp1 = cpu_to_le64((ulong)buffer);
		c.rw.prp2 = cpu_to_le64(prp2);
		status = nvme_submit_sync_cmd(dev->queues[NVME_IO_Q],
				&c, NULL, IO_TIMEOUT);
		if (status)
			break;

		temp_len -= (u32)lbas << ns->lba_shift;
		buffer += lbas << ns->lba_shift;
	}

	if (read)
		invalidate_dcache_range((unsigned long)start_buffer,
					(unsigned long)start_buffer + total_len);

	return (total_len - temp_len) >> desc->log2blksz;
}

static ulong nvme_blk_read(struct udevice *udev, lbaint_t blknr,
			   lbaint_t blkcnt, void *buffer)
{
	return nvme_blk_rw(udev, blknr, blkcnt, buffer, true);
}

static ulong nvme_blk_write(struct udevice *udev, lbaint_t blknr,
			    lbaint_t blkcnt, const void *buffer)
{
	return nvme_blk_rw(udev, blknr, blkcnt, (void *)buffer, false);
}

static const struct blk_ops nvme_blk_ops = {
	.read	= nvme_blk_read,
	.write	= nvme_blk_write,
};

U_BOOT_DRIVER(nvme_blk) = {
	.name	= "nvme-blk",
	.id	= UCLASS_BLK,
	.probe	= nvme_blk_probe,
	.ops	= &nvme_blk_ops,
	.priv_auto_alloc_size	= sizeof(struct nvme_ns),
};

static int nvme_bind(struct udevice *udev)
{
	static int ndev_num;
	char name[20];

	sprintf(name, "nvme#%d", ndev_num++);

	return device_set_name(udev, name);
}

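/**
 * nvme_probe() - bring up an NVMe controller
 *
 * Maps BAR0, reads the CAP register to size the queues and doorbell
 * stride, configures the admin queue, allocates the PRP pool and creates
 * the single I/O queue pair used by this driver.
 *
 * @udev:	the nvme controller device
 * Return: 0 on success, negative error code otherwise
 */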
static int nvme_probe(struct udevice *udev)
{
	int ret;
	struct nvme_dev *ndev = dev_get_priv(udev);

	ndev->instance = trailing_strtol(udev->name);

	INIT_LIST_HEAD(&ndev->namespaces);
	ndev->bar = dm_pci_map_bar(udev, PCI_BASE_ADDRESS_0,
			PCI_REGION_MEM);
	if (readl(&ndev->bar->csts) == -1) {
		ret = -ENODEV;
		printf("Error: %s: Controller registers not accessible\n",
		       udev->name);
		goto free_nvme;
	}

	ndev->queues = malloc(NVME_Q_NUM * sizeof(struct nvme_queue *));
	if (!ndev->queues) {
		ret = -ENOMEM;
		printf("Error: %s: Out of memory!\n", udev->name);
		goto free_nvme;
	}
	memset(ndev->queues, 0, NVME_Q_NUM * sizeof(struct nvme_queue *));

	ndev->cap = nvme_readq(&ndev->bar->cap);
	ndev->q_depth = min_t(int, NVME_CAP_MQES(ndev->cap) + 1, NVME_Q_DEPTH);
	ndev->db_stride = 1 << NVME_CAP_STRIDE(ndev->cap);
	ndev->dbs = ((void __iomem *)ndev->bar) + 4096;

	ret = nvme_configure_admin_queue(ndev);
	if (ret)
		goto free_queue;

	/* Allocate after the page size is known */
	ndev->prp_pool = memalign(ndev->page_size, MAX_PRP_POOL);
	if (!ndev->prp_pool) {
		ret = -ENOMEM;
		printf("Error: %s: Out of memory!\n", udev->name);
		goto free_nvme;
	}
	ndev->prp_entry_num = MAX_PRP_POOL >> 3;

	ret = nvme_setup_io_queues(ndev);
	if (ret)
		goto free_queue;

	nvme_get_info_from_identify(ndev);

	return 0;

free_queue:
	free((void *)ndev->queues);
free_nvme:
	return ret;
}

U_BOOT_DRIVER(nvme) = {
	.name	= "nvme",
	.id	= UCLASS_NVME,
	.bind	= nvme_bind,
	.probe	= nvme_probe,
	.priv_auto_alloc_size	= sizeof(struct nvme_dev),
};

struct pci_device_id nvme_supported[] = {
	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, ~0) },
	{}
};

U_BOOT_PCI_DEVICE(nvme, nvme_supported);