zynqmp_gqspi.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2018 Xilinx
 *
 * Xilinx ZynqMP Generic Quad-SPI (QSPI) controller driver (master mode only)
 */

#include <common.h>
#include <cpu_func.h>
#include <log.h>
#include <asm/arch/sys_proto.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <clk.h>
#include <dm.h>
#include <malloc.h>
#include <memalign.h>
#include <spi.h>
#include <spi-mem.h>
#include <ubi_uboot.h>
#include <wait_bit.h>
#include <dm/device_compat.h>
#include <linux/bitops.h>
#include <linux/err.h>

#define GQSPI_GFIFO_STRT_MODE_MASK BIT(29)
#define GQSPI_CONFIG_MODE_EN_MASK (3 << 30)
#define GQSPI_CONFIG_DMA_MODE (2 << 30)
#define GQSPI_CONFIG_CPHA_MASK BIT(2)
#define GQSPI_CONFIG_CPOL_MASK BIT(1)

/*
 * QSPI Interrupt Registers bit Masks
 *
 * All the four interrupt registers (Status/Mask/Enable/Disable) have the same
 * bit definitions.
 */
#define GQSPI_IXR_TXNFULL_MASK 0x00000004 /* QSPI TX FIFO not full */
#define GQSPI_IXR_TXFULL_MASK 0x00000008 /* QSPI TX FIFO is full */
#define GQSPI_IXR_RXNEMTY_MASK 0x00000010 /* QSPI RX FIFO Not Empty */
#define GQSPI_IXR_GFEMTY_MASK 0x00000080 /* QSPI Generic FIFO Empty */
#define GQSPI_IXR_GFNFULL_MASK 0x00000200 /* QSPI GENFIFO not full */
#define GQSPI_IXR_ALL_MASK (GQSPI_IXR_TXNFULL_MASK | \
			    GQSPI_IXR_RXNEMTY_MASK)

/*
 * QSPI Enable Register bit Masks
 *
 * This register is used to enable or disable the QSPI controller
 */
#define GQSPI_ENABLE_ENABLE_MASK 0x00000001 /* QSPI Enable Bit Mask */
#define GQSPI_GFIFO_LOW_BUS BIT(14)
#define GQSPI_GFIFO_CS_LOWER BIT(12)
#define GQSPI_GFIFO_UP_BUS BIT(15)
#define GQSPI_GFIFO_CS_UPPER BIT(13)
#define GQSPI_SPI_MODE_QSPI (3 << 10)
#define GQSPI_SPI_MODE_SPI BIT(10)
#define GQSPI_SPI_MODE_DUAL_SPI (2 << 10)
#define GQSPI_IMD_DATA_CS_ASSERT 5
#define GQSPI_IMD_DATA_CS_DEASSERT 5
#define GQSPI_GFIFO_TX BIT(16)
#define GQSPI_GFIFO_RX BIT(17)
#define GQSPI_GFIFO_STRIPE_MASK BIT(18)
#define GQSPI_GFIFO_IMD_MASK 0xFF
#define GQSPI_GFIFO_EXP_MASK BIT(9)
#define GQSPI_GFIFO_DATA_XFR_MASK BIT(8)
#define GQSPI_STRT_GEN_FIFO BIT(28)
#define GQSPI_GEN_FIFO_STRT_MOD BIT(29)
#define GQSPI_GFIFO_WP_HOLD BIT(19)
#define GQSPI_BAUD_DIV_MASK (7 << 3)
#define GQSPI_DFLT_BAUD_RATE_DIV BIT(3)
#define GQSPI_GFIFO_ALL_INT_MASK 0xFBE
#define GQSPI_DMA_DST_I_STS_DONE BIT(1)
#define GQSPI_DMA_DST_I_STS_MASK 0xFE
#define MODEBITS 0x6
#define GQSPI_GFIFO_SELECT BIT(0)
#define GQSPI_FIFO_THRESHOLD 1
#define GQSPI_GENFIFO_THRESHOLD 31
#define SPI_XFER_ON_BOTH 0
#define SPI_XFER_ON_LOWER 1
#define SPI_XFER_ON_UPPER 2
#define GQSPI_DMA_ALIGN 0x4
#define GQSPI_MAX_BAUD_RATE_VAL 7
#define GQSPI_DFLT_BAUD_RATE_VAL 2
#define GQSPI_TIMEOUT 100000000
#define GQSPI_BAUD_DIV_SHIFT 2
#define GQSPI_LPBK_DLY_ADJ_LPBK_SHIFT 5
#define GQSPI_LPBK_DLY_ADJ_DLY_1 0x2
#define GQSPI_LPBK_DLY_ADJ_DLY_1_SHIFT 3
#define GQSPI_LPBK_DLY_ADJ_DLY_0 0x3
#define GQSPI_USE_DATA_DLY 0x1
#define GQSPI_USE_DATA_DLY_SHIFT 31
#define GQSPI_DATA_DLY_ADJ_VALUE 0x2
#define GQSPI_DATA_DLY_ADJ_SHIFT 28
#define TAP_DLY_BYPASS_LQSPI_RX_VALUE 0x1
#define TAP_DLY_BYPASS_LQSPI_RX_SHIFT 2
#define GQSPI_DATA_DLY_ADJ_OFST 0x000001F8
#define IOU_TAPDLY_BYPASS_OFST 0xFF180390
#define GQSPI_LPBK_DLY_ADJ_LPBK_MASK 0x00000020
#define GQSPI_FREQ_40MHZ 40000000
#define GQSPI_FREQ_100MHZ 100000000
#define GQSPI_FREQ_150MHZ 150000000
#define IOU_TAPDLY_BYPASS_MASK 0x7
#define GQSPI_REG_OFFSET 0x100
#define GQSPI_DMA_REG_OFFSET 0x800

/* QSPI register offsets */
struct zynqmp_qspi_regs {
	u32 confr;	/* 0x00 */
	u32 isr;	/* 0x04 */
	u32 ier;	/* 0x08 */
	u32 idisr;	/* 0x0C */
	u32 imaskr;	/* 0x10 */
	u32 enbr;	/* 0x14 */
	u32 dr;		/* 0x18 */
	u32 txd0r;	/* 0x1C */
	u32 drxr;	/* 0x20 */
	u32 sicr;	/* 0x24 */
	u32 txftr;	/* 0x28 */
	u32 rxftr;	/* 0x2C */
	u32 gpior;	/* 0x30 */
	u32 reserved0;	/* 0x34 */
	u32 lpbkdly;	/* 0x38 */
	u32 reserved1;	/* 0x3C */
	u32 genfifo;	/* 0x40 */
	u32 gqspisel;	/* 0x44 */
	u32 reserved2;	/* 0x48 */
	u32 gqfifoctrl;	/* 0x4C */
	u32 gqfthr;	/* 0x50 */
	u32 gqpollcfg;	/* 0x54 */
	u32 gqpollto;	/* 0x58 */
	u32 gqxfersts;	/* 0x5C */
	u32 gqfifosnap;	/* 0x60 */
	u32 gqrxcpy;	/* 0x64 */
	u32 reserved3[36];	/* 0x68 */
	u32 gqspidlyadj;	/* 0xF8 */
};

struct zynqmp_qspi_dma_regs {
	u32 dmadst;	/* 0x00 */
	u32 dmasize;	/* 0x04 */
	u32 dmasts;	/* 0x08 */
	u32 dmactrl;	/* 0x0C */
	u32 reserved0;	/* 0x10 */
	u32 dmaisr;	/* 0x14 */
	u32 dmaier;	/* 0x18 */
	u32 dmaidr;	/* 0x1C */
	u32 dmaimr;	/* 0x20 */
	u32 dmactrl2;	/* 0x24 */
	u32 dmadstmsb;	/* 0x28 */
};

struct zynqmp_qspi_plat {
	struct zynqmp_qspi_regs *regs;
	struct zynqmp_qspi_dma_regs *dma_regs;
	u32 frequency;
	u32 speed_hz;
};

struct zynqmp_qspi_priv {
	struct zynqmp_qspi_regs *regs;
	struct zynqmp_qspi_dma_regs *dma_regs;
	const void *tx_buf;
	void *rx_buf;
	unsigned int len;
	int bytes_to_transfer;
	int bytes_to_receive;
	const struct spi_mem_op *op;
};

static int zynqmp_qspi_of_to_plat(struct udevice *bus)
{
	struct zynqmp_qspi_plat *plat = dev_get_plat(bus);

	debug("%s\n", __func__);

	plat->regs = (struct zynqmp_qspi_regs *)(dev_read_addr(bus) +
						 GQSPI_REG_OFFSET);
	plat->dma_regs = (struct zynqmp_qspi_dma_regs *)
			 (dev_read_addr(bus) + GQSPI_DMA_REG_OFFSET);

	return 0;
}
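
/*
 * zynqmp_qspi_init_hw() - Bring the controller into a known state.
 *
 * Select the generic FIFO interface, disable all interrupts, program the
 * TX/RX/GENFIFO thresholds, clear stale status, then re-enable the
 * controller in DMA mode with manual-start generic FIFO operation and the
 * default baud-rate divisor.
 */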
static void zynqmp_qspi_init_hw(struct zynqmp_qspi_priv *priv)
{
	u32 config_reg;
	struct zynqmp_qspi_regs *regs = priv->regs;

	writel(GQSPI_GFIFO_SELECT, &regs->gqspisel);
	writel(GQSPI_GFIFO_ALL_INT_MASK, &regs->idisr);
	writel(GQSPI_FIFO_THRESHOLD, &regs->txftr);
	writel(GQSPI_FIFO_THRESHOLD, &regs->rxftr);
	writel(GQSPI_GENFIFO_THRESHOLD, &regs->gqfthr);
	writel(GQSPI_GFIFO_ALL_INT_MASK, &regs->isr);
	writel(~GQSPI_ENABLE_ENABLE_MASK, &regs->enbr);

	config_reg = readl(&regs->confr);
	config_reg &= ~(GQSPI_GFIFO_STRT_MODE_MASK |
			GQSPI_CONFIG_MODE_EN_MASK);
	config_reg |= GQSPI_CONFIG_DMA_MODE | GQSPI_GFIFO_WP_HOLD |
		      GQSPI_DFLT_BAUD_RATE_DIV | GQSPI_GFIFO_STRT_MODE_MASK;
	writel(config_reg, &regs->confr);

	writel(GQSPI_ENABLE_ENABLE_MASK, &regs->enbr);
}

static u32 zynqmp_qspi_bus_select(struct zynqmp_qspi_priv *priv)
{
	u32 gqspi_fifo_reg = 0;

	gqspi_fifo_reg = GQSPI_GFIFO_LOW_BUS |
			 GQSPI_GFIFO_CS_LOWER;

	return gqspi_fifo_reg;
}

static u32 zynqmp_qspi_genfifo_mode(u8 buswidth)
{
	switch (buswidth) {
	case 1:
		return GQSPI_SPI_MODE_SPI;
	case 2:
		return GQSPI_SPI_MODE_DUAL_SPI;
	case 4:
		return GQSPI_SPI_MODE_QSPI;
	default:
		debug("Unsupported bus width %u\n", buswidth);
		return GQSPI_SPI_MODE_SPI;
	}
}
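
/*
 * zynqmp_qspi_fill_gen_fifo() - Queue one generic FIFO entry.
 *
 * Write @gqspi_fifo_reg into the GENFIFO, trigger a manual start and wait
 * (up to GQSPI_TIMEOUT) for the GENFIFO-empty status bit so the next entry
 * can be queued.
 */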
static void zynqmp_qspi_fill_gen_fifo(struct zynqmp_qspi_priv *priv,
				      u32 gqspi_fifo_reg)
{
	struct zynqmp_qspi_regs *regs = priv->regs;
	u32 config_reg, ier;
	int ret = 0;

	writel(gqspi_fifo_reg, &regs->genfifo);

	config_reg = readl(&regs->confr);
	/* Manual start if needed */
	config_reg |= GQSPI_STRT_GEN_FIFO;
	writel(config_reg, &regs->confr);

	/* Enable interrupts */
	ier = readl(&regs->ier);
	ier |= GQSPI_IXR_GFEMTY_MASK;
	writel(ier, &regs->ier);

	/* Wait until the gen fifo is empty to write the new command */
	ret = wait_for_bit_le32(&regs->isr, GQSPI_IXR_GFEMTY_MASK, 1,
				GQSPI_TIMEOUT, 1);
	if (ret)
		printf("%s Timeout\n", __func__);
}
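
/*
 * zynqmp_qspi_chipselect() - Assert or de-assert the (lower) chip select.
 *
 * Queues a dummy generic FIFO entry followed by a CS entry whose immediate
 * field (GQSPI_IMD_DATA_CS_ASSERT/DEASSERT) holds the new CS state for a
 * few cycles.
 */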
static void zynqmp_qspi_chipselect(struct zynqmp_qspi_priv *priv, int is_on)
{
	u32 gqspi_fifo_reg = 0;

	if (is_on) {
		gqspi_fifo_reg = zynqmp_qspi_bus_select(priv);
		gqspi_fifo_reg |= GQSPI_SPI_MODE_SPI |
				  GQSPI_IMD_DATA_CS_ASSERT;
	} else {
		gqspi_fifo_reg = GQSPI_GFIFO_LOW_BUS;
		gqspi_fifo_reg |= GQSPI_IMD_DATA_CS_DEASSERT;
	}

	debug("GFIFO_CMD_CS: 0x%x\n", gqspi_fifo_reg);

	/* Dummy generic FIFO entry */
	zynqmp_qspi_fill_gen_fifo(priv, 0);

	zynqmp_qspi_fill_gen_fifo(priv, gqspi_fifo_reg);
}
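
/*
 * zynqmp_qspi_set_tapdelay() - Tune RX capture delays for @baudrateval.
 *
 * Derives the resulting bus clock from the reference clock and the baud
 * divisor, then programs the IOU tap-delay bypass and the controller's
 * loopback/data delay registers for the <40 MHz, <=100 MHz and <=150 MHz
 * frequency bands.
 */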
void zynqmp_qspi_set_tapdelay(struct udevice *bus, u32 baudrateval)
{
	struct zynqmp_qspi_plat *plat = dev_get_plat(bus);
	struct zynqmp_qspi_priv *priv = dev_get_priv(bus);
	struct zynqmp_qspi_regs *regs = priv->regs;
	u32 tapdlybypass = 0, lpbkdlyadj = 0, datadlyadj = 0, clk_rate;
	u32 reqhz = 0;

	clk_rate = plat->frequency;
	reqhz = (clk_rate / (GQSPI_BAUD_DIV_SHIFT << baudrateval));

	debug("%s, req_hz:%d, clk_rate:%d, baudrateval:%d\n",
	      __func__, reqhz, clk_rate, baudrateval);

	if (reqhz < GQSPI_FREQ_40MHZ) {
		zynqmp_mmio_read(IOU_TAPDLY_BYPASS_OFST, &tapdlybypass);
		tapdlybypass |= (TAP_DLY_BYPASS_LQSPI_RX_VALUE <<
				 TAP_DLY_BYPASS_LQSPI_RX_SHIFT);
	} else if (reqhz <= GQSPI_FREQ_100MHZ) {
		zynqmp_mmio_read(IOU_TAPDLY_BYPASS_OFST, &tapdlybypass);
		tapdlybypass |= (TAP_DLY_BYPASS_LQSPI_RX_VALUE <<
				 TAP_DLY_BYPASS_LQSPI_RX_SHIFT);
		lpbkdlyadj = readl(&regs->lpbkdly);
		lpbkdlyadj |= (GQSPI_LPBK_DLY_ADJ_LPBK_MASK);
		datadlyadj = readl(&regs->gqspidlyadj);
		datadlyadj |= ((GQSPI_USE_DATA_DLY << GQSPI_USE_DATA_DLY_SHIFT)
			       | (GQSPI_DATA_DLY_ADJ_VALUE <<
				  GQSPI_DATA_DLY_ADJ_SHIFT));
	} else if (reqhz <= GQSPI_FREQ_150MHZ) {
		lpbkdlyadj = readl(&regs->lpbkdly);
		lpbkdlyadj |= ((GQSPI_LPBK_DLY_ADJ_LPBK_MASK) |
			       GQSPI_LPBK_DLY_ADJ_DLY_0);
	}

	zynqmp_mmio_write(IOU_TAPDLY_BYPASS_OFST, IOU_TAPDLY_BYPASS_MASK,
			  tapdlybypass);
	writel(lpbkdlyadj, &regs->lpbkdly);
	writel(datadlyadj, &regs->gqspidlyadj);
}
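
/*
 * zynqmp_qspi_set_speed() - Program the baud-rate divisor.
 *
 * Picks the smallest divisor that keeps the bus clock at or below the
 * requested speed, writes it to the configuration register and retunes
 * the tap delays for the new frequency.
 */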
static int zynqmp_qspi_set_speed(struct udevice *bus, uint speed)
{
	struct zynqmp_qspi_plat *plat = dev_get_plat(bus);
	struct zynqmp_qspi_priv *priv = dev_get_priv(bus);
	struct zynqmp_qspi_regs *regs = priv->regs;
	u32 confr;
	u8 baud_rate_val = 0;

	debug("%s\n", __func__);

	if (speed > plat->frequency)
		speed = plat->frequency;

	if (plat->speed_hz != speed) {
		/* Set the clock frequency */
		/* If speed == 0, default to lowest speed */
		while ((baud_rate_val < 8) &&
		       ((plat->frequency /
			 (2 << baud_rate_val)) > speed))
			baud_rate_val++;

		if (baud_rate_val > GQSPI_MAX_BAUD_RATE_VAL)
			baud_rate_val = GQSPI_DFLT_BAUD_RATE_VAL;

		plat->speed_hz = plat->frequency / (2 << baud_rate_val);

		confr = readl(&regs->confr);
		confr &= ~GQSPI_BAUD_DIV_MASK;
		confr |= (baud_rate_val << 3);
		writel(confr, &regs->confr);
		zynqmp_qspi_set_tapdelay(bus, baud_rate_val);

		debug("regs=%p, speed=%d\n", priv->regs, plat->speed_hz);
	}

	return 0;
}

static int zynqmp_qspi_probe(struct udevice *bus)
{
	struct zynqmp_qspi_plat *plat = dev_get_plat(bus);
	struct zynqmp_qspi_priv *priv = dev_get_priv(bus);
	struct clk clk;
	unsigned long clock;
	int ret;

	debug("%s: bus:%p, priv:%p\n", __func__, bus, priv);

	priv->regs = plat->regs;
	priv->dma_regs = plat->dma_regs;

	ret = clk_get_by_index(bus, 0, &clk);
	if (ret < 0) {
		dev_err(bus, "failed to get clock\n");
		return ret;
	}

	clock = clk_get_rate(&clk);
	if (IS_ERR_VALUE(clock)) {
		dev_err(bus, "failed to get rate\n");
		return clock;
	}
	debug("%s: CLK %ld\n", __func__, clock);

	ret = clk_enable(&clk);
	if (ret) {
		dev_err(bus, "failed to enable clock\n");
		return ret;
	}
	plat->frequency = clock;
	plat->speed_hz = plat->frequency / 2;

	/* init the zynq spi hw */
	zynqmp_qspi_init_hw(priv);

	return 0;
}

static int zynqmp_qspi_set_mode(struct udevice *bus, uint mode)
{
	struct zynqmp_qspi_priv *priv = dev_get_priv(bus);
	struct zynqmp_qspi_regs *regs = priv->regs;
	u32 confr;

	debug("%s\n", __func__);

	/* Set the SPI Clock phase and polarities */
	confr = readl(&regs->confr);
	confr &= ~(GQSPI_CONFIG_CPHA_MASK |
		   GQSPI_CONFIG_CPOL_MASK);

	if (mode & SPI_CPHA)
		confr |= GQSPI_CONFIG_CPHA_MASK;
	if (mode & SPI_CPOL)
		confr |= GQSPI_CONFIG_CPOL_MASK;

	writel(confr, &regs->confr);

	return 0;
}
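
/*
 * zynqmp_qspi_fill_tx_fifo() - Push @size bytes from priv->tx_buf into the
 * TX data FIFO.
 *
 * Waits for the TX-not-full status before each word and pads a trailing
 * partial word with ones. priv->tx_buf is advanced by the full length on
 * success.
 */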
static int zynqmp_qspi_fill_tx_fifo(struct zynqmp_qspi_priv *priv, u32 size)
{
	u32 data;
	int ret = 0;
	struct zynqmp_qspi_regs *regs = priv->regs;
	u32 *buf = (u32 *)priv->tx_buf;
	u32 len = size;

	debug("TxFIFO: 0x%x, size: 0x%x\n", readl(&regs->isr),
	      size);

	while (size) {
		ret = wait_for_bit_le32(&regs->isr, GQSPI_IXR_TXNFULL_MASK, 1,
					GQSPI_TIMEOUT, 1);
		if (ret) {
			printf("%s: Timeout\n", __func__);
			return ret;
		}

		if (size >= 4) {
			writel(*buf, &regs->txd0r);
			buf++;
			size -= 4;
		} else {
			switch (size) {
			case 1:
				data = *((u8 *)buf);
				buf += 1;
				data |= GENMASK(31, 8);
				break;
			case 2:
				data = *((u16 *)buf);
				buf += 2;
				data |= GENMASK(31, 16);
				break;
			case 3:
				data = *buf;
				buf += 3;
				data |= GENMASK(31, 24);
				break;
			}
			writel(data, &regs->txd0r);
			size = 0;
		}
	}

	priv->tx_buf += len;
	return 0;
}
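
/*
 * zynqmp_qspi_genfifo_cmd() - Queue the command phase of the current
 * spi-mem operation.
 *
 * Emits one generic FIFO TX entry for the opcode, one per address byte
 * (most significant byte first) and, when requested, one entry clocking
 * out the dummy cycles.
 */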
static void zynqmp_qspi_genfifo_cmd(struct zynqmp_qspi_priv *priv)
{
	const struct spi_mem_op *op = priv->op;
	u32 gen_fifo_cmd;
	u8 i, dummy_cycles, addr;

	/* Send opcode */
	gen_fifo_cmd = zynqmp_qspi_bus_select(priv);
	gen_fifo_cmd |= zynqmp_qspi_genfifo_mode(op->cmd.buswidth);
	gen_fifo_cmd |= GQSPI_GFIFO_TX;
	gen_fifo_cmd |= op->cmd.opcode;
	zynqmp_qspi_fill_gen_fifo(priv, gen_fifo_cmd);

	/* Send address */
	for (i = 0; i < op->addr.nbytes; i++) {
		addr = op->addr.val >> (8 * (op->addr.nbytes - i - 1));

		gen_fifo_cmd = zynqmp_qspi_bus_select(priv);
		gen_fifo_cmd |= zynqmp_qspi_genfifo_mode(op->addr.buswidth);
		gen_fifo_cmd |= GQSPI_GFIFO_TX;
		gen_fifo_cmd |= addr;

		debug("GFIFO_CMD_Cmd = 0x%x\n", gen_fifo_cmd);

		zynqmp_qspi_fill_gen_fifo(priv, gen_fifo_cmd);
	}

	/* Send dummy */
	if (op->dummy.nbytes) {
		dummy_cycles = op->dummy.nbytes * 8 / op->dummy.buswidth;

		gen_fifo_cmd = zynqmp_qspi_bus_select(priv);
		gen_fifo_cmd |= zynqmp_qspi_genfifo_mode(op->dummy.buswidth);
		gen_fifo_cmd &= ~(GQSPI_GFIFO_TX | GQSPI_GFIFO_RX);
		gen_fifo_cmd |= GQSPI_GFIFO_DATA_XFR_MASK;
		gen_fifo_cmd |= dummy_cycles;
		zynqmp_qspi_fill_gen_fifo(priv, gen_fifo_cmd);
	}
}
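
/*
 * zynqmp_qspi_calc_exp() - Compute the immediate/exponent field for the
 * next generic FIFO data entry.
 *
 * Transfers longer than 255 bytes are split into power-of-two chunks using
 * the EXP bit; shorter remainders use a plain immediate byte count.
 * priv->len is reduced by the chunk covered, and the exponent or byte
 * count is returned.
 */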
static u32 zynqmp_qspi_calc_exp(struct zynqmp_qspi_priv *priv,
				u32 *gen_fifo_cmd)
{
	u32 expval = 8;
	u32 len;

	while (1) {
		if (priv->len > 255) {
			if (priv->len & (1 << expval)) {
				*gen_fifo_cmd &= ~GQSPI_GFIFO_IMD_MASK;
				*gen_fifo_cmd |= GQSPI_GFIFO_EXP_MASK;
				*gen_fifo_cmd |= expval;
				priv->len -= (1 << expval);
				return expval;
			}
			expval++;
		} else {
			*gen_fifo_cmd &= ~(GQSPI_GFIFO_IMD_MASK |
					   GQSPI_GFIFO_EXP_MASK);
			*gen_fifo_cmd |= (u8)priv->len;
			len = (u8)priv->len;
			priv->len = 0;
			return len;
		}
	}
}

static int zynqmp_qspi_genfifo_fill_tx(struct zynqmp_qspi_priv *priv)
{
	u32 gen_fifo_cmd;
	u32 len;
	int ret = 0;

	gen_fifo_cmd = zynqmp_qspi_bus_select(priv);
	gen_fifo_cmd |= zynqmp_qspi_genfifo_mode(priv->op->data.buswidth);
	gen_fifo_cmd |= GQSPI_GFIFO_TX |
			GQSPI_GFIFO_DATA_XFR_MASK;

	while (priv->len) {
		len = zynqmp_qspi_calc_exp(priv, &gen_fifo_cmd);
		zynqmp_qspi_fill_gen_fifo(priv, gen_fifo_cmd);

		debug("GFIFO_CMD_TX:0x%x\n", gen_fifo_cmd);

		if (gen_fifo_cmd & GQSPI_GFIFO_EXP_MASK)
			ret = zynqmp_qspi_fill_tx_fifo(priv,
						       1 << len);
		else
			ret = zynqmp_qspi_fill_tx_fifo(priv,
						       len);

		if (ret)
			return ret;
	}

	return ret;
}
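
/*
 * zynqmp_qspi_start_dma() - Receive priv->len bytes through the RX DMA
 * engine into @buf.
 *
 * Programs the DMA destination and (4-byte rounded) size, queues the
 * generic FIFO RX entries, waits for the DMA-done status and copies the
 * data back to priv->rx_buf when a bounce buffer was used.
 */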
static int zynqmp_qspi_start_dma(struct zynqmp_qspi_priv *priv,
				 u32 gen_fifo_cmd, u32 *buf)
{
	u32 addr;
	u32 size;
	u32 actuallen = priv->len;
	int ret = 0;
	struct zynqmp_qspi_dma_regs *dma_regs = priv->dma_regs;

	writel((unsigned long)buf, &dma_regs->dmadst);
	writel(roundup(priv->len, GQSPI_DMA_ALIGN), &dma_regs->dmasize);
	writel(GQSPI_DMA_DST_I_STS_MASK, &dma_regs->dmaier);
	addr = (unsigned long)buf;
	size = roundup(priv->len, GQSPI_DMA_ALIGN);
	flush_dcache_range(addr, addr + size);

	while (priv->len) {
		zynqmp_qspi_calc_exp(priv, &gen_fifo_cmd);
		zynqmp_qspi_fill_gen_fifo(priv, gen_fifo_cmd);

		debug("GFIFO_CMD_RX:0x%x\n", gen_fifo_cmd);
	}

	ret = wait_for_bit_le32(&dma_regs->dmaisr, GQSPI_DMA_DST_I_STS_DONE,
				1, GQSPI_TIMEOUT, 1);
	if (ret) {
		printf("DMA Timeout:0x%x\n", readl(&dma_regs->dmaisr));
		return -ETIMEDOUT;
	}

	writel(GQSPI_DMA_DST_I_STS_DONE, &dma_regs->dmaisr);

	debug("buf:0x%lx, rxbuf:0x%lx, *buf:0x%x len: 0x%x\n",
	      (unsigned long)buf, (unsigned long)priv->rx_buf, *buf,
	      actuallen);

	if (buf != priv->rx_buf)
		memcpy(priv->rx_buf, buf, actuallen);

	return 0;
}
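
/*
 * zynqmp_qspi_genfifo_fill_rx() - Build the generic FIFO RX command and
 * start the DMA receive.
 *
 * The caller's buffer is used directly when it is 4-byte aligned and a
 * multiple of 4 bytes long; otherwise a cache-aligned bounce buffer is
 * allocated on the stack.
 */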
static int zynqmp_qspi_genfifo_fill_rx(struct zynqmp_qspi_priv *priv)
{
	u32 gen_fifo_cmd;
	u32 *buf;
	u32 actuallen = priv->len;

	gen_fifo_cmd = zynqmp_qspi_bus_select(priv);
	gen_fifo_cmd |= zynqmp_qspi_genfifo_mode(priv->op->data.buswidth);
	gen_fifo_cmd |= GQSPI_GFIFO_RX |
			GQSPI_GFIFO_DATA_XFR_MASK;

	/*
	 * Check if receive buffer is aligned to 4 byte and length
	 * is multiples of four byte as we are using dma to receive.
	 */
	if (!((unsigned long)priv->rx_buf & (GQSPI_DMA_ALIGN - 1)) &&
	    !(actuallen % GQSPI_DMA_ALIGN)) {
		buf = (u32 *)priv->rx_buf;
		return zynqmp_qspi_start_dma(priv, gen_fifo_cmd, buf);
	}

	ALLOC_CACHE_ALIGN_BUFFER(u8, tmp, roundup(priv->len,
						  GQSPI_DMA_ALIGN));
	buf = (u32 *)tmp;
	return zynqmp_qspi_start_dma(priv, gen_fifo_cmd, buf);
}

static int zynqmp_qspi_claim_bus(struct udevice *dev)
{
	struct udevice *bus = dev->parent;
	struct zynqmp_qspi_priv *priv = dev_get_priv(bus);
	struct zynqmp_qspi_regs *regs = priv->regs;

	writel(GQSPI_ENABLE_ENABLE_MASK, &regs->enbr);

	return 0;
}

static int zynqmp_qspi_release_bus(struct udevice *dev)
{
	struct udevice *bus = dev->parent;
	struct zynqmp_qspi_priv *priv = dev_get_priv(bus);
	struct zynqmp_qspi_regs *regs = priv->regs;

	writel(~GQSPI_ENABLE_ENABLE_MASK, &regs->enbr);

	return 0;
}
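
/*
 * zynqmp_qspi_exec_op() - spi-mem exec_op hook.
 *
 * Latches the operation in priv, asserts CS, queues the opcode, address
 * and dummy phases, runs the data phase in the requested direction, then
 * de-asserts CS.
 */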
static int zynqmp_qspi_exec_op(struct spi_slave *slave,
			       const struct spi_mem_op *op)
{
	struct zynqmp_qspi_priv *priv = dev_get_priv(slave->dev->parent);
	int ret = 0;

	priv->op = op;
	priv->tx_buf = op->data.buf.out;
	priv->rx_buf = op->data.buf.in;
	priv->len = op->data.nbytes;

	zynqmp_qspi_chipselect(priv, 1);

	/* Send opcode, addr, dummy */
	zynqmp_qspi_genfifo_cmd(priv);

	/* Request the transfer */
	if (op->data.dir == SPI_MEM_DATA_IN)
		ret = zynqmp_qspi_genfifo_fill_rx(priv);
	else if (op->data.dir == SPI_MEM_DATA_OUT)
		ret = zynqmp_qspi_genfifo_fill_tx(priv);

	zynqmp_qspi_chipselect(priv, 0);

	return ret;
}

static const struct spi_controller_mem_ops zynqmp_qspi_mem_ops = {
	.exec_op = zynqmp_qspi_exec_op,
};

static const struct dm_spi_ops zynqmp_qspi_ops = {
	.claim_bus = zynqmp_qspi_claim_bus,
	.release_bus = zynqmp_qspi_release_bus,
	.set_speed = zynqmp_qspi_set_speed,
	.set_mode = zynqmp_qspi_set_mode,
	.mem_ops = &zynqmp_qspi_mem_ops,
};

static const struct udevice_id zynqmp_qspi_ids[] = {
	{ .compatible = "xlnx,zynqmp-qspi-1.0" },
	{ .compatible = "xlnx,versal-qspi-1.0" },
	{ }
};

U_BOOT_DRIVER(zynqmp_qspi) = {
	.name = "zynqmp_qspi",
	.id = UCLASS_SPI,
	.of_match = zynqmp_qspi_ids,
	.ops = &zynqmp_qspi_ops,
	.of_to_plat = zynqmp_qspi_of_to_plat,
	.plat_auto = sizeof(struct zynqmp_qspi_plat),
	.priv_auto = sizeof(struct zynqmp_qspi_priv),
	.probe = zynqmp_qspi_probe,
};