zynqmp_gqspi.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2018 Xilinx
 *
 * Xilinx ZynqMP Generic Quad-SPI (QSPI) controller driver (master mode only)
 */

#include <common.h>
#include <cpu_func.h>
#include <log.h>
#include <asm/arch/sys_proto.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <clk.h>
#include <dm.h>
#include <malloc.h>
#include <memalign.h>
#include <spi.h>
#include <ubi_uboot.h>
#include <wait_bit.h>
#include <dm/device_compat.h>
#include <linux/bitops.h>
#include <linux/err.h>

#define GQSPI_GFIFO_STRT_MODE_MASK	BIT(29)
#define GQSPI_CONFIG_MODE_EN_MASK	(3 << 30)
#define GQSPI_CONFIG_DMA_MODE		(2 << 30)
#define GQSPI_CONFIG_CPHA_MASK		BIT(2)
#define GQSPI_CONFIG_CPOL_MASK		BIT(1)

/*
 * QSPI Interrupt Registers bit Masks
 *
 * All the four interrupt registers (Status/Mask/Enable/Disable) have the same
 * bit definitions.
 */
#define GQSPI_IXR_TXNFULL_MASK	0x00000004 /* QSPI TX FIFO not full */
#define GQSPI_IXR_TXFULL_MASK	0x00000008 /* QSPI TX FIFO is full */
#define GQSPI_IXR_RXNEMTY_MASK	0x00000010 /* QSPI RX FIFO Not Empty */
#define GQSPI_IXR_GFEMTY_MASK	0x00000080 /* QSPI Generic FIFO Empty */
#define GQSPI_IXR_ALL_MASK	(GQSPI_IXR_TXNFULL_MASK | \
				 GQSPI_IXR_RXNEMTY_MASK)

/*
 * QSPI Enable Register bit Masks
 *
 * This register is used to enable or disable the QSPI controller
 */
#define GQSPI_ENABLE_ENABLE_MASK	0x00000001 /* QSPI Enable Bit Mask */

#define GQSPI_GFIFO_LOW_BUS		BIT(14)
#define GQSPI_GFIFO_CS_LOWER		BIT(12)
#define GQSPI_GFIFO_UP_BUS		BIT(15)
#define GQSPI_GFIFO_CS_UPPER		BIT(13)
#define GQSPI_SPI_MODE_QSPI		(3 << 10)
#define GQSPI_SPI_MODE_SPI		BIT(10)
#define GQSPI_SPI_MODE_DUAL_SPI		(2 << 10)
#define GQSPI_IMD_DATA_CS_ASSERT	5
#define GQSPI_IMD_DATA_CS_DEASSERT	5
#define GQSPI_GFIFO_TX			BIT(16)
#define GQSPI_GFIFO_RX			BIT(17)
#define GQSPI_GFIFO_STRIPE_MASK		BIT(18)
#define GQSPI_GFIFO_IMD_MASK		0xFF
#define GQSPI_GFIFO_EXP_MASK		BIT(9)
#define GQSPI_GFIFO_DATA_XFR_MASK	BIT(8)
#define GQSPI_STRT_GEN_FIFO		BIT(28)
#define GQSPI_GEN_FIFO_STRT_MOD		BIT(29)
#define GQSPI_GFIFO_WP_HOLD		BIT(19)
#define GQSPI_BAUD_DIV_MASK		(7 << 3)
#define GQSPI_DFLT_BAUD_RATE_DIV	BIT(3)
#define GQSPI_GFIFO_ALL_INT_MASK	0xFBE
#define GQSPI_DMA_DST_I_STS_DONE	BIT(1)
#define GQSPI_DMA_DST_I_STS_MASK	0xFE
#define MODEBITS			0x6

#define GQSPI_GFIFO_SELECT		BIT(0)
#define GQSPI_FIFO_THRESHOLD		1

#define SPI_XFER_ON_BOTH		0
#define SPI_XFER_ON_LOWER		1
#define SPI_XFER_ON_UPPER		2

#define GQSPI_DMA_ALIGN			0x4
#define GQSPI_MAX_BAUD_RATE_VAL		7
#define GQSPI_DFLT_BAUD_RATE_VAL	2

#define GQSPI_TIMEOUT			100000000

#define GQSPI_BAUD_DIV_SHIFT		2
#define GQSPI_LPBK_DLY_ADJ_LPBK_SHIFT	5
#define GQSPI_LPBK_DLY_ADJ_DLY_1	0x2
#define GQSPI_LPBK_DLY_ADJ_DLY_1_SHIFT	3
#define GQSPI_LPBK_DLY_ADJ_DLY_0	0x3
#define GQSPI_USE_DATA_DLY		0x1
#define GQSPI_USE_DATA_DLY_SHIFT	31
#define GQSPI_DATA_DLY_ADJ_VALUE	0x2
#define GQSPI_DATA_DLY_ADJ_SHIFT	28
#define TAP_DLY_BYPASS_LQSPI_RX_VALUE	0x1
#define TAP_DLY_BYPASS_LQSPI_RX_SHIFT	2
#define GQSPI_DATA_DLY_ADJ_OFST		0x000001F8
#define IOU_TAPDLY_BYPASS_OFST		0xFF180390
#define GQSPI_LPBK_DLY_ADJ_LPBK_MASK	0x00000020
#define GQSPI_FREQ_40MHZ		40000000
#define GQSPI_FREQ_100MHZ		100000000
#define GQSPI_FREQ_150MHZ		150000000
#define IOU_TAPDLY_BYPASS_MASK		0x7

#define GQSPI_REG_OFFSET		0x100
#define GQSPI_DMA_REG_OFFSET		0x800

/* QSPI register offsets */
struct zynqmp_qspi_regs {
	u32 confr;	/* 0x00 */
	u32 isr;	/* 0x04 */
	u32 ier;	/* 0x08 */
	u32 idisr;	/* 0x0C */
	u32 imaskr;	/* 0x10 */
	u32 enbr;	/* 0x14 */
	u32 dr;		/* 0x18 */
	u32 txd0r;	/* 0x1C */
	u32 drxr;	/* 0x20 */
	u32 sicr;	/* 0x24 */
	u32 txftr;	/* 0x28 */
	u32 rxftr;	/* 0x2C */
	u32 gpior;	/* 0x30 */
	u32 reserved0;	/* 0x34 */
	u32 lpbkdly;	/* 0x38 */
	u32 reserved1;	/* 0x3C */
	u32 genfifo;	/* 0x40 */
	u32 gqspisel;	/* 0x44 */
	u32 reserved2;	/* 0x48 */
	u32 gqfifoctrl;	/* 0x4C */
	u32 gqfthr;	/* 0x50 */
	u32 gqpollcfg;	/* 0x54 */
	u32 gqpollto;	/* 0x58 */
	u32 gqxfersts;	/* 0x5C */
	u32 gqfifosnap;	/* 0x60 */
	u32 gqrxcpy;	/* 0x64 */
	u32 reserved3[36];	/* 0x68 */
	u32 gqspidlyadj;	/* 0xF8 */
};

struct zynqmp_qspi_dma_regs {
	u32 dmadst;	/* 0x00 */
	u32 dmasize;	/* 0x04 */
	u32 dmasts;	/* 0x08 */
	u32 dmactrl;	/* 0x0C */
	u32 reserved0;	/* 0x10 */
	u32 dmaisr;	/* 0x14 */
	u32 dmaier;	/* 0x18 */
	u32 dmaidr;	/* 0x1C */
	u32 dmaimr;	/* 0x20 */
	u32 dmactrl2;	/* 0x24 */
	u32 dmadstmsb;	/* 0x28 */
};

DECLARE_GLOBAL_DATA_PTR;

struct zynqmp_qspi_platdata {
	struct zynqmp_qspi_regs *regs;
	struct zynqmp_qspi_dma_regs *dma_regs;
	u32 frequency;
	u32 speed_hz;
};

struct zynqmp_qspi_priv {
	struct zynqmp_qspi_regs *regs;
	struct zynqmp_qspi_dma_regs *dma_regs;
	const void *tx_buf;
	void *rx_buf;
	unsigned int len;
	int bytes_to_transfer;
	int bytes_to_receive;
	unsigned int is_inst;
	unsigned int cs_change:1;
};

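/*
 * zynqmp_qspi_ofdata_to_platdata - Read the controller addresses from the DT
 *
 * The "reg" property points at the device base; the generic QSPI registers
 * start GQSPI_REG_OFFSET bytes in and the DMA registers GQSPI_DMA_REG_OFFSET
 * bytes in, so both pointers are derived from the same dev_read_addr() value.
 */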
static int zynqmp_qspi_ofdata_to_platdata(struct udevice *bus)
{
	struct zynqmp_qspi_platdata *plat = bus->platdata;

	debug("%s\n", __func__);

	plat->regs = (struct zynqmp_qspi_regs *)(dev_read_addr(bus) +
						 GQSPI_REG_OFFSET);
	plat->dma_regs = (struct zynqmp_qspi_dma_regs *)
			  (dev_read_addr(bus) + GQSPI_DMA_REG_OFFSET);

	return 0;
}

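/*
 * zynqmp_qspi_init_hw - One-time controller setup
 *
 * Selects the generic FIFO interface, masks and clears all interrupts, sets
 * the TX/RX FIFO thresholds, switches the controller into DMA mode with the
 * default baud-rate divisor, and finally enables the controller.
 */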
static void zynqmp_qspi_init_hw(struct zynqmp_qspi_priv *priv)
{
	u32 config_reg;
	struct zynqmp_qspi_regs *regs = priv->regs;

	writel(GQSPI_GFIFO_SELECT, &regs->gqspisel);
	writel(GQSPI_GFIFO_ALL_INT_MASK, &regs->idisr);
	writel(GQSPI_FIFO_THRESHOLD, &regs->txftr);
	writel(GQSPI_FIFO_THRESHOLD, &regs->rxftr);
	writel(GQSPI_GFIFO_ALL_INT_MASK, &regs->isr);

	config_reg = readl(&regs->confr);
	config_reg &= ~(GQSPI_GFIFO_STRT_MODE_MASK |
			GQSPI_CONFIG_MODE_EN_MASK);
	config_reg |= GQSPI_CONFIG_DMA_MODE |
		      GQSPI_GFIFO_WP_HOLD |
		      GQSPI_DFLT_BAUD_RATE_DIV;
	writel(config_reg, &regs->confr);

	writel(GQSPI_ENABLE_ENABLE_MASK, &regs->enbr);
}

static u32 zynqmp_qspi_bus_select(struct zynqmp_qspi_priv *priv)
{
	u32 gqspi_fifo_reg = 0;

	gqspi_fifo_reg = GQSPI_GFIFO_LOW_BUS |
			 GQSPI_GFIFO_CS_LOWER;

	return gqspi_fifo_reg;
}

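/*
 * zynqmp_qspi_fill_gen_fifo - Push one entry into the generic command FIFO
 *
 * Waits (with a timeout) for the GEN FIFO empty status bit before queueing
 * the next entry, so entries are effectively issued one at a time.
 */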
static void zynqmp_qspi_fill_gen_fifo(struct zynqmp_qspi_priv *priv,
				      u32 gqspi_fifo_reg)
{
	struct zynqmp_qspi_regs *regs = priv->regs;
	int ret = 0;

	ret = wait_for_bit_le32(&regs->isr, GQSPI_IXR_GFEMTY_MASK, 1,
				GQSPI_TIMEOUT, 1);
	if (ret)
		printf("%s Timeout\n", __func__);

	writel(gqspi_fifo_reg, &regs->genfifo);
}

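/*
 * zynqmp_qspi_chipselect - Assert or deassert the (lower) chip select
 *
 * CS changes are themselves generic FIFO entries: the immediate-data field
 * carries a small value (GQSPI_IMD_DATA_CS_ASSERT/DEASSERT) intended to give
 * the controller some CS setup/hold time around the data phases.
 */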
static void zynqmp_qspi_chipselect(struct zynqmp_qspi_priv *priv, int is_on)
{
	u32 gqspi_fifo_reg = 0;

	if (is_on) {
		gqspi_fifo_reg = zynqmp_qspi_bus_select(priv);
		gqspi_fifo_reg |= GQSPI_SPI_MODE_SPI |
				  GQSPI_IMD_DATA_CS_ASSERT;
	} else {
		gqspi_fifo_reg = GQSPI_GFIFO_LOW_BUS;
		gqspi_fifo_reg |= GQSPI_IMD_DATA_CS_DEASSERT;
	}

	debug("GFIFO_CMD_CS: 0x%x\n", gqspi_fifo_reg);

	zynqmp_qspi_fill_gen_fifo(priv, gqspi_fifo_reg);
}

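/*
 * zynqmp_qspi_set_tapdelay - Tune RX capture delays for the selected divisor
 *
 * Based on the resulting bus frequency (< 40 MHz, <= 100 MHz, <= 150 MHz) this
 * programs IOU_TAPDLY_BYPASS via zynqmp_mmio_read()/zynqmp_mmio_write() and
 * the controller's loopback/data delay adjust registers, with the aim of
 * keeping RX data capture reliable at higher clock rates.
 */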
void zynqmp_qspi_set_tapdelay(struct udevice *bus, u32 baudrateval)
{
	struct zynqmp_qspi_platdata *plat = bus->platdata;
	struct zynqmp_qspi_priv *priv = dev_get_priv(bus);
	struct zynqmp_qspi_regs *regs = priv->regs;
	u32 tapdlybypass = 0, lpbkdlyadj = 0, datadlyadj = 0, clk_rate;
	u32 reqhz = 0;

	clk_rate = plat->frequency;
	reqhz = (clk_rate / (GQSPI_BAUD_DIV_SHIFT << baudrateval));

	debug("%s, req_hz:%d, clk_rate:%d, baudrateval:%d\n",
	      __func__, reqhz, clk_rate, baudrateval);

	if (reqhz < GQSPI_FREQ_40MHZ) {
		zynqmp_mmio_read(IOU_TAPDLY_BYPASS_OFST, &tapdlybypass);
		tapdlybypass |= (TAP_DLY_BYPASS_LQSPI_RX_VALUE <<
				 TAP_DLY_BYPASS_LQSPI_RX_SHIFT);
	} else if (reqhz <= GQSPI_FREQ_100MHZ) {
		zynqmp_mmio_read(IOU_TAPDLY_BYPASS_OFST, &tapdlybypass);
		tapdlybypass |= (TAP_DLY_BYPASS_LQSPI_RX_VALUE <<
				 TAP_DLY_BYPASS_LQSPI_RX_SHIFT);
		lpbkdlyadj = readl(&regs->lpbkdly);
		lpbkdlyadj |= (GQSPI_LPBK_DLY_ADJ_LPBK_MASK);
		datadlyadj = readl(&regs->gqspidlyadj);
		datadlyadj |= ((GQSPI_USE_DATA_DLY << GQSPI_USE_DATA_DLY_SHIFT)
				| (GQSPI_DATA_DLY_ADJ_VALUE <<
				   GQSPI_DATA_DLY_ADJ_SHIFT));
	} else if (reqhz <= GQSPI_FREQ_150MHZ) {
		lpbkdlyadj = readl(&regs->lpbkdly);
		lpbkdlyadj |= ((GQSPI_LPBK_DLY_ADJ_LPBK_MASK) |
				GQSPI_LPBK_DLY_ADJ_DLY_0);
	}

	zynqmp_mmio_write(IOU_TAPDLY_BYPASS_OFST, IOU_TAPDLY_BYPASS_MASK,
			  tapdlybypass);
	writel(lpbkdlyadj, &regs->lpbkdly);
	writel(datadlyadj, &regs->gqspidlyadj);
}

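/*
 * zynqmp_qspi_set_speed - Pick the smallest divisor not exceeding 'speed'
 *
 * The SCLK divisor is 2 << baud_rate_val. For example, with an (assumed)
 * 300 MHz reference clock and a requested 100 MHz, baud_rate_val ends up as 1
 * and the bus runs at 75 MHz. The tap delays are re-tuned for the new rate.
 */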
static int zynqmp_qspi_set_speed(struct udevice *bus, uint speed)
{
	struct zynqmp_qspi_platdata *plat = bus->platdata;
	struct zynqmp_qspi_priv *priv = dev_get_priv(bus);
	struct zynqmp_qspi_regs *regs = priv->regs;
	u32 confr;
	u8 baud_rate_val = 0;

	debug("%s\n", __func__);

	if (speed > plat->frequency)
		speed = plat->frequency;

	/* Set the clock frequency */
	confr = readl(&regs->confr);
	if (speed == 0) {
		/* Set baudrate x8, if the freq is 0 */
		baud_rate_val = GQSPI_DFLT_BAUD_RATE_VAL;
	} else if (plat->speed_hz != speed) {
		while ((baud_rate_val < 8) &&
		       ((plat->frequency /
			(2 << baud_rate_val)) > speed))
			baud_rate_val++;

		if (baud_rate_val > GQSPI_MAX_BAUD_RATE_VAL)
			baud_rate_val = GQSPI_DFLT_BAUD_RATE_VAL;

		plat->speed_hz = plat->frequency / (2 << baud_rate_val);
	}

	confr &= ~GQSPI_BAUD_DIV_MASK;
	confr |= (baud_rate_val << 3);
	writel(confr, &regs->confr);
	zynqmp_qspi_set_tapdelay(bus, baud_rate_val);
	debug("regs=%p, speed=%d\n", priv->regs, plat->speed_hz);

	return 0;
}

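/*
 * zynqmp_qspi_probe - Bind-time initialisation
 *
 * Copies the register pointers from platdata, queries and enables the
 * reference clock (index 0), records its rate as the maximum bus frequency,
 * and runs the one-time hardware setup.
 */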
static int zynqmp_qspi_probe(struct udevice *bus)
{
	struct zynqmp_qspi_platdata *plat = dev_get_platdata(bus);
	struct zynqmp_qspi_priv *priv = dev_get_priv(bus);
	struct clk clk;
	unsigned long clock;
	int ret;

	debug("%s: bus:%p, priv:%p\n", __func__, bus, priv);

	priv->regs = plat->regs;
	priv->dma_regs = plat->dma_regs;

	ret = clk_get_by_index(bus, 0, &clk);
	if (ret < 0) {
		dev_err(bus, "failed to get clock\n");
		return ret;
	}

	clock = clk_get_rate(&clk);
	if (IS_ERR_VALUE(clock)) {
		dev_err(bus, "failed to get rate\n");
		return clock;
	}
	debug("%s: CLK %ld\n", __func__, clock);

	ret = clk_enable(&clk);
	if (ret && ret != -ENOSYS) {
		dev_err(bus, "failed to enable clock\n");
		return ret;
	}
	plat->frequency = clock;
	plat->speed_hz = plat->frequency / 2;

	/* init the zynq spi hw */
	zynqmp_qspi_init_hw(priv);

	return 0;
}

static int zynqmp_qspi_set_mode(struct udevice *bus, uint mode)
{
	struct zynqmp_qspi_priv *priv = dev_get_priv(bus);
	struct zynqmp_qspi_regs *regs = priv->regs;
	u32 confr;

	debug("%s\n", __func__);

	/* Set the SPI Clock phase and polarities */
	confr = readl(&regs->confr);
	confr &= ~(GQSPI_CONFIG_CPHA_MASK |
		   GQSPI_CONFIG_CPOL_MASK);

	if (mode & SPI_CPHA)
		confr |= GQSPI_CONFIG_CPHA_MASK;
	if (mode & SPI_CPOL)
		confr |= GQSPI_CONFIG_CPOL_MASK;

	writel(confr, &regs->confr);

	return 0;
}

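/*
 * zynqmp_qspi_fill_tx_fifo - Copy 'size' bytes from priv->tx_buf into TXD
 *
 * Whole 32-bit words are written directly; a 1-3 byte tail is packed into a
 * single word with the unused upper bits set to all-ones. Each write waits
 * for the "TX FIFO not full" status bit with a timeout.
 */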
static int zynqmp_qspi_fill_tx_fifo(struct zynqmp_qspi_priv *priv, u32 size)
{
	u32 data;
	int ret = 0;
	struct zynqmp_qspi_regs *regs = priv->regs;
	u32 *buf = (u32 *)priv->tx_buf;
	u32 len = size;

	debug("TxFIFO: 0x%x, size: 0x%x\n", readl(&regs->isr),
	      size);

	while (size) {
		ret = wait_for_bit_le32(&regs->isr, GQSPI_IXR_TXNFULL_MASK, 1,
					GQSPI_TIMEOUT, 1);
		if (ret) {
			printf("%s: Timeout\n", __func__);
			return ret;
		}

		if (size >= 4) {
			writel(*buf, &regs->txd0r);
			buf++;
			size -= 4;
		} else {
			/* Pad the trailing 1-3 bytes out to a full word */
			switch (size) {
			case 1:
				data = *((u8 *)buf);
				data |= GENMASK(31, 8);
				break;
			case 2:
				data = *((u16 *)buf);
				data |= GENMASK(31, 16);
				break;
			case 3:
				data = *((u16 *)buf);
				data |= (*((u8 *)buf + 2) << 16);
				data |= GENMASK(31, 24);
				break;
			}
			writel(data, &regs->txd0r);
			size = 0;
		}
	}

	priv->tx_buf += len;
	return 0;
}

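/*
 * zynqmp_qspi_genfifo_cmd - Send instruction/command bytes
 *
 * Each command byte is issued as its own generic FIFO entry with the byte
 * placed in the immediate-data field, so no TX FIFO data phase is needed.
 */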
static void zynqmp_qspi_genfifo_cmd(struct zynqmp_qspi_priv *priv)
{
	u32 gen_fifo_cmd;
	u32 bytecount = 0;

	while (priv->len) {
		gen_fifo_cmd = zynqmp_qspi_bus_select(priv);
		gen_fifo_cmd |= GQSPI_GFIFO_TX | GQSPI_SPI_MODE_SPI;
		gen_fifo_cmd |= *(u8 *)priv->tx_buf;
		bytecount++;
		priv->len--;
		priv->tx_buf = (u8 *)priv->tx_buf + 1;

		debug("GFIFO_CMD_Cmd = 0x%x\n", gen_fifo_cmd);

		zynqmp_qspi_fill_gen_fifo(priv, gen_fifo_cmd);
	}
}

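/*
 * zynqmp_qspi_calc_exp - Encode the next chunk length into a GEN FIFO entry
 *
 * Up to 255 bytes fit into the 8-bit immediate field directly. Larger
 * transfers use the exponent encoding (the field holds n, the length is 2^n),
 * one power-of-two chunk per call: e.g. 300 remaining bytes produce a
 * 256-byte entry first, then a 44-byte immediate entry on the next call.
 */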
static u32 zynqmp_qspi_calc_exp(struct zynqmp_qspi_priv *priv,
				u32 *gen_fifo_cmd)
{
	u32 expval = 8;
	u32 len;

	while (1) {
		if (priv->len > 255) {
			if (priv->len & (1 << expval)) {
				*gen_fifo_cmd &= ~GQSPI_GFIFO_IMD_MASK;
				*gen_fifo_cmd |= GQSPI_GFIFO_EXP_MASK;
				*gen_fifo_cmd |= expval;
				priv->len -= (1 << expval);
				return expval;
			}
			expval++;
		} else {
			*gen_fifo_cmd &= ~(GQSPI_GFIFO_IMD_MASK |
					   GQSPI_GFIFO_EXP_MASK);
			*gen_fifo_cmd |= (u8)priv->len;
			len = (u8)priv->len;
			priv->len = 0;
			return len;
		}
	}
}

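/*
 * zynqmp_qspi_genfifo_fill_tx - Transmit the data phase of a write
 *
 * Splits priv->len into GEN FIFO entries via zynqmp_qspi_calc_exp() and, for
 * each entry, feeds the matching number of bytes into the TX data FIFO
 * (1 << len bytes for exponent entries, len bytes otherwise).
 */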
static int zynqmp_qspi_genfifo_fill_tx(struct zynqmp_qspi_priv *priv)
{
	u32 gen_fifo_cmd;
	u32 len;
	int ret = 0;

	gen_fifo_cmd = zynqmp_qspi_bus_select(priv);
	gen_fifo_cmd |= GQSPI_GFIFO_TX |
			GQSPI_GFIFO_DATA_XFR_MASK;
	gen_fifo_cmd |= GQSPI_SPI_MODE_SPI;

	while (priv->len) {
		len = zynqmp_qspi_calc_exp(priv, &gen_fifo_cmd);
		zynqmp_qspi_fill_gen_fifo(priv, gen_fifo_cmd);

		debug("GFIFO_CMD_TX:0x%x\n", gen_fifo_cmd);

		if (gen_fifo_cmd & GQSPI_GFIFO_EXP_MASK)
			ret = zynqmp_qspi_fill_tx_fifo(priv,
						       1 << len);
		else
			ret = zynqmp_qspi_fill_tx_fifo(priv,
						       len);

		if (ret)
			return ret;
	}
	return ret;
}

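/*
 * zynqmp_qspi_start_dma - Receive priv->len bytes into 'buf' using QSPI DMA
 *
 * The destination address and length (rounded up to ARCH_DMA_MINALIGN) are
 * programmed into the DMA block and the cache range is flushed before the RX
 * GEN FIFO entries are queued. Once the DMA-done status asserts, the data is
 * copied to priv->rx_buf if a bounce buffer was used.
 */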
static int zynqmp_qspi_start_dma(struct zynqmp_qspi_priv *priv,
				 u32 gen_fifo_cmd, u32 *buf)
{
	u32 addr;
	u32 size, len;
	u32 actuallen = priv->len;
	int ret = 0;
	struct zynqmp_qspi_dma_regs *dma_regs = priv->dma_regs;

	writel((unsigned long)buf, &dma_regs->dmadst);
	writel(roundup(priv->len, ARCH_DMA_MINALIGN), &dma_regs->dmasize);
	writel(GQSPI_DMA_DST_I_STS_MASK, &dma_regs->dmaier);
	addr = (unsigned long)buf;
	size = roundup(priv->len, ARCH_DMA_MINALIGN);
	flush_dcache_range(addr, addr + size);

	while (priv->len) {
		len = zynqmp_qspi_calc_exp(priv, &gen_fifo_cmd);
		if (!(gen_fifo_cmd & GQSPI_GFIFO_EXP_MASK) &&
		    (len % ARCH_DMA_MINALIGN)) {
			gen_fifo_cmd &= ~GENMASK(7, 0);
			gen_fifo_cmd |= roundup(len, ARCH_DMA_MINALIGN);
		}
		zynqmp_qspi_fill_gen_fifo(priv, gen_fifo_cmd);

		debug("GFIFO_CMD_RX:0x%x\n", gen_fifo_cmd);
	}

	ret = wait_for_bit_le32(&dma_regs->dmaisr, GQSPI_DMA_DST_I_STS_DONE,
				1, GQSPI_TIMEOUT, 1);
	if (ret) {
		printf("DMA Timeout:0x%x\n", readl(&dma_regs->dmaisr));
		return -ETIMEDOUT;
	}

	writel(GQSPI_DMA_DST_I_STS_DONE, &dma_regs->dmaisr);

	debug("buf:0x%lx, rxbuf:0x%lx, *buf:0x%x len: 0x%x\n",
	      (unsigned long)buf, (unsigned long)priv->rx_buf, *buf,
	      actuallen);

	if (buf != priv->rx_buf)
		memcpy(priv->rx_buf, buf, actuallen);

	return 0;
}

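/*
 * zynqmp_qspi_genfifo_fill_rx - Receive the data phase of a read
 *
 * DMA is always used: if the caller's buffer is 4-byte aligned and the length
 * is a multiple of 4, it is used directly; otherwise the data lands in a
 * cache-aligned bounce buffer and is copied back in zynqmp_qspi_start_dma().
 */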
static int zynqmp_qspi_genfifo_fill_rx(struct zynqmp_qspi_priv *priv)
{
	u32 gen_fifo_cmd;
	u32 *buf;
	u32 actuallen = priv->len;

	gen_fifo_cmd = zynqmp_qspi_bus_select(priv);
	gen_fifo_cmd |= GQSPI_GFIFO_RX |
			GQSPI_GFIFO_DATA_XFR_MASK;
	gen_fifo_cmd |= GQSPI_SPI_MODE_SPI;

	/*
	 * Check if the receive buffer is aligned to 4 bytes and the length
	 * is a multiple of four bytes, as we are using DMA to receive.
	 */
	if (!((unsigned long)priv->rx_buf & (GQSPI_DMA_ALIGN - 1)) &&
	    !(actuallen % GQSPI_DMA_ALIGN)) {
		buf = (u32 *)priv->rx_buf;
		return zynqmp_qspi_start_dma(priv, gen_fifo_cmd, buf);
	}

	ALLOC_CACHE_ALIGN_BUFFER(u8, tmp, roundup(priv->len,
						  GQSPI_DMA_ALIGN));
	buf = (u32 *)tmp;
	return zynqmp_qspi_start_dma(priv, gen_fifo_cmd, buf);
}

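/*
 * zynqmp_qspi_start_transfer - Dispatch one transfer segment
 *
 * Instruction segments (is_inst set) must carry TX data and go through the
 * command path; otherwise the TX or RX data path is chosen based on which
 * buffer is provided.
 */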
static int zynqmp_qspi_start_transfer(struct zynqmp_qspi_priv *priv)
{
	int ret = 0;

	if (priv->is_inst) {
		if (priv->tx_buf)
			zynqmp_qspi_genfifo_cmd(priv);
		else
			return -EINVAL;
	} else {
		if (priv->tx_buf)
			ret = zynqmp_qspi_genfifo_fill_tx(priv);
		else if (priv->rx_buf)
			ret = zynqmp_qspi_genfifo_fill_rx(priv);
		else
			return -EINVAL;
	}
	return ret;
}

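/*
 * zynqmp_qspi_transfer - Run one transfer with chip-select handling
 *
 * Asserts CS when the previous call ended with a CS change, performs the
 * transfer, and deasserts CS again if priv->cs_change is set (i.e. the caller
 * passed SPI_XFER_END), so CS can stay asserted across chained transfers.
 */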
static int zynqmp_qspi_transfer(struct zynqmp_qspi_priv *priv)
{
	static unsigned int cs_change = 1;
	int status = 0;

	debug("%s\n", __func__);

	while (1) {
		/* Select the chip if required */
		if (cs_change)
			zynqmp_qspi_chipselect(priv, 1);

		cs_change = priv->cs_change;

		if (!priv->tx_buf && !priv->rx_buf && priv->len) {
			status = -EINVAL;
			break;
		}

		/* Request the transfer */
		if (priv->len) {
			status = zynqmp_qspi_start_transfer(priv);
			priv->is_inst = 0;
			if (status < 0)
				break;
		}

		if (cs_change)
			/* Deselect the chip */
			zynqmp_qspi_chipselect(priv, 0);
		break;
	}

	return status;
}

static int zynqmp_qspi_claim_bus(struct udevice *dev)
{
	struct udevice *bus = dev->parent;
	struct zynqmp_qspi_priv *priv = dev_get_priv(bus);
	struct zynqmp_qspi_regs *regs = priv->regs;

	writel(GQSPI_ENABLE_ENABLE_MASK, &regs->enbr);

	return 0;
}

static int zynqmp_qspi_release_bus(struct udevice *dev)
{
	struct udevice *bus = dev->parent;
	struct zynqmp_qspi_priv *priv = dev_get_priv(bus);
	struct zynqmp_qspi_regs *regs = priv->regs;

	writel(~GQSPI_ENABLE_ENABLE_MASK, &regs->enbr);

	return 0;
}

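/*
 * zynqmp_qspi_xfer - dm_spi_ops .xfer implementation
 *
 * A typical flash access arrives as a chain of calls: the first one carries
 * the command/address bytes with SPI_XFER_BEGIN (handled as an instruction),
 * followed by data-only transfers, with SPI_XFER_END on the last call so that
 * chip select is released. bitlen is expected to be a multiple of 8.
 */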
int zynqmp_qspi_xfer(struct udevice *dev, unsigned int bitlen, const void *dout,
		     void *din, unsigned long flags)
{
	struct udevice *bus = dev->parent;
	struct zynqmp_qspi_priv *priv = dev_get_priv(bus);

	debug("%s: priv: 0x%08lx bitlen: %d dout: 0x%08lx ", __func__,
	      (unsigned long)priv, bitlen, (unsigned long)dout);
	debug("din: 0x%08lx flags: 0x%lx\n", (unsigned long)din, flags);

	priv->tx_buf = dout;
	priv->rx_buf = din;
	priv->len = bitlen / 8;

	/*
	 * Assume that the beginning of a transfer with bits to
	 * transmit must contain a device command.
	 */
	if (dout && flags & SPI_XFER_BEGIN)
		priv->is_inst = 1;
	else
		priv->is_inst = 0;

	if (flags & SPI_XFER_END)
		priv->cs_change = 1;
	else
		priv->cs_change = 0;

	zynqmp_qspi_transfer(priv);

	return 0;
}

static const struct dm_spi_ops zynqmp_qspi_ops = {
	.claim_bus	= zynqmp_qspi_claim_bus,
	.release_bus	= zynqmp_qspi_release_bus,
	.xfer		= zynqmp_qspi_xfer,
	.set_speed	= zynqmp_qspi_set_speed,
	.set_mode	= zynqmp_qspi_set_mode,
};

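/*
 * Illustrative device tree fragment (assumed, not copied from a real .dts)
 * that this driver binds against; only "reg" (controller base, offset by
 * GQSPI_REG_OFFSET/GQSPI_DMA_REG_OFFSET above) and clock index 0 are used.
 * The node address and clock labels below are placeholders:
 *
 *	qspi@ff0f0000 {
 *		compatible = "xlnx,zynqmp-qspi-1.0";
 *		reg = <0x0 0xff0f0000 0x0 0x1000>;
 *		clocks = <&qspi_ref_clk>, <&lpd_lsbus_clk>;
 *		#address-cells = <1>;
 *		#size-cells = <0>;
 *	};
 */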
static const struct udevice_id zynqmp_qspi_ids[] = {
	{ .compatible = "xlnx,zynqmp-qspi-1.0" },
	{ .compatible = "xlnx,versal-qspi-1.0" },
	{ }
};

U_BOOT_DRIVER(zynqmp_qspi) = {
	.name	= "zynqmp_qspi",
	.id	= UCLASS_SPI,
	.of_match = zynqmp_qspi_ids,
	.ops	= &zynqmp_qspi_ops,
	.ofdata_to_platdata = zynqmp_qspi_ofdata_to_platdata,
	.platdata_auto_alloc_size = sizeof(struct zynqmp_qspi_platdata),
	.priv_auto_alloc_size = sizeof(struct zynqmp_qspi_priv),
	.probe	= zynqmp_qspi_probe,
};