designware_qspi.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Designware master SPI core controller driver
 *
 * Copyright (C) 2020 linghui.zlh <linghui.zlh@alibaba-inc.com>
 *
 * Very loosely based on the Linux driver:
 * drivers/spi/spi-dw.c, which is:
 * Copyright (c) 2009, Intel Corporation.
 */

#include <common.h>
#include <asm-generic/gpio.h>
#include <clk.h>
#include <dm.h>
#include <errno.h>
#include <malloc.h>
#include <spi.h>
#include <spi-mem.h>
#include <fdtdec.h>
#include <reset.h>
#include <linux/compat.h>
#include <linux/iopoll.h>
#include <asm/io.h>

/* Register offsets */
#define DW_SPI_CTRL0 0x00
#define DW_SPI_CTRL1 0x04
#define DW_SPI_SSIENR 0x08
#define DW_SPI_MWCR 0x0c
#define DW_SPI_SER 0x10
#define DW_SPI_BAUDR 0x14
#define DW_SPI_TXFLTR 0x18
#define DW_SPI_RXFLTR 0x1c
#define DW_SPI_TXFLR 0x20
#define DW_SPI_RXFLR 0x24
#define DW_SPI_SR 0x28
#define DW_SPI_IMR 0x2c
#define DW_SPI_ISR 0x30
#define DW_SPI_RISR 0x34
#define DW_SPI_TXOICR 0x38
#define DW_SPI_RXOICR 0x3c
#define DW_SPI_RXUICR 0x40
#define DW_SPI_MSTICR 0x44
#define DW_SPI_ICR 0x48
#define DW_SPI_DMACR 0x4c
#define DW_SPI_DMATDLR 0x50
#define DW_SPI_DMARDLR 0x54
#define DW_SPI_IDR 0x58
#define DW_SPI_VERSION 0x5c
#define DW_SPI_DR 0x60
#define DW_SPI_RX_SAMPLE_DLY 0xf0
#define DW_SPI_SPI_CTRLR0 0xf4

/* Bit fields in CTRLR0 */
#define SPI_DFS32_OFFSET 16
#define SPI_DFS32_MASK (0x1f << SPI_DFS32_OFFSET)

#define SPI_FRF_OFFSET 4
#define SPI_FRF_SPI 0x0
#define SPI_FRF_SSP 0x1
#define SPI_FRF_MICROWIRE 0x2
#define SPI_FRF_RESV 0x3

#define SPI_MODE_OFFSET 6
#define SPI_SCPH_OFFSET 6
#define SPI_SCOL_OFFSET 7

#define SPI_TMOD_OFFSET 8
#define SPI_TMOD_MASK (0x3 << SPI_TMOD_OFFSET)
#define SPI_TMOD_TR 0x0 /* xmit & recv */
#define SPI_TMOD_TO 0x1 /* xmit only */
#define SPI_TMOD_RO 0x2 /* recv only */
#define SPI_TMOD_EPROMREAD 0x3 /* eeprom read mode */

#define SPI_SLVOE_OFFSET 10
#define SPI_SRL_OFFSET 11
#define SPI_CFS_OFFSET 12

#define SPI_SPI_FRF_OFFSET 21
#define SPI_SPI_FRF_MASK (0x3 << SPI_SPI_FRF_OFFSET)
#define SPI_SPI_FRF_STD 0x0
#define SPI_SPI_FRF_DUAL 0x1
#define SPI_SPI_FRF_QUAD 0x2
#define SPI_SPI_FRF_OCTA 0x3

#define SPI_SSTE_OFFSET 24
#define SPI_SSTE_MASK (1 << SPI_SSTE_OFFSET)

/* Bit fields in SR, 7 bits */
#define SR_MASK GENMASK(6, 0) /* cover 7 bits */
#define SR_BUSY BIT(0)
#define SR_TF_NOT_FULL BIT(1)
#define SR_TF_EMPT BIT(2)
#define SR_RF_NOT_EMPT BIT(3)
#define SR_RF_FULL BIT(4)
#define SR_TX_ERR BIT(5)
#define SR_DCOL BIT(6)

/* Bit fields in ISR, IMR, RISR, 7 bits */
#define SPI_INT_TXEI (1 << 0) /* tx fifo empty */
#define SPI_INT_TXOI (1 << 1) /* tx fifo overflow */
#define SPI_INT_RXUI (1 << 2) /* rx fifo underflow */
#define SPI_INT_RXOI (1 << 3) /* rx fifo overflow */
#define SPI_INT_RXFI (1 << 4) /* rx fifo full */
#define SPI_INT_MSTI (1 << 5) /* multi-master contention, set only when configured as a slave device */

/* Bit fields in SPI_CTRLR0 */
#define SPI_CTRLR0_TRANS_OFFSET 0x0
#define SPI_CTRLR0_TRANS_ISTD_ASTD (0) /* both instruction and address are sent in standard mode */
#define SPI_CTRLR0_TRANS_ISTD_ASPF (1) /* instruction sent in standard mode, address sent in the mode specified by CTRLR0.SPI_FRF */
#define SPI_CTRLR0_TRANS_ISPF_ASPF (2) /* both instruction and address sent in the mode specified by CTRLR0.SPI_FRF */
#define SPI_CTRLR0_ADDR_L_OFFSET 0x2
#define SPI_CTRLR0_INST_L_OFFSET 0x8
#define SPI_CTRLR0_INST_L_0 (0)
#define SPI_CTRLR0_INST_L_4 (1)
#define SPI_CTRLR0_INST_L_8 (2)
#define SPI_CTRLR0_INST_L_16 (3)
#define SPI_CTRLR0_WAIT_CYCLES_OFFSET (11) /* counted in io clks */
#define SPI_CTRLR0_DDR_EN_OFFSET (16) /* enable dual-data rate transfers in dual/quad/octal frame formats */
#define SPI_CTRLR0_DDR_EN_MASK (1 << SPI_CTRLR0_DDR_EN_OFFSET)
#define SPI_CTRLR0_INST_DDR_EN_OFFSET (17)
#define SPI_CTRLR0_INST_DDR_EN_MASK (1 << SPI_CTRLR0_INST_DDR_EN_OFFSET)
#define SPI_CTRLR0_RXDS_EN_OFFSET (18)
#define SPI_CTRLR0_RXDS_EN_MASK (1 << SPI_CTRLR0_RXDS_EN_OFFSET)

#define RX_TIMEOUT 1000 /* timeout in ms */

struct dw_qspi_platdata {
	s32 frequency; /* Default clock frequency, -1 for none */
	void __iomem *regs;
};

struct xfer_pre_t {
#define DW_MAX_CMD_BUF_LEN 32
	u8 xfer_pre[DW_MAX_CMD_BUF_LEN];
	u32 xfer_pre_len;
};

struct dw_qspi_priv {
	void __iomem *regs;
	unsigned int freq; /* Default frequency */
	unsigned int mode;
	struct clk clk;
	unsigned long bus_clk_rate;
	struct gpio_desc cs_gpio; /* External chip-select gpio */
	/* Used by the spi_controller_mem_ops interface */
	struct xfer_pre_t xfer_data_pre;
	u8 cs; /* chip select pin */
	u8 tmode; /* TR/TO/RO/EEPROM */
	u8 type; /* SPI/SSP/MicroWire */
	u8 n_bytes; /* bytes per word */
	int len;
	u32 fifo_len; /* depth of the FIFO buffer */
	void *tx;
	void *tx_end;
	void *rx;
	void *rx_end;
	struct reset_ctl_bulk resets;
};

static inline u32 dw_read(struct dw_qspi_priv *priv, u32 offset)
{
	return __raw_readl(priv->regs + offset);
}

static inline void dw_write(struct dw_qspi_priv *priv, u32 offset, u32 val)
{
	__raw_writel(val, priv->regs + offset);
}

static int request_gpio_cs(struct udevice *bus)
{
#if defined(CONFIG_DM_GPIO) && !defined(CONFIG_SPL_BUILD)
	struct dw_qspi_priv *priv = dev_get_priv(bus);
	int ret;

	/* External chip select gpio line is optional */
	ret = gpio_request_by_name(bus, "cs-gpio", 0, &priv->cs_gpio, 0);
	if (ret == -ENOENT)
		return 0;

	if (ret < 0) {
		debug("Error: %d: Can't get %s gpio!\n", ret, bus->name);
		return ret;
	}

	if (dm_gpio_is_valid(&priv->cs_gpio)) {
		dm_gpio_set_dir_flags(&priv->cs_gpio,
				      GPIOD_IS_OUT | GPIOD_IS_OUT_ACTIVE);
	}

	debug("%s: used external gpio for CS management\n", __func__);
#endif
	return 0;
}

static int dw_qspi_ofdata_to_platdata(struct udevice *bus)
{
	struct dw_qspi_platdata *plat = bus->platdata;

	plat->regs = (void __iomem *)devfdt_get_addr(bus);

	/* Use 1 MHz as a suitable default if the property is missing */
	plat->frequency = dev_read_u32_default(bus, "spi-max-frequency",
					       1000000);
	debug("%s: regs=%p max-frequency=%d\n", __func__, plat->regs,
	      plat->frequency);

	return request_gpio_cs(bus);
}

static inline void spi_enable_chip(struct dw_qspi_priv *priv, int enable)
{
	dw_write(priv, DW_SPI_SSIENR, (enable ? 1 : 0));
}

static inline void spi_enable_slave(struct dw_qspi_priv *priv, u32 slave_idx)
{
	u32 val;

	val = dw_read(priv, DW_SPI_SER);
	val |= 1 << slave_idx;
	dw_write(priv, DW_SPI_SER, val);
}

static inline void spi_disable_slave(struct dw_qspi_priv *priv, int slave_idx)
{
	u32 val;

	val = dw_read(priv, DW_SPI_SER);
	val &= ~(1 << slave_idx);
	dw_write(priv, DW_SPI_SER, val);
}

/* Restart the controller, disable all interrupts, clean rx fifo */
static void spi_hw_init(struct dw_qspi_priv *priv)
{
	spi_enable_chip(priv, 0);
	dw_write(priv, DW_SPI_IMR, 0xff);
	dw_write(priv, DW_SPI_SER, 0x0);
	dw_write(priv, DW_SPI_RX_SAMPLE_DLY, 0x4);
	spi_enable_chip(priv, 1);

	/*
	 * Try to detect the FIFO depth if not set by interface driver,
	 * the depth could be from 2 to 256 from HW spec
	 */
	if (!priv->fifo_len) {
		u32 fifo;

		for (fifo = 1; fifo < 256; fifo++) {
			dw_write(priv, DW_SPI_TXFLTR, fifo);
			if (fifo != dw_read(priv, DW_SPI_TXFLTR))
				break;
		}

		priv->fifo_len = (fifo == 1) ? 0 : fifo;
		dw_write(priv, DW_SPI_TXFLTR, 0);
	}
	debug("%s: fifo_len=%d\n", __func__, priv->fifo_len);
}

/*
 * We define the dw_qspi_get_clk function as 'weak' as some targets
 * (like SOCFPGA_GEN5 and SOCFPGA_ARRIA10) don't use the standard clock API
 * and implement dw_qspi_get_clk their own way in their clock manager.
 */
__weak int dw_qspi_get_clk(struct udevice *bus, ulong *rate)
{
	struct dw_qspi_priv *priv = dev_get_priv(bus);
	int ret;

	ret = clk_get_by_index(bus, 0, &priv->clk);
	if (ret)
		return ret;

	ret = clk_enable(&priv->clk);
	if (ret && ret != -ENOSYS && ret != -ENOTSUPP)
		return ret;

	*rate = clk_get_rate(&priv->clk);
	if (!*rate)
		goto err_rate;

	debug("%s: get spi controller clk via device tree: %lu Hz\n",
	      __func__, *rate);

	return 0;

err_rate:
	clk_disable(&priv->clk);
	clk_free(&priv->clk);

	return -EINVAL;
}

static int dw_qspi_reset(struct udevice *bus)
{
	int ret;
	struct dw_qspi_priv *priv = dev_get_priv(bus);

	ret = reset_get_bulk(bus, &priv->resets);
	if (ret) {
		/*
		 * Return 0 if error due to !CONFIG_DM_RESET and reset
		 * DT property is not present.
		 */
		if (ret == -ENOENT || ret == -ENOTSUPP)
			return 0;

		dev_warn(bus, "Can't get reset: %d\n", ret);
		return ret;
	}

	ret = reset_deassert_bulk(&priv->resets);
	if (ret) {
		reset_release_bulk(&priv->resets);
		dev_err(bus, "Failed to reset: %d\n", ret);
		return ret;
	}

	return 0;
}

static int dw_qspi_probe(struct udevice *bus)
{
	struct dw_qspi_platdata *plat = dev_get_platdata(bus);
	struct dw_qspi_priv *priv = dev_get_priv(bus);
	int ret;

	priv->regs = plat->regs;
	priv->freq = plat->frequency;

	ret = dw_qspi_get_clk(bus, &priv->bus_clk_rate);
	if (ret)
		return ret;

	ret = dw_qspi_reset(bus);
	if (ret)
		return ret;

	priv->n_bytes = 1;
	priv->tmode = SPI_TMOD_TO; /* Tx only */

	/* Basic HW init */
	spi_hw_init(priv);

	return 0;
}

/* Return the max entries we can fill into tx fifo */
static inline u32 tx_max(struct dw_qspi_priv *priv)
{
	u32 tx_left, tx_room, rxtx_gap;

	tx_left = (priv->tx_end - priv->tx) / (priv->n_bytes);
	tx_room = priv->fifo_len - dw_read(priv, DW_SPI_TXFLR);

	/*
	 * Another concern is about the tx/rx mismatch, we
	 * thought about using (priv->fifo_len - rxflr - txflr) as
	 * one maximum value for tx, but it doesn't cover the
	 * data which is out of tx/rx fifo and inside the
	 * shift registers. So a control from sw point of
	 * view is taken.
	 */
	rxtx_gap = ((priv->rx_end - priv->rx) - (priv->tx_end - priv->tx)) /
		   (priv->n_bytes);

	return min3(tx_left, tx_room, (u32)(priv->fifo_len - rxtx_gap));
}

/* Return the max entries we should read out of rx fifo */
static inline u32 rx_max(struct dw_qspi_priv *priv)
{
	u32 rx_left = (priv->rx_end - priv->rx) / (priv->n_bytes);

	return min_t(u32, rx_left, dw_read(priv, DW_SPI_RXFLR));
}
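
/*
 * Push as many words as both the TX FIFO and the remaining TX buffer allow.
 * If the transfer has no TX buffer, zeroes are shifted out instead.
 * The word width follows priv->n_bytes (1, 2 or 4 bytes).
 */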
static void dw_writer(struct dw_qspi_priv *priv)
{
	u32 max = tx_max(priv);
	volatile u32 txw = 0;

	while (max--) {
		/* Set the tx word if the transfer's original "tx" is not null */
		if (priv->tx_end - priv->len) {
			if (priv->n_bytes == 1)
				txw = *(u8 *)(priv->tx);
			else if (priv->n_bytes == 2)
				txw = *(u16 *)(priv->tx);
			else
				txw = *(u32 *)(priv->tx);
		}
		dw_write(priv, DW_SPI_DR, txw);
		//debug("%s: tx=0x%02x\n", __func__, txw);
		priv->tx += priv->n_bytes;
	}
}
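
/*
 * Drain the RX FIFO, but never read more words than are still expected for
 * this transfer. Words are discarded when the transfer has no RX buffer;
 * the word width follows priv->n_bytes (1, 2 or 4 bytes).
 */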
static void dw_reader(struct dw_qspi_priv *priv)
{
	u32 max = rx_max(priv);
	volatile u32 rxw;

	while (max--) {
		rxw = dw_read(priv, DW_SPI_DR);
		debug("%s: rx=0x%02x\n", __func__, rxw);

		/* Care about rx if the transfer's original "rx" is not null */
		if (priv->rx_end - priv->len) {
			if (priv->n_bytes == 1)
				*(u8 *)(priv->rx) = rxw;
			else if (priv->n_bytes == 2)
				*(u16 *)(priv->rx) = rxw;
			else
				*(u32 *)(priv->rx) = rxw;
		}
		priv->rx += priv->n_bytes;
	}
}
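
/*
 * Polling transmit loop: keep topping up the TX FIFO until the whole TX
 * buffer has been pushed out. dw_qspi_xfer() only uses transmit-only mode
 * (SPI_TMOD_TO), so no dw_reader() call is needed here.
 */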
static int poll_transfer(struct dw_qspi_priv *priv)
{
	do {
		dw_writer(priv);
		//dw_reader(priv);
	} while (priv->tx < priv->tx_end);

	return 0;
}

/*
 * We define the external_cs_manage function as 'weak' as some targets
 * (like MSCC Ocelot) don't control the external CS pin using a GPIO
 * controller. These SoCs use specific registers to control by
 * software the SPI pins (and especially the CS).
 */
__weak void external_cs_manage(struct udevice *dev, bool on)
{
#if defined(CONFIG_DM_GPIO) && !defined(CONFIG_SPL_BUILD)
	struct dw_qspi_priv *priv = dev_get_priv(dev->parent);

	if (!dm_gpio_is_valid(&priv->cs_gpio))
		return;

	dm_gpio_set_value(&priv->cs_gpio, on ? 1 : 0);
#endif
}

static int dw_qspi_wait_bus_idle(struct dw_qspi_priv *priv, u32 timeout_us)
{
	u32 val;
	void __iomem *reg = priv->regs + DW_SPI_SR;

	return readl_poll_timeout(reg, val, !(val & SR_BUSY), timeout_us);
}
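
/*
 * Serialize the command phase into a staging buffer: opcode first, then the
 * address bytes (MSB first), then 0xFF filler for the dummy bytes.
 * dw_qspi_exec_op() shifts this buffer out byte by byte when the data phase
 * uses standard (single-line) mode.
 */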
static void dw_qspi_build_xfer_pre_portion(struct dw_qspi_priv *priv,
					   const struct spi_mem_op *op)
{
	u32 i = 0, j = 0;

	/* operation code */
	priv->xfer_data_pre.xfer_pre[i++] = op->cmd.opcode;

	/* addr */
	if (op->addr.nbytes) {
		for (j = 0; j < op->addr.nbytes; j++)
			priv->xfer_data_pre.xfer_pre[i++] =
				(op->addr.val >> (8 * (op->addr.nbytes - j - 1))) & 0xFF;
	}

	/* dummy */
	if (op->dummy.nbytes) {
		memset(&priv->xfer_data_pre.xfer_pre[i], 0xFF, op->dummy.nbytes);
		i += op->dummy.nbytes;
	}

	priv->xfer_data_pre.xfer_pre_len = i;
}
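
/*
 * A transfer can use a 32-bit frame size when the data phase runs on
 * multiple I/O lines, its length is a non-zero multiple of four bytes and
 * the data buffer is 4-byte aligned; otherwise fall back to 8-bit frames.
 */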
static bool dw_qspi_can_xfer_32bits_frame(const struct spi_mem_op *op)
{
	bool ret = false;

	if (op->data.buswidth > 1 && op->data.nbytes && !(op->data.nbytes & 0x3)) {
		if (op->data.dir == SPI_MEM_DATA_OUT &&
		    !((unsigned long)(op->data.buf.out) & 0x03))
			ret = true;
		else if (op->data.dir == SPI_MEM_DATA_IN &&
			 !((unsigned long)(op->data.buf.in) & 0x03))
			ret = true;
	}

	return ret;
}

static int dw_qspi_xfer(struct udevice *dev, unsigned int bitlen,
			const void *dout, void *din, unsigned long flags)
{
	struct udevice *bus = dev->parent;
	struct dw_qspi_priv *priv = dev_get_priv(bus);
	const u8 *tx = dout;
	u8 *rx = din;
	int ret = 0;
	u32 cr0 = 0, spi_cr0;
	u32 val;

	debug("%s:\n", __func__);

	/* spi core configured to do 8 bit transfers */
	if (bitlen % 8) {
		debug("Non byte aligned SPI transfer.\n");
		return -1;
	}

	/* Start the transaction if necessary. */
	if (flags & SPI_XFER_BEGIN)
		external_cs_manage(dev, false);

	cr0 = (7 << SPI_DFS32_OFFSET) | (priv->type << SPI_FRF_OFFSET) |
	      ((priv->mode & 0x03) << SPI_MODE_OFFSET) |
	      (priv->tmode << SPI_TMOD_OFFSET);

#if 0
	if (rx && tx)
		priv->tmode = SPI_TMOD_TR;
	else if (rx)
		priv->tmode = SPI_TMOD_RO;
	else
		/*
		 * In transmit only mode (SPI_TMOD_TO) input FIFO never gets
		 * any data which breaks our logic in poll_transfer() above.
		 */
		priv->tmode = SPI_TMOD_TR;
#endif
	priv->tmode = SPI_TMOD_TO;
	cr0 &= ~SPI_TMOD_MASK;
	cr0 |= (priv->tmode << SPI_TMOD_OFFSET);

	/* set quad mode, for test only */
	cr0 |= SPI_SPI_FRF_QUAD << SPI_SPI_FRF_OFFSET;

	priv->len = bitlen >> 3;
	debug("%s: rx=%p tx=%p len=%d [bytes]\n", __func__, rx, tx, priv->len);

	priv->tx = (void *)tx;
	priv->tx_end = priv->tx + priv->len;
	priv->rx = rx;
	priv->rx_end = priv->rx + priv->len;

	/* Disable controller before writing control registers */
	spi_enable_chip(priv, 0);
	spi_disable_slave(priv, 0);

	debug("%s: cr0=%08x\n", __func__, cr0);
	/* Reprogram cr0 only if changed */
	if (dw_read(priv, DW_SPI_CTRL0) != cr0)
		dw_write(priv, DW_SPI_CTRL0, cr0);

	/*
	 * set spi_ctrl0: instruction length = 8 bits, address length = 8 bits,
	 * wait_cycles = 0, both instruction and address sent in quad mode
	 */
	spi_cr0 = 2 << 0 | 2 << 2 | 2 << 8;
	dw_write(priv, DW_SPI_SPI_CTRLR0, spi_cr0);
	dw_write(priv, DW_SPI_CTRL1, priv->len - 1);
	debug("%s: cr0:%08x,spi_cr0:%08x\n", __func__, cr0, spi_cr0);

	/*
	 * Configure the desired SS (slave select 0...3) in the controller
	 * The DW SPI controller will activate and deactivate this CS
	 * automatically. So no cs_activate() etc is needed in this driver.
	 */
	//cs = spi_chip_select(dev);
	//dw_write(priv, DW_SPI_SER, 1 << cs);

	/* Enable controller after writing control registers */
	spi_enable_chip(priv, 1);

	/* Start transfer in a polling loop */
	priv->n_bytes = 1;
	ret = poll_transfer(priv);
	spi_enable_slave(priv, 0);

	/*
	 * Wait for current transmit operation to complete.
	 * Otherwise if some data still exists in Tx FIFO it can be
	 * silently flushed, i.e. dropped on disabling of the controller,
	 * which happens when writing 0 to DW_SPI_SSIENR which happens
	 * in the beginning of new transfer.
	 */
	if (readl_poll_timeout(priv->regs + DW_SPI_SR, val,
			       (val & SR_TF_EMPT) && !(val & SR_BUSY),
			       RX_TIMEOUT * 1000)) {
		ret = -ETIMEDOUT;
	}

	/* Stop the transaction if necessary */
	if (flags & SPI_XFER_END)
		external_cs_manage(dev, true);

	return ret;
}
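
/*
 * Execute a spi-mem operation:
 *  - program CTRLR0/SPI_CTRLR0 according to the data-phase bus width and
 *    direction, and CTRLR1 with the number of frames to receive,
 *  - send the command phase (opcode, address, dummy cycles),
 *  - push or pull the data phase by polling the FIFOs,
 *  - wait for the bus to go idle and check for RX FIFO over/underflow.
 */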
static int dw_qspi_exec_op(struct spi_slave *slave, const struct spi_mem_op *op)
{
	struct udevice *bus = slave->dev->parent;
	struct dw_qspi_priv *priv = dev_get_priv(bus);
	u32 cr0 = 0, spi_cr0 = 0;
	u32 addr_bits_len, dummy_bits_len;
	int ret;
	struct dw_qspi_platdata *plat = dev_get_platdata(bus);

	/* disable spi core */
	spi_enable_chip(priv, 0);

	/* disable the slave for writes, enable it for reads */
	if (op->data.dir == SPI_MEM_DATA_OUT)
		spi_disable_slave(priv, 0);
	else
		spi_enable_slave(priv, 0);

	/* build the pre-xfer data portion */
	dw_qspi_build_xfer_pre_portion(priv, op);

	/*
	 * default cr0 register setting
	 * SSI_TYPE-----> SPI_FRF_SPI
	 * CPOL&CPHA----> SPI_CPOL = SPI_CPHA = 0
	 * FRAME_SIZE---> 8 bits
	 * SPI_TMOD_OFFSET SPI_TMOD_TO
	 */
	cr0 = (priv->type << SPI_FRF_OFFSET) |
	      (0 << SPI_MODE_OFFSET) |
	      (priv->tmode << SPI_TMOD_OFFSET) |
	      ((8 - 1) << SPI_DFS32_OFFSET);

	priv->tmode = SPI_TMOD_TO;
	if (op->data.dir == SPI_MEM_DATA_OUT || op->data.dir == SPI_MEM_NO_DATA) {
		priv->tmode = SPI_TMOD_TO;
	} else if (op->data.dir == SPI_MEM_DATA_IN && op->data.buswidth > 1) {
		u32 len = op->data.nbytes;

		priv->tmode = SPI_TMOD_RO;
		if (dw_qspi_can_xfer_32bits_frame(op))
			len >>= 2;
		dw_write(priv, DW_SPI_CTRL1, len - 1);
	}
	cr0 &= ~SPI_TMOD_MASK;
	cr0 |= (priv->tmode << SPI_TMOD_OFFSET);

	/* init config of spi_cr0 if a non-standard spi mode is used */
	if (op->data.buswidth > 1) {
		/*
		 * trans_type = both instruction and address are sent in
		 * standard mode, instruction length is 8 bits
		 */
		spi_cr0 = (SPI_CTRLR0_TRANS_ISTD_ASTD << SPI_CTRLR0_TRANS_OFFSET) |
			  (SPI_CTRLR0_INST_L_8 << SPI_CTRLR0_INST_L_OFFSET);
		addr_bits_len = (op->addr.nbytes << 3) >> 2;
		dummy_bits_len = op->dummy.nbytes << 3;
		spi_cr0 |= (addr_bits_len << SPI_CTRLR0_ADDR_L_OFFSET);
		spi_cr0 |= (dummy_bits_len << SPI_CTRLR0_WAIT_CYCLES_OFFSET);
		dw_write(priv, DW_SPI_SPI_CTRLR0, spi_cr0);

		switch (op->data.buswidth) {
		case 2:
			cr0 |= (SPI_SPI_FRF_DUAL << SPI_SPI_FRF_OFFSET);
			break;
		case 4:
			cr0 |= (SPI_SPI_FRF_QUAD << SPI_SPI_FRF_OFFSET);
			break;
		default:
			break;
		}
	}

	/* check whether we can transfer with a 32-bit frame size, then update cr0 */
	if (dw_qspi_can_xfer_32bits_frame(op)) {
		cr0 &= ~SPI_DFS32_MASK;
		cr0 |= ((32 - 1) << SPI_DFS32_OFFSET);
	}
	dw_write(priv, DW_SPI_CTRL0, cr0);

	/* init freq */
	u32 clk_div = priv->bus_clk_rate / plat->frequency;

	clk_div = (clk_div + 1) & 0xfffe;
	dw_write(priv, DW_SPI_BAUDR, clk_div);
	debug("%s: busclk:%lu iofreq:%d clk_div:%u\n", __func__,
	      priv->bus_clk_rate, plat->frequency, clk_div);

	/* for poll mode just disable all interrupts */
	external_cs_manage(slave->dev, false);
	dw_write(priv, DW_SPI_IMR, 0xff);
	debug("#1:cr0 %08x cr1 %08x spi_cr0 %08x\n", dw_read(priv, DW_SPI_CTRL0),
	      dw_read(priv, DW_SPI_CTRL1), dw_read(priv, DW_SPI_SPI_CTRLR0));

	/* transfer the data_pre portion (cmd + addr + dummy) */
	spi_enable_chip(priv, 1);

	/* the pre portion is sent in standard spi mode with an 8-bit frame size */
	priv->n_bytes = 1;
	if (op->data.nbytes && op->data.buswidth > 1) {
		/*
		 * !!!note: in quad mode, the dw-ssi must not be interrupted
		 * between sending the pre portion and the data portion,
		 * otherwise the timing won't be as expected
		 */
		dw_write(priv, DW_SPI_DR, op->cmd.opcode);
		if (op->addr.nbytes)
			dw_write(priv, DW_SPI_DR, op->addr.val);
	} else {
		priv->tx = (void *)priv->xfer_data_pre.xfer_pre;
		priv->tx_end = priv->tx + priv->xfer_data_pre.xfer_pre_len;
		priv->len = priv->xfer_data_pre.xfer_pre_len;
		do {
			dw_writer(priv);
		} while (priv->tx_end > priv->tx);
	}

	/* transfer the data portion if needed */
	if (op->data.dir == SPI_MEM_DATA_OUT) {
		priv->tx = (void *)op->data.buf.out;
		priv->tx_end = priv->tx + op->data.nbytes;
		priv->len = op->data.nbytes;
		if (dw_qspi_can_xfer_32bits_frame(op))
			priv->n_bytes = 4;
		do {
			dw_writer(priv);
		} while (priv->tx_end > priv->tx);
		spi_enable_slave(priv, 0);
	} else if (op->data.dir == SPI_MEM_DATA_IN) {
		/*
		 * If the data portion uses standard mode, the previous
		 * transfer mode was tx-only, so switch to rx mode here.
		 */
		if (op->data.buswidth == 1) {
			ret = dw_qspi_wait_bus_idle(priv, 10000);
			if (ret) {
				debug("wait bus idle time out\n");
				return ret;
			}
			spi_enable_chip(priv, 0);
			cr0 = dw_read(priv, DW_SPI_CTRL0);
			cr0 &= ~SPI_TMOD_MASK;
			cr0 |= (SPI_TMOD_RO << SPI_TMOD_OFFSET);
			dw_write(priv, DW_SPI_CTRL0, cr0);
			dw_write(priv, DW_SPI_CTRL1, op->data.nbytes - 1);
			debug("#2:cr0 %08x cr1 %08x spi_cr0 %08x\n",
			      dw_read(priv, DW_SPI_CTRL0),
			      dw_read(priv, DW_SPI_CTRL1),
			      dw_read(priv, DW_SPI_SPI_CTRLR0));
			spi_enable_chip(priv, 1);
			dw_write(priv, DW_SPI_DR, 0);
			priv->rx = op->data.buf.in;
			priv->rx_end = priv->rx + op->data.nbytes;
			priv->len = op->data.nbytes;
			do {
				dw_reader(priv);
			} while (priv->rx_end > priv->rx);
		} else {
			/* non-standard mode */
			priv->rx = op->data.buf.in;
			priv->rx_end = priv->rx + op->data.nbytes;
			priv->len = op->data.nbytes;
			if (dw_qspi_can_xfer_32bits_frame(op))
				priv->n_bytes = 4;
			do {
				dw_reader(priv);
			} while (priv->rx_end > priv->rx);
		}
	}

	ret = dw_qspi_wait_bus_idle(priv, 500000);
	if (ret) {
		debug("wait bus idle timeout\n");
		return ret;
	}

	if (dw_read(priv, DW_SPI_RISR) & (SPI_INT_RXOI | SPI_INT_RXUI)) {
		debug("###rx err %02x\n", dw_read(priv, DW_SPI_RISR));
		return -1;
	}

	external_cs_manage(slave->dev, true);

	return 0;
}

static int dw_qspi_set_speed(struct udevice *bus, uint speed)
{
	struct dw_qspi_platdata *plat = bus->platdata;
	struct dw_qspi_priv *priv = dev_get_priv(bus);
	u16 clk_div;

	debug("%s: bus_clk %ld plat->freq %d speed %d\n", __func__,
	      priv->bus_clk_rate, plat->frequency, speed);

	/* Disable controller before writing control registers */
	spi_enable_chip(priv, 0);

	/* clk_div doesn't support odd numbers */
	clk_div = priv->bus_clk_rate / speed;
	clk_div = (clk_div + 1) & 0xfffe;
	dw_write(priv, DW_SPI_BAUDR, clk_div);

	/* Enable controller after writing control registers */
	spi_enable_chip(priv, 1);

	priv->freq = speed;
	debug("%s: regs=%p speed=%d clk_div=%d\n", __func__, priv->regs,
	      priv->freq, clk_div);

	return 0;
}

static int dw_qspi_set_mode(struct udevice *bus, uint mode)
{
	struct dw_qspi_priv *priv = dev_get_priv(bus);

	/*
	 * Can't set mode yet. Since this depends on if rx, tx, or
	 * rx & tx is requested. So we have to defer this to the
	 * real transfer function.
	 */
	priv->mode = mode;
	debug("%s: regs=%p, mode=%d\n", __func__, priv->regs, priv->mode);

	return 0;
}

static int dw_qspi_remove(struct udevice *bus)
{
	struct dw_qspi_priv *priv = dev_get_priv(bus);
	int ret;

	ret = reset_release_bulk(&priv->resets);
	if (ret)
		return ret;

#if CONFIG_IS_ENABLED(CLK)
	ret = clk_disable(&priv->clk);
	if (ret)
		return ret;

	ret = clk_free(&priv->clk);
	if (ret)
		return ret;
#endif
	return 0;
}

static int dw_qspi_check_buswidth(u8 width)
{
	switch (width) {
	case 1:
	case 2:
	case 4:
		return 0;
	}

	return -ENOTSUPP;
}
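
/*
 * Limit how much data a single operation may carry: writes are capped at
 * 254 * 4 bytes and reads at 256 * 4 bytes.
 */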
int dw_qspi_adjust_op_size(struct spi_slave *slave, struct spi_mem_op *op)
{
	if (op->data.dir == SPI_MEM_DATA_OUT && op->data.nbytes >= (256 << 2))
		op->data.nbytes = 254 << 2;

	if (op->data.dir == SPI_MEM_DATA_IN && op->data.nbytes >= (256 << 2))
		op->data.nbytes = 256 << 2;

	return 0;
}
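
/*
 * Accept an operation only if the command, address and dummy phases use a
 * single I/O line and the data phase uses one, two or four lines.
 */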
static bool dw_qspi_supports_op(struct spi_slave *slave, const struct spi_mem_op *op)
{
	//struct udevice *bus = slave->dev->parent;
	//struct dw_qspi_priv *priv = dev_get_priv(bus);
	int ret = 0;
	u32 temp_len;

	/* check buswidth */
	if (op->cmd.buswidth != 1 ||
	    (op->addr.nbytes && op->addr.buswidth != 1) ||
	    (op->dummy.nbytes && op->dummy.buswidth != 1))
		return false;

	if (op->data.nbytes)
		ret |= dw_qspi_check_buswidth(op->data.buswidth);

	if (ret)
		return false;

	/* check addr bits length */
	temp_len = op->addr.nbytes << 3;
	if (op->data.nbytes && op->data.buswidth > 4 && temp_len > 60)
		return false;

	return true;
}

static const struct spi_controller_mem_ops dw_qspi_mem_ops = {
	.exec_op = dw_qspi_exec_op,
	.adjust_op_size = dw_qspi_adjust_op_size,
	.supports_op = dw_qspi_supports_op,
};

static const struct dm_spi_ops dw_qspi_ops = {
	.xfer = dw_qspi_xfer,
	.set_speed = dw_qspi_set_speed,
	.set_mode = dw_qspi_set_mode,
	.mem_ops = &dw_qspi_mem_ops,
	/*
	 * cs_info is not needed, since we require all chip selects to be
	 * in the device tree explicitly
	 */
};

static const struct udevice_id dw_qspi_ids[] = {
	{ .compatible = "snps,dw-apb-ssi-quad" },
	{ }
};

U_BOOT_DRIVER(dw_qspi) = {
	.name = "dw_qspi",
	.id = UCLASS_SPI,
	.of_match = dw_qspi_ids,
	.ops = &dw_qspi_ops,
	.ofdata_to_platdata = dw_qspi_ofdata_to_platdata,
	.platdata_auto_alloc_size = sizeof(struct dw_qspi_platdata),
	.priv_auto_alloc_size = sizeof(struct dw_qspi_priv),
	.probe = dw_qspi_probe,
	.remove = dw_qspi_remove,
};