// SPDX-License-Identifier: GPL-2.0-only
/*
 * DesignWare SPI core controller driver (refer to pxa2xx_spi.c)
 *
 * Copyright (c) 2009, Intel Corporation.
 */

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/highmem.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/string.h>
#include <linux/of.h>

#include "spi-dw.h"

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#endif

/* Slave spi_device related */
struct chip_data {
        u32 cr0;
        u32 rx_sample_dly;      /* RX sample delay */
};

#ifdef CONFIG_DEBUG_FS

#define DW_SPI_DBGFS_REG(_name, _off)   \
{                                       \
        .name = _name,                  \
        .offset = _off,                 \
}

static const struct debugfs_reg32 dw_spi_dbgfs_regs[] = {
        DW_SPI_DBGFS_REG("CTRLR0", DW_SPI_CTRLR0),
        DW_SPI_DBGFS_REG("CTRLR1", DW_SPI_CTRLR1),
        DW_SPI_DBGFS_REG("SSIENR", DW_SPI_SSIENR),
        DW_SPI_DBGFS_REG("SER", DW_SPI_SER),
        DW_SPI_DBGFS_REG("BAUDR", DW_SPI_BAUDR),
        DW_SPI_DBGFS_REG("TXFTLR", DW_SPI_TXFTLR),
        DW_SPI_DBGFS_REG("RXFTLR", DW_SPI_RXFTLR),
        DW_SPI_DBGFS_REG("TXFLR", DW_SPI_TXFLR),
        DW_SPI_DBGFS_REG("RXFLR", DW_SPI_RXFLR),
        DW_SPI_DBGFS_REG("SR", DW_SPI_SR),
        DW_SPI_DBGFS_REG("IMR", DW_SPI_IMR),
        DW_SPI_DBGFS_REG("ISR", DW_SPI_ISR),
        DW_SPI_DBGFS_REG("DMACR", DW_SPI_DMACR),
        DW_SPI_DBGFS_REG("DMATDLR", DW_SPI_DMATDLR),
        DW_SPI_DBGFS_REG("DMARDLR", DW_SPI_DMARDLR),
        DW_SPI_DBGFS_REG("RX_SAMPLE_DLY", DW_SPI_RX_SAMPLE_DLY),
};

static int dw_spi_debugfs_init(struct dw_spi *dws)
{
        char name[32];

        snprintf(name, 32, "dw_spi%d", dws->master->bus_num);
        dws->debugfs = debugfs_create_dir(name, NULL);
        if (!dws->debugfs)
                return -ENOMEM;

        dws->regset.regs = dw_spi_dbgfs_regs;
        dws->regset.nregs = ARRAY_SIZE(dw_spi_dbgfs_regs);
        dws->regset.base = dws->regs;
        debugfs_create_regset32("registers", 0400, dws->debugfs, &dws->regset);

        return 0;
}

static void dw_spi_debugfs_remove(struct dw_spi *dws)
{
        debugfs_remove_recursive(dws->debugfs);
}

#else
static inline int dw_spi_debugfs_init(struct dw_spi *dws)
{
        return 0;
}

static inline void dw_spi_debugfs_remove(struct dw_spi *dws)
{
}
#endif /* CONFIG_DEBUG_FS */

void dw_spi_set_cs(struct spi_device *spi, bool enable)
{
        struct dw_spi *dws = spi_controller_get_devdata(spi->controller);
        bool cs_high = !!(spi->mode & SPI_CS_HIGH);

        /*
         * DW SPI controller demands any native CS being set in order to
         * proceed with data transfer. So in order to activate the SPI
         * communications we must set a corresponding bit in the Slave
         * Enable register no matter whether the SPI core is configured to
         * support active-high or active-low CS level.
         */
        if (cs_high == enable)
                dw_writel(dws, DW_SPI_SER, BIT(spi->chip_select));
        else
                dw_writel(dws, DW_SPI_SER, 0);
}
EXPORT_SYMBOL_GPL(dw_spi_set_cs);

/* Return the max entries we can fill into tx fifo */
static inline u32 tx_max(struct dw_spi *dws)
{
        u32 tx_room, rxtx_gap;

        tx_room = dws->fifo_len - dw_readl(dws, DW_SPI_TXFLR);

        /*
         * Another concern is the tx/rx mismatch: we thought of using
         * (dws->fifo_len - rxflr - txflr) as the maximum value for tx,
         * but it doesn't cover the data which is out of the tx/rx fifo
         * and inside the shift registers. So a control from the sw
         * point of view is taken instead.
         */
        rxtx_gap = dws->fifo_len - (dws->rx_len - dws->tx_len);
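
        /*
         * Worked example (illustrative numbers): with an 8-entry FIFO,
         * rx_len == 10 and tx_len == 4, six frames are still in flight
         * or waiting in the Rx FIFO, so at most 8 - (10 - 4) = 2 more
         * entries may be queued for Tx.
         */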
        return min3((u32)dws->tx_len, tx_room, rxtx_gap);
}

/* Return the max entries we should read out of rx fifo */
static inline u32 rx_max(struct dw_spi *dws)
{
        return min_t(u32, dws->rx_len, dw_readl(dws, DW_SPI_RXFLR));
}

static void dw_writer(struct dw_spi *dws)
{
        u32 max = tx_max(dws);
        u16 txw = 0;

        while (max--) {
                if (dws->tx) {
                        if (dws->n_bytes == 1)
                                txw = *(u8 *)(dws->tx);
                        else
                                txw = *(u16 *)(dws->tx);

                        dws->tx += dws->n_bytes;
                }
                dw_write_io_reg(dws, DW_SPI_DR, txw);
                --dws->tx_len;
        }
}

static void dw_reader(struct dw_spi *dws)
{
        u32 max = rx_max(dws);
        u16 rxw;

        while (max--) {
                rxw = dw_read_io_reg(dws, DW_SPI_DR);
                if (dws->rx) {
                        if (dws->n_bytes == 1)
                                *(u8 *)(dws->rx) = rxw;
                        else
                                *(u16 *)(dws->rx) = rxw;

                        dws->rx += dws->n_bytes;
                }
                --dws->rx_len;
        }
}

int dw_spi_check_status(struct dw_spi *dws, bool raw)
{
        u32 irq_status;
        int ret = 0;

        if (raw)
                irq_status = dw_readl(dws, DW_SPI_RISR);
        else
                irq_status = dw_readl(dws, DW_SPI_ISR);

        if (irq_status & SPI_INT_RXOI) {
                dev_err(&dws->master->dev, "RX FIFO overflow detected\n");
                ret = -EIO;
        }

        if (irq_status & SPI_INT_RXUI) {
                dev_err(&dws->master->dev, "RX FIFO underflow detected\n");
                ret = -EIO;
        }

        if (irq_status & SPI_INT_TXOI) {
                dev_err(&dws->master->dev, "TX FIFO overflow detected\n");
                ret = -EIO;
        }

        /* Generically handle the erroneous situation */
        if (ret) {
                spi_reset_chip(dws);
                if (dws->master->cur_msg)
                        dws->master->cur_msg->status = ret;
        }

        return ret;
}
EXPORT_SYMBOL_GPL(dw_spi_check_status);

static irqreturn_t dw_spi_transfer_handler(struct dw_spi *dws)
{
        u16 irq_status = dw_readl(dws, DW_SPI_ISR);

        if (dw_spi_check_status(dws, false)) {
                spi_finalize_current_transfer(dws->master);
                return IRQ_HANDLED;
        }

        /*
         * Read data from the Rx FIFO every time we've got a chance of
         * executing this method. If there is nothing left to receive,
         * terminate the procedure. Otherwise adjust the Rx FIFO Threshold
         * level if it's the final stage of the transfer. By doing so we'll
         * get the next IRQ right when the leftover incoming data is
         * received.
         */
        dw_reader(dws);
        if (!dws->rx_len) {
                spi_mask_intr(dws, 0xff);
                spi_finalize_current_transfer(dws->master);
        } else if (dws->rx_len <= dw_readl(dws, DW_SPI_RXFTLR)) {
                dw_writel(dws, DW_SPI_RXFTLR, dws->rx_len - 1);
        }

        /*
         * Send data out if the Tx FIFO Empty IRQ is received. The IRQ will
         * be disabled after the data transmission is finished so as not to
         * have the TXE IRQ flood at the final stage of the transfer.
         */
        if (irq_status & SPI_INT_TXEI) {
                dw_writer(dws);
                if (!dws->tx_len)
                        spi_mask_intr(dws, SPI_INT_TXEI);
        }

        return IRQ_HANDLED;
}

static irqreturn_t dw_spi_irq(int irq, void *dev_id)
{
        struct spi_controller *master = dev_id;
        struct dw_spi *dws = spi_controller_get_devdata(master);
        u16 irq_status = dw_readl(dws, DW_SPI_ISR) & 0x3f;

        if (!irq_status)
                return IRQ_NONE;

        if (!master->cur_msg) {
                spi_mask_intr(dws, 0xff);
                return IRQ_HANDLED;
        }

        return dws->transfer_handler(dws);
}

static u32 dw_spi_prepare_cr0(struct dw_spi *dws, struct spi_device *spi)
{
        u32 cr0 = 0;

        if (!(dws->caps & DW_SPI_CAP_DWC_SSI)) {
                /* CTRLR0[ 5: 4] Frame Format */
                cr0 |= SSI_MOTO_SPI << SPI_FRF_OFFSET;

                /*
                 * SPI mode (SCPOL|SCPH)
                 * CTRLR0[ 6] Serial Clock Phase
                 * CTRLR0[ 7] Serial Clock Polarity
                 */
                cr0 |= ((spi->mode & SPI_CPOL) ? 1 : 0) << SPI_SCOL_OFFSET;
                cr0 |= ((spi->mode & SPI_CPHA) ? 1 : 0) << SPI_SCPH_OFFSET;

                /* CTRLR0[11] Shift Register Loop */
                cr0 |= ((spi->mode & SPI_LOOP) ? 1 : 0) << SPI_SRL_OFFSET;
        } else {
                /* CTRLR0[ 7: 6] Frame Format */
                cr0 |= SSI_MOTO_SPI << DWC_SSI_CTRLR0_FRF_OFFSET;

                /*
                 * SPI mode (SCPOL|SCPH)
                 * CTRLR0[ 8] Serial Clock Phase
                 * CTRLR0[ 9] Serial Clock Polarity
                 */
                cr0 |= ((spi->mode & SPI_CPOL) ? 1 : 0) << DWC_SSI_CTRLR0_SCPOL_OFFSET;
                cr0 |= ((spi->mode & SPI_CPHA) ? 1 : 0) << DWC_SSI_CTRLR0_SCPH_OFFSET;

                /* CTRLR0[13] Shift Register Loop */
                cr0 |= ((spi->mode & SPI_LOOP) ? 1 : 0) << DWC_SSI_CTRLR0_SRL_OFFSET;

                if (dws->caps & DW_SPI_CAP_KEEMBAY_MST)
                        cr0 |= DWC_SSI_CTRLR0_KEEMBAY_MST;
        }

        return cr0;
}

void dw_spi_update_config(struct dw_spi *dws, struct spi_device *spi,
                          struct dw_spi_cfg *cfg)
{
        struct chip_data *chip = spi_get_ctldata(spi);
        u32 cr0 = chip->cr0;
        u32 speed_hz;
        u16 clk_div;

        /* CTRLR0[ 4/3: 0] Data Frame Size */
        cr0 |= (cfg->dfs - 1);

        if (!(dws->caps & DW_SPI_CAP_DWC_SSI))
                /* CTRLR0[ 9:8] Transfer Mode */
                cr0 |= cfg->tmode << SPI_TMOD_OFFSET;
        else
                /* CTRLR0[11:10] Transfer Mode */
                cr0 |= cfg->tmode << DWC_SSI_CTRLR0_TMOD_OFFSET;

        dw_writel(dws, DW_SPI_CTRLR0, cr0);

        if (cfg->tmode == SPI_TMOD_EPROMREAD || cfg->tmode == SPI_TMOD_RO)
                dw_writel(dws, DW_SPI_CTRLR1, cfg->ndf ? cfg->ndf - 1 : 0);

        /* Note DW APB SSI clock divider doesn't support odd numbers */
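        /*
         * For instance (illustrative numbers), with max_freq = 100 MHz
         * and cfg->freq = 24 MHz, DIV_ROUND_UP() yields 5, which is then
         * rounded up to the even divider 6, giving an effective rate of
         * ~16.7 MHz.
         */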
        clk_div = (DIV_ROUND_UP(dws->max_freq, cfg->freq) + 1) & 0xfffe;
        speed_hz = dws->max_freq / clk_div;

        if (dws->current_freq != speed_hz) {
                spi_set_clk(dws, clk_div);
                dws->current_freq = speed_hz;
        }

        /* Update RX sample delay if required */
        if (dws->cur_rx_sample_dly != chip->rx_sample_dly) {
                dw_writel(dws, DW_SPI_RX_SAMPLE_DLY, chip->rx_sample_dly);
                dws->cur_rx_sample_dly = chip->rx_sample_dly;
        }
}
EXPORT_SYMBOL_GPL(dw_spi_update_config);

static void dw_spi_irq_setup(struct dw_spi *dws)
{
        u16 level;
        u8 imask;

        /*
         * Originally the Tx and Rx data lengths match. The Rx FIFO Threshold
         * level will be adjusted at the final stage of the IRQ-based SPI
         * transfer execution so as not to lose the leftover of the incoming
         * data.
         */
        level = min_t(u16, dws->fifo_len / 2, dws->tx_len);
        dw_writel(dws, DW_SPI_TXFTLR, level);
        dw_writel(dws, DW_SPI_RXFTLR, level - 1);
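
        /*
         * E.g. (illustrative, assuming the usual DW FIFO threshold
         * semantics) a 16-entry FIFO and an 80-frame transfer yield
         * thresholds of 8 and 7: the Tx empty IRQ fires once at most
         * 8 entries are left, the Rx full IRQ once 8 entries have been
         * received.
         */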

        dws->transfer_handler = dw_spi_transfer_handler;

        imask = SPI_INT_TXEI | SPI_INT_TXOI | SPI_INT_RXUI | SPI_INT_RXOI |
                SPI_INT_RXFI;
        spi_umask_intr(dws, imask);
}

/*
 * The iterative procedure of the poll-based transfer is simple: write as much
 * as possible to the Tx FIFO, wait until the data pending to be received is
 * ready to be read, read it from the Rx FIFO and check whether the performed
 * procedure has been successful.
 *
 * Note that this method, just like the IRQ-based transfer, won't work well
 * for SPI devices connected to the controller with native CS due to the
 * automatic CS assertion/de-assertion.
 */
static int dw_spi_poll_transfer(struct dw_spi *dws,
                                struct spi_transfer *transfer)
{
        struct spi_delay delay;
        u16 nbits;
        int ret;

        delay.unit = SPI_DELAY_UNIT_SCK;
        nbits = dws->n_bytes * BITS_PER_BYTE;

        do {
                dw_writer(dws);
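
                /* Wait for as many SCK cycles as there are frames in flight */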
                delay.value = nbits * (dws->rx_len - dws->tx_len);
                spi_delay_exec(&delay, transfer);

                dw_reader(dws);

                ret = dw_spi_check_status(dws, true);
                if (ret)
                        return ret;
        } while (dws->rx_len);

        return 0;
}

static int dw_spi_transfer_one(struct spi_controller *master,
                               struct spi_device *spi,
                               struct spi_transfer *transfer)
{
        struct dw_spi *dws = spi_controller_get_devdata(master);
        struct dw_spi_cfg cfg = {
                .tmode = SPI_TMOD_TR,
                .dfs = transfer->bits_per_word,
                .freq = transfer->speed_hz,
        };
        int ret;

        dws->dma_mapped = 0;
        dws->n_bytes = DIV_ROUND_UP(transfer->bits_per_word, BITS_PER_BYTE);
        dws->tx = (void *)transfer->tx_buf;
        dws->tx_len = transfer->len / dws->n_bytes;
        dws->rx = transfer->rx_buf;
        dws->rx_len = dws->tx_len;

        /* Ensure the data above is visible for all CPUs */
        smp_mb();

        spi_enable_chip(dws, 0);

        dw_spi_update_config(dws, spi, &cfg);

        transfer->effective_speed_hz = dws->current_freq;

        /* Check if current transfer is a DMA transaction */
        if (master->can_dma && master->can_dma(master, spi, transfer))
                dws->dma_mapped = master->cur_msg_mapped;

        /* For poll mode just disable all interrupts */
        spi_mask_intr(dws, 0xff);

        if (dws->dma_mapped) {
                ret = dws->dma_ops->dma_setup(dws, transfer);
                if (ret)
                        return ret;
        }

        spi_enable_chip(dws, 1);

        if (dws->dma_mapped)
                return dws->dma_ops->dma_transfer(dws, transfer);
        else if (dws->irq == IRQ_NOTCONNECTED)
                return dw_spi_poll_transfer(dws, transfer);

        dw_spi_irq_setup(dws);

        return 1;
}

static void dw_spi_handle_err(struct spi_controller *master,
                              struct spi_message *msg)
{
        struct dw_spi *dws = spi_controller_get_devdata(master);

        if (dws->dma_mapped)
                dws->dma_ops->dma_stop(dws);

        spi_reset_chip(dws);
}

static int dw_spi_adjust_mem_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
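        /*
         * The EEPROM-read mode can fetch at most SPI_NDF_MASK + 1 frames,
         * since the CTRLR1.NDF field is programmed with nbytes - 1 (see
         * dw_spi_update_config()), so cap inbound transfers accordingly.
         */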
        if (op->data.dir == SPI_MEM_DATA_IN)
                op->data.nbytes = clamp_val(op->data.nbytes, 0, SPI_NDF_MASK + 1);

        return 0;
}

static bool dw_spi_supports_mem_op(struct spi_mem *mem,
                                   const struct spi_mem_op *op)
{
        if (op->data.buswidth > 1 || op->addr.buswidth > 1 ||
            op->dummy.buswidth > 1 || op->cmd.buswidth > 1)
                return false;

        return spi_mem_default_supports_op(mem, op);
}

static int dw_spi_init_mem_buf(struct dw_spi *dws, const struct spi_mem_op *op)
{
        unsigned int i, j, len;
        u8 *out;

        /*
         * Calculate the total length of the EEPROM command transfer and
         * either use the pre-allocated buffer or create a temporary one.
         */
        len = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;
        if (op->data.dir == SPI_MEM_DATA_OUT)
                len += op->data.nbytes;

        if (len <= SPI_BUF_SIZE) {
                out = dws->buf;
        } else {
                out = kzalloc(len, GFP_KERNEL);
                if (!out)
                        return -ENOMEM;
        }

        /*
         * Collect the operation code, address and dummy bytes into a single
         * buffer. If it's a transfer with data to be sent, also copy it in
         * to speed the data transmission up.
         */
        for (i = 0; i < op->cmd.nbytes; ++i)
                out[i] = SPI_GET_BYTE(op->cmd.opcode, op->cmd.nbytes - i - 1);
        for (j = 0; j < op->addr.nbytes; ++i, ++j)
                out[i] = SPI_GET_BYTE(op->addr.val, op->addr.nbytes - j - 1);
        for (j = 0; j < op->dummy.nbytes; ++i, ++j)
                out[i] = 0x0;

        if (op->data.dir == SPI_MEM_DATA_OUT)
                memcpy(&out[i], op->data.buf.out, op->data.nbytes);

        dws->n_bytes = 1;
        dws->tx = out;
        dws->tx_len = len;
        if (op->data.dir == SPI_MEM_DATA_IN) {
                dws->rx = op->data.buf.in;
                dws->rx_len = op->data.nbytes;
        } else {
                dws->rx = NULL;
                dws->rx_len = 0;
        }

        return 0;
}

static void dw_spi_free_mem_buf(struct dw_spi *dws)
{
        if (dws->tx != dws->buf)
                kfree(dws->tx);
}

static int dw_spi_write_then_read(struct dw_spi *dws, struct spi_device *spi)
{
        u32 room, entries, sts;
        unsigned int len;
        u8 *buf;

        /*
         * At the initial stage we just pre-fill the Tx FIFO with no rush,
         * since the native CS hasn't been enabled yet and the automatic
         * data transmission won't start until we do that.
         */
        len = min(dws->fifo_len, dws->tx_len);
        buf = dws->tx;
        while (len--)
                dw_write_io_reg(dws, DW_SPI_DR, *buf++);

        /*
         * After setting any bit in the SER register the transmission will
         * start automatically. We have to keep up with that procedure,
         * otherwise the CS de-assertion will happen whereupon the memory
         * operation will be pre-terminated.
         */
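        /*
         * buf has advanced one byte per frame queued above (n_bytes == 1
         * for these mem ops), so the pointer difference below is exactly
         * the number of frames already sitting in the Tx FIFO.
         */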
        len = dws->tx_len - ((void *)buf - dws->tx);
        dw_spi_set_cs(spi, false);
        while (len) {
                entries = readl_relaxed(dws->regs + DW_SPI_TXFLR);
                if (!entries) {
                        dev_err(&dws->master->dev, "CS de-assertion on Tx\n");
                        return -EIO;
                }
                room = min(dws->fifo_len - entries, len);
                for (; room; --room, --len)
                        dw_write_io_reg(dws, DW_SPI_DR, *buf++);
        }

        /*
         * Data fetching will start automatically if the EEPROM-read mode is
         * activated. We have to keep up with the incoming data pace to
         * prevent the Rx FIFO overflow causing the inbound data loss.
         */
        len = dws->rx_len;
        buf = dws->rx;
        while (len) {
                entries = readl_relaxed(dws->regs + DW_SPI_RXFLR);
                if (!entries) {
                        sts = readl_relaxed(dws->regs + DW_SPI_RISR);
                        if (sts & SPI_INT_RXOI) {
                                dev_err(&dws->master->dev, "FIFO overflow on Rx\n");
                                return -EIO;
                        }
                        continue;
                }
                entries = min(entries, len);
                for (; entries; --entries, --len)
                        *buf++ = dw_read_io_reg(dws, DW_SPI_DR);
        }

        return 0;
}

static inline bool dw_spi_ctlr_busy(struct dw_spi *dws)
{
        return dw_readl(dws, DW_SPI_SR) & SR_BUSY;
}

static int dw_spi_wait_mem_op_done(struct dw_spi *dws)
{
        int retry = SPI_WAIT_RETRIES;
        struct spi_delay delay;
        unsigned long ns, us;
        u32 nents;

        nents = dw_readl(dws, DW_SPI_TXFLR);
        ns = NSEC_PER_SEC / dws->current_freq * nents;
        ns *= dws->n_bytes * BITS_PER_BYTE;
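
        /*
         * E.g. (illustrative numbers) eight 8-bit frames pending at 1 MHz
         * take 1e9 / 1e6 * 8 * 8 = 64000 ns, which falls through to the
         * microseconds branch below as a 64 us poll delay.
         */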
        if (ns <= NSEC_PER_USEC) {
                delay.unit = SPI_DELAY_UNIT_NSECS;
                delay.value = ns;
        } else {
                us = DIV_ROUND_UP(ns, NSEC_PER_USEC);
                delay.unit = SPI_DELAY_UNIT_USECS;
                delay.value = clamp_val(us, 0, USHRT_MAX);
        }

        while (dw_spi_ctlr_busy(dws) && retry--)
                spi_delay_exec(&delay, NULL);

        if (retry < 0) {
                dev_err(&dws->master->dev, "Mem op hung up\n");
                return -EIO;
        }

        return 0;
}

static void dw_spi_stop_mem_op(struct dw_spi *dws, struct spi_device *spi)
{
        spi_enable_chip(dws, 0);
        dw_spi_set_cs(spi, true);
        spi_enable_chip(dws, 1);
}

/*
 * The SPI memory operation implementation below is the best choice for
 * devices which are selected by the native chip-select lane. It's
 * specifically developed to work around the problem with the automatic
 * chip-select lane toggle when there is no data in the Tx FIFO buffer.
 * Luckily the current SPI-mem core calls the exec_op() callback only if
 * the GPIO-based CS is unavailable.
 */
static int dw_spi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
        struct dw_spi *dws = spi_controller_get_devdata(mem->spi->controller);
        struct dw_spi_cfg cfg;
        unsigned long flags;
        int ret;

        /*
         * Collect the outbound data into a single buffer to speed the
         * transmission up at least on the initial stage.
         */
        ret = dw_spi_init_mem_buf(dws, op);
        if (ret)
                return ret;

        /*
         * DW SPI EEPROM-read mode is required only for the SPI memory Data-IN
         * operation. Transmit-only mode is suitable for the rest of them.
         */
        cfg.dfs = 8;
        cfg.freq = clamp(mem->spi->max_speed_hz, 0U, dws->max_mem_freq);
        if (op->data.dir == SPI_MEM_DATA_IN) {
                cfg.tmode = SPI_TMOD_EPROMREAD;
                cfg.ndf = op->data.nbytes;
        } else {
                cfg.tmode = SPI_TMOD_TO;
        }

        spi_enable_chip(dws, 0);

        dw_spi_update_config(dws, mem->spi, &cfg);

        spi_mask_intr(dws, 0xff);

        spi_enable_chip(dws, 1);

        /*
         * DW APB SSI controller has very nasty peculiarities. First, in its
         * original form (without any vendor-specific modifications) it
         * doesn't provide a direct way to set and clear the native
         * chip-select signal. Instead the controller asserts the CS lane if
         * the Tx FIFO isn't empty and a transmission is going on, and
         * automatically de-asserts it back to the high level if the Tx FIFO
         * doesn't have anything to be pushed out. Due to that, multi-tasking
         * or heavy IRQ activity might be fatal, since the transfer procedure
         * preemption may cause the Tx FIFO to get empty and a sudden CS
         * de-assertion, which in the middle of the transfer will most likely
         * cause data loss. Secondly, the EEPROM-read and Read-only DW SPI
         * transfer modes imply the incoming data being automatically pulled
         * into the Rx FIFO. So if the driver software is late in fetching
         * the data from the FIFO before it overflows, the new incoming data
         * will be lost. In order to make sure the executed memory operations
         * are CS-atomic and to prevent the Rx FIFO overflow we have to
         * disable the local interrupts so as to block any preemption during
         * the subsequent IO operations.
         *
         * Note. In some circumstances disabling IRQs may not help to prevent
         * the problems described above. The CS de-assertion and Rx FIFO
         * overflow may still happen due to a relatively slow system bus or
         * a CPU not working fast enough, so the write-then-read algorithm
         * implemented here just won't keep up with the SPI bus data
         * transfer. Such a situation is highly platform specific and is
         * supposed to be fixed by manually restricting the SPI bus frequency
         * using the dws->max_mem_freq parameter.
         */
        local_irq_save(flags);
        preempt_disable();

        ret = dw_spi_write_then_read(dws, mem->spi);

        local_irq_restore(flags);
        preempt_enable();

        /*
         * Wait for the operation to finish and check the controller status
         * only if there hasn't been any run-time error detected. In the
         * former case it's just pointless. In the latter one it prevents an
         * additional error message from being printed, since any hw error
         * flag being set would be due to an error detected on the data
         * transfer.
         */
        if (!ret) {
                ret = dw_spi_wait_mem_op_done(dws);
                if (!ret)
                        ret = dw_spi_check_status(dws, true);
        }

        dw_spi_stop_mem_op(dws, mem->spi);

        dw_spi_free_mem_buf(dws);

        return ret;
}

/*
 * Initialize the default memory operations if a glue layer hasn't specified
 * custom ones. Direct mapping operations will be preserved anyway since DW SPI
 * controller doesn't have an embedded dirmap interface. Note the memory
 * operations implemented in this driver are the best choice only for the DW
 * APB SSI controller with standard native CS functionality. If a hardware
 * vendor has fixed the automatic CS assertion/de-assertion peculiarity, then
 * it will be safer to use the normal SPI-messages-based transfers
 * implementation.
 */
static void dw_spi_init_mem_ops(struct dw_spi *dws)
{
        if (!dws->mem_ops.exec_op && !(dws->caps & DW_SPI_CAP_CS_OVERRIDE) &&
            !dws->set_cs) {
                dws->mem_ops.adjust_op_size = dw_spi_adjust_mem_op_size;
                dws->mem_ops.supports_op = dw_spi_supports_mem_op;
                dws->mem_ops.exec_op = dw_spi_exec_mem_op;
                if (!dws->max_mem_freq)
                        dws->max_mem_freq = dws->max_freq;
        }
}

/* This may be called twice for each spi dev */
static int dw_spi_setup(struct spi_device *spi)
{
        struct dw_spi *dws = spi_controller_get_devdata(spi->controller);
        struct chip_data *chip;

        /* Only alloc on first setup */
        chip = spi_get_ctldata(spi);
        if (!chip) {
                u32 rx_sample_dly_ns;

                chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
                if (!chip)
                        return -ENOMEM;
                spi_set_ctldata(spi, chip);

                /* Get the specific / default rx-sample-delay */
                if (device_property_read_u32(&spi->dev,
                                             "rx-sample-delay-ns",
                                             &rx_sample_dly_ns) != 0)
                        /* Use the default controller value */
                        rx_sample_dly_ns = dws->def_rx_sample_dly_ns;
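
                /*
                 * Convert the delay from nanoseconds into SCK cycles of
                 * the max-rate clock, e.g. (illustrative) 10 ns at a
                 * 200 MHz max_freq (5 ns period) rounds to a two-cycle
                 * sample delay.
                 */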
                chip->rx_sample_dly = DIV_ROUND_CLOSEST(rx_sample_dly_ns,
                                                        NSEC_PER_SEC /
                                                        dws->max_freq);
        }

        /*
         * Update CR0 data each time the setup callback is invoked since
         * the device parameters could have been changed, for instance, by
         * the MMC SPI driver or something else.
         */
        chip->cr0 = dw_spi_prepare_cr0(dws, spi);

        return 0;
}

static void dw_spi_cleanup(struct spi_device *spi)
{
        struct chip_data *chip = spi_get_ctldata(spi);

        kfree(chip);
        spi_set_ctldata(spi, NULL);
}

/* Restart the controller, disable all interrupts, clean rx fifo */
static void spi_hw_init(struct device *dev, struct dw_spi *dws)
{
        spi_reset_chip(dws);

        /*
         * Try to detect the FIFO depth if not set by the interface driver.
         * Per the HW spec the depth can range from 2 to 256.
         */
        if (!dws->fifo_len) {
                u32 fifo;
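
                /*
                 * The detection relies on TXFTLR only latching values
                 * below the FIFO depth: the first write that doesn't read
                 * back intact is the depth itself, e.g. a 16-entry FIFO
                 * breaks the loop at fifo == 16.
                 */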
                for (fifo = 1; fifo < 256; fifo++) {
                        dw_writel(dws, DW_SPI_TXFTLR, fifo);
                        if (fifo != dw_readl(dws, DW_SPI_TXFTLR))
                                break;
                }
                dw_writel(dws, DW_SPI_TXFTLR, 0);

                dws->fifo_len = (fifo == 1) ? 0 : fifo;
                dev_dbg(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len);
        }

        /* Enable the HW fixup for explicit CS deselect on Amazon's Alpine chips */
        if (dws->caps & DW_SPI_CAP_CS_OVERRIDE)
                dw_writel(dws, DW_SPI_CS_OVERRIDE, 0xF);
}

int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
{
        struct spi_controller *master;
        int ret;

        if (!dws)
                return -EINVAL;

        master = spi_alloc_master(dev, 0);
        if (!master)
                return -ENOMEM;

        dws->master = master;
        dws->dma_addr = (dma_addr_t)(dws->paddr + DW_SPI_DR);

        spi_controller_set_devdata(master, dws);

        /* Basic HW init */
        spi_hw_init(dev, dws);

        ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED, dev_name(dev),
                          master);
        if (ret < 0 && ret != -ENOTCONN) {
                dev_err(dev, "cannot get IRQ\n");
                goto err_free_master;
        }

        dw_spi_init_mem_ops(dws);

        master->use_gpio_descriptors = true;
        master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP;
        master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
        master->bus_num = dws->bus_num;
        master->num_chipselect = dws->num_cs;
        master->setup = dw_spi_setup;
        master->cleanup = dw_spi_cleanup;
        if (dws->set_cs)
                master->set_cs = dws->set_cs;
        else
                master->set_cs = dw_spi_set_cs;
        master->transfer_one = dw_spi_transfer_one;
        master->handle_err = dw_spi_handle_err;
        if (dws->mem_ops.exec_op)
                master->mem_ops = &dws->mem_ops;
        master->max_speed_hz = dws->max_freq;
        master->dev.of_node = dev->of_node;
        master->dev.fwnode = dev->fwnode;
        master->flags = SPI_MASTER_GPIO_SS;
        master->auto_runtime_pm = true;

        /* Get default rx sample delay */
        device_property_read_u32(dev, "rx-sample-delay-ns",
                                 &dws->def_rx_sample_dly_ns);

        if (dws->dma_ops && dws->dma_ops->dma_init) {
                ret = dws->dma_ops->dma_init(dev, dws);
                if (ret) {
                        dev_warn(dev, "DMA init failed\n");
                } else {
                        master->can_dma = dws->dma_ops->can_dma;
                        master->flags |= SPI_CONTROLLER_MUST_TX;
                }
        }

        ret = spi_register_controller(master);
        if (ret) {
                dev_err(&master->dev, "problem registering spi master\n");
                goto err_dma_exit;
        }

        dw_spi_debugfs_init(dws);
        return 0;

err_dma_exit:
        if (dws->dma_ops && dws->dma_ops->dma_exit)
                dws->dma_ops->dma_exit(dws);
        spi_enable_chip(dws, 0);
        free_irq(dws->irq, master);
err_free_master:
        spi_controller_put(master);
        return ret;
}
EXPORT_SYMBOL_GPL(dw_spi_add_host);

void dw_spi_remove_host(struct dw_spi *dws)
{
        dw_spi_debugfs_remove(dws);

        spi_unregister_controller(dws->master);

        if (dws->dma_ops && dws->dma_ops->dma_exit)
                dws->dma_ops->dma_exit(dws);

        spi_shutdown_chip(dws);

        free_irq(dws->irq, dws->master);
}
EXPORT_SYMBOL_GPL(dw_spi_remove_host);

int dw_spi_suspend_host(struct dw_spi *dws)
{
        int ret;

        ret = spi_controller_suspend(dws->master);
        if (ret)
                return ret;

        spi_shutdown_chip(dws);
        return 0;
}
EXPORT_SYMBOL_GPL(dw_spi_suspend_host);

int dw_spi_resume_host(struct dw_spi *dws)
{
        spi_hw_init(&dws->master->dev, dws);
        return spi_controller_resume(dws->master);
}
EXPORT_SYMBOL_GPL(dw_spi_resume_host);

MODULE_AUTHOR("Feng Tang <feng.tang@intel.com>");
MODULE_DESCRIPTION("Driver for DesignWare SPI controller core");
MODULE_LICENSE("GPL v2");