spi-pic32.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Microchip PIC32 SPI controller driver.
 *
 * Purna Chandra Mandal <purna.mandal@microchip.com>
 * Copyright (c) 2016, Microchip Technology Inc.
 */

#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_gpio.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
/* SPI controller registers */
struct pic32_spi_regs {
	u32 ctrl;
	u32 ctrl_clr;
	u32 ctrl_set;
	u32 ctrl_inv;
	u32 status;
	u32 status_clr;
	u32 status_set;
	u32 status_inv;
	u32 buf;
	u32 dontuse[3];
	u32 baud;
	u32 dontuse2[3];
	u32 ctrl2;
	u32 ctrl2_clr;
	u32 ctrl2_set;
	u32 ctrl2_inv;
};
/* Bit fields of SPI Control Register */
#define CTRL_RX_INT_SHIFT	0 /* Rx interrupt generation */
#define  RX_FIFO_EMPTY		0
#define  RX_FIFO_NOT_EMPTY	1 /* not empty */
#define  RX_FIFO_HALF_FULL	2 /* full by half or more */
#define  RX_FIFO_FULL		3 /* completely full */

#define CTRL_TX_INT_SHIFT	2 /* TX interrupt generation */
#define  TX_FIFO_ALL_EMPTY	0 /* completely empty */
#define  TX_FIFO_EMPTY		1 /* empty */
#define  TX_FIFO_HALF_EMPTY	2 /* empty by half or more */
#define  TX_FIFO_NOT_FULL	3 /* at least one entry empty */

#define CTRL_MSTEN	BIT(5)  /* enable master mode */
#define CTRL_CKP	BIT(6)  /* active low */
#define CTRL_CKE	BIT(8)  /* Tx on falling edge */
#define CTRL_SMP	BIT(9)  /* Rx at middle or end of tx */
#define CTRL_BPW_MASK	0x03    /* bits per word/sample */
#define CTRL_BPW_SHIFT	10
#define  PIC32_BPW_8	0
#define  PIC32_BPW_16	1
#define  PIC32_BPW_32	2
#define CTRL_SIDL	BIT(13) /* sleep when idle */
#define CTRL_ON		BIT(15) /* enable macro */
#define CTRL_ENHBUF	BIT(16) /* enable enhanced buffering */
#define CTRL_MCLKSEL	BIT(23) /* select clock source */
#define CTRL_MSSEN	BIT(28) /* macro driven /SS */
#define CTRL_FRMEN	BIT(31) /* enable framing mode */

/* Bit fields of SPI Status Register */
#define STAT_RF_EMPTY		BIT(5)  /* RX Fifo empty */
#define STAT_RX_OV		BIT(6)  /* err, s/w needs to clear */
#define STAT_TX_UR		BIT(8)  /* UR in Framed SPI modes */
#define STAT_FRM_ERR		BIT(12) /* Multiple Frame Sync pulse */
#define STAT_TF_LVL_MASK	0x1F
#define STAT_TF_LVL_SHIFT	16
#define STAT_RF_LVL_MASK	0x1F
#define STAT_RF_LVL_SHIFT	24

/* Bit fields of SPI Baud Register */
#define BAUD_MASK		0x1ff

/* Bit fields of SPI Control2 Register */
#define CTRL2_TX_UR_EN		BIT(10) /* Enable int on Tx under-run */
#define CTRL2_RX_OV_EN		BIT(11) /* Enable int on Rx over-run */
#define CTRL2_FRM_ERR_EN	BIT(12) /* Enable frame err int */

/* Minimum DMA transfer size */
#define PIC32_DMA_LEN_MIN	64
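/*
 * Transfers shorter than PIC32_DMA_LEN_MIN bytes are handled by
 * interrupt-driven PIO rather than DMA (see pic32_spi_can_dma() below),
 * since the DMA setup overhead outweighs the benefit for small buffers.
 */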
struct pic32_spi {
	dma_addr_t dma_base;
	struct pic32_spi_regs __iomem *regs;
	int fault_irq;
	int rx_irq;
	int tx_irq;
	u32 fifo_n_byte; /* FIFO depth in bytes */
	struct clk *clk;
	struct spi_master *master;
	/* Current controller setting */
	u32 speed_hz; /* spi-clk rate */
	u32 mode;
	u32 bits_per_word;
	u32 fifo_n_elm; /* FIFO depth in words */
#define PIC32F_DMA_PREP	0 /* DMA channels configured */
	unsigned long flags;
	/* Current transfer state */
	struct completion xfer_done;
	/* PIO transfer specific */
	const void *tx;
	const void *tx_end;
	const void *rx;
	const void *rx_end;
	int len;
	void (*rx_fifo)(struct pic32_spi *);
	void (*tx_fifo)(struct pic32_spi *);
};
static inline void pic32_spi_enable(struct pic32_spi *pic32s)
{
	writel(CTRL_ON | CTRL_SIDL, &pic32s->regs->ctrl_set);
}

static inline void pic32_spi_disable(struct pic32_spi *pic32s)
{
	writel(CTRL_ON | CTRL_SIDL, &pic32s->regs->ctrl_clr);

	/* avoid SPI registers read/write at immediate next CPU clock */
	ndelay(20);
}
static void pic32_spi_set_clk_rate(struct pic32_spi *pic32s, u32 spi_ck)
{
	u32 div;

	/* div = (clk_in / (2 * spi_ck)) - 1 */
	div = DIV_ROUND_CLOSEST(clk_get_rate(pic32s->clk), 2 * spi_ck) - 1;

	writel(div & BAUD_MASK, &pic32s->regs->baud);
}
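/*
 * Illustrative example (numbers are not from the datasheet): with a 50 MHz
 * peripheral clock and a requested 10 MHz SPI clock, the divisor above is
 * DIV_ROUND_CLOSEST(50000000, 20000000) - 1 = 2. Inverting the same
 * formula implies an actual SCK of clk_in / (2 * (div + 1)), i.e. about
 * 8.33 MHz in this case.
 */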
static inline u32 pic32_rx_fifo_level(struct pic32_spi *pic32s)
{
	u32 sr = readl(&pic32s->regs->status);

	return (sr >> STAT_RF_LVL_SHIFT) & STAT_RF_LVL_MASK;
}

static inline u32 pic32_tx_fifo_level(struct pic32_spi *pic32s)
{
	u32 sr = readl(&pic32s->regs->status);

	return (sr >> STAT_TF_LVL_SHIFT) & STAT_TF_LVL_MASK;
}
/* Return the max entries we can fill into tx fifo */
static u32 pic32_tx_max(struct pic32_spi *pic32s, int n_bytes)
{
	u32 tx_left, tx_room, rxtx_gap;

	tx_left = (pic32s->tx_end - pic32s->tx) / n_bytes;
	tx_room = pic32s->fifo_n_elm - pic32_tx_fifo_level(pic32s);

	/*
	 * Another concern is the tx/rx mismatch: we could use
	 * (fifo_n_byte - rxfl - txfl) as the upper bound for tx, but that
	 * would not cover data which is outside the tx/rx FIFOs, still
	 * sitting in the shift registers. So the gap is tracked from the
	 * software point of view instead, as how far rx lags behind tx.
	 */
	rxtx_gap = ((pic32s->rx_end - pic32s->rx) -
		    (pic32s->tx_end - pic32s->tx)) / n_bytes;

	return min3(tx_left, tx_room, (u32)(pic32s->fifo_n_elm - rxtx_gap));
}
/* Return the max entries we should read out of rx fifo */
static u32 pic32_rx_max(struct pic32_spi *pic32s, int n_bytes)
{
	u32 rx_left = (pic32s->rx_end - pic32s->rx) / n_bytes;

	return min_t(u32, rx_left, pic32_rx_fifo_level(pic32s));
}
#define BUILD_SPI_FIFO_RW(__name, __type, __bwl)		\
static void pic32_spi_rx_##__name(struct pic32_spi *pic32s)	\
{								\
	__type v;						\
	u32 mx = pic32_rx_max(pic32s, sizeof(__type));		\
	for (; mx; mx--) {					\
		v = read##__bwl(&pic32s->regs->buf);		\
		if (pic32s->rx_end - pic32s->len)		\
			*(__type *)(pic32s->rx) = v;		\
		pic32s->rx += sizeof(__type);			\
	}							\
}								\
								\
static void pic32_spi_tx_##__name(struct pic32_spi *pic32s)	\
{								\
	__type v;						\
	u32 mx = pic32_tx_max(pic32s, sizeof(__type));		\
	for (; mx; mx--) {					\
		v = (__type)~0U;				\
		if (pic32s->tx_end - pic32s->len)		\
			v = *(__type *)(pic32s->tx);		\
		write##__bwl(v, &pic32s->regs->buf);		\
		pic32s->tx += sizeof(__type);			\
	}							\
}

BUILD_SPI_FIFO_RW(byte, u8, b);
BUILD_SPI_FIFO_RW(word, u16, w);
BUILD_SPI_FIFO_RW(dword, u32, l);
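/*
 * The macro above expands into three pairs of FIFO helpers:
 * pic32_spi_{rx,tx}_byte() using readb()/writeb() for 8-bit words,
 * pic32_spi_{rx,tx}_word() using readw()/writew() for 16-bit words, and
 * pic32_spi_{rx,tx}_dword() using readl()/writel() for 32-bit words.
 * pic32_spi_set_word_size() installs the pair matching the current
 * bits_per_word setting into pic32s->rx_fifo/tx_fifo.
 */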
static void pic32_err_stop(struct pic32_spi *pic32s, const char *msg)
{
	/* disable all interrupts */
	disable_irq_nosync(pic32s->fault_irq);
	disable_irq_nosync(pic32s->rx_irq);
	disable_irq_nosync(pic32s->tx_irq);

	/* Show err message and abort xfer with err */
	dev_err(&pic32s->master->dev, "%s\n", msg);
	if (pic32s->master->cur_msg)
		pic32s->master->cur_msg->status = -EIO;
	complete(&pic32s->xfer_done);
}
static irqreturn_t pic32_spi_fault_irq(int irq, void *dev_id)
{
	struct pic32_spi *pic32s = dev_id;
	u32 status;

	status = readl(&pic32s->regs->status);

	/* Error handling */
	if (status & (STAT_RX_OV | STAT_TX_UR)) {
		writel(STAT_RX_OV, &pic32s->regs->status_clr);
		writel(STAT_TX_UR, &pic32s->regs->status_clr);
		pic32_err_stop(pic32s, "err_irq: fifo ov/ur-run\n");
		return IRQ_HANDLED;
	}

	if (status & STAT_FRM_ERR) {
		pic32_err_stop(pic32s, "err_irq: frame error");
		return IRQ_HANDLED;
	}

	if (!pic32s->master->cur_msg) {
		pic32_err_stop(pic32s, "err_irq: no mesg");
		return IRQ_NONE;
	}

	return IRQ_NONE;
}
static irqreturn_t pic32_spi_rx_irq(int irq, void *dev_id)
{
	struct pic32_spi *pic32s = dev_id;

	pic32s->rx_fifo(pic32s);

	/* rx complete? */
	if (pic32s->rx_end == pic32s->rx) {
		/* disable all interrupts */
		disable_irq_nosync(pic32s->fault_irq);
		disable_irq_nosync(pic32s->rx_irq);

		/* complete current xfer */
		complete(&pic32s->xfer_done);
	}

	return IRQ_HANDLED;
}

static irqreturn_t pic32_spi_tx_irq(int irq, void *dev_id)
{
	struct pic32_spi *pic32s = dev_id;

	pic32s->tx_fifo(pic32s);

	/* tx complete? disable tx interrupt */
	if (pic32s->tx_end == pic32s->tx)
		disable_irq_nosync(pic32s->tx_irq);

	return IRQ_HANDLED;
}
static void pic32_spi_dma_rx_notify(void *data)
{
	struct pic32_spi *pic32s = data;

	complete(&pic32s->xfer_done);
}

static int pic32_spi_dma_transfer(struct pic32_spi *pic32s,
				  struct spi_transfer *xfer)
{
	struct spi_master *master = pic32s->master;
	struct dma_async_tx_descriptor *desc_rx;
	struct dma_async_tx_descriptor *desc_tx;
	dma_cookie_t cookie;
	int ret;

	if (!master->dma_rx || !master->dma_tx)
		return -ENODEV;

	desc_rx = dmaengine_prep_slave_sg(master->dma_rx,
					  xfer->rx_sg.sgl,
					  xfer->rx_sg.nents,
					  DMA_DEV_TO_MEM,
					  DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc_rx) {
		ret = -EINVAL;
		goto err_dma;
	}

	desc_tx = dmaengine_prep_slave_sg(master->dma_tx,
					  xfer->tx_sg.sgl,
					  xfer->tx_sg.nents,
					  DMA_MEM_TO_DEV,
					  DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc_tx) {
		ret = -EINVAL;
		goto err_dma;
	}

	/* Put callback on the RX transfer, that should finish last */
	desc_rx->callback = pic32_spi_dma_rx_notify;
	desc_rx->callback_param = pic32s;

	cookie = dmaengine_submit(desc_rx);
	ret = dma_submit_error(cookie);
	if (ret)
		goto err_dma;

	cookie = dmaengine_submit(desc_tx);
	ret = dma_submit_error(cookie);
	if (ret)
		goto err_dma_tx;

	dma_async_issue_pending(master->dma_rx);
	dma_async_issue_pending(master->dma_tx);

	return 0;

err_dma_tx:
	dmaengine_terminate_all(master->dma_rx);
err_dma:
	return ret;
}
static int pic32_spi_dma_config(struct pic32_spi *pic32s, u32 dma_width)
{
	int buf_offset = offsetof(struct pic32_spi_regs, buf);
	struct spi_master *master = pic32s->master;
	struct dma_slave_config cfg;
	int ret;

	memset(&cfg, 0, sizeof(cfg));
	cfg.device_fc = true;
	cfg.src_addr = pic32s->dma_base + buf_offset;
	cfg.dst_addr = pic32s->dma_base + buf_offset;
	cfg.src_maxburst = pic32s->fifo_n_elm / 2; /* fill one-half */
	cfg.dst_maxburst = pic32s->fifo_n_elm / 2; /* drain one-half */
	cfg.src_addr_width = dma_width;
	cfg.dst_addr_width = dma_width;

	/* tx channel */
	cfg.slave_id = pic32s->tx_irq;
	cfg.direction = DMA_MEM_TO_DEV;
	ret = dmaengine_slave_config(master->dma_tx, &cfg);
	if (ret) {
		dev_err(&master->dev, "tx channel setup failed\n");
		return ret;
	}

	/* rx channel */
	cfg.slave_id = pic32s->rx_irq;
	cfg.direction = DMA_DEV_TO_MEM;
	ret = dmaengine_slave_config(master->dma_rx, &cfg);
	if (ret)
		dev_err(&master->dev, "rx channel setup failed\n");

	return ret;
}
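/*
 * Note: both directions target the controller's data buffer register
 * (dma_base + offsetof(struct pic32_spi_regs, buf)), and maxburst is set
 * to half the FIFO depth so each DMA burst fills or drains roughly half
 * of the hardware FIFO per request, matching the "fill/drain one-half"
 * intent noted above.
 */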
static int pic32_spi_set_word_size(struct pic32_spi *pic32s, u8 bits_per_word)
{
	enum dma_slave_buswidth dmawidth;
	u32 buswidth, v;

	switch (bits_per_word) {
	case 8:
		pic32s->rx_fifo = pic32_spi_rx_byte;
		pic32s->tx_fifo = pic32_spi_tx_byte;
		buswidth = PIC32_BPW_8;
		dmawidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
		break;
	case 16:
		pic32s->rx_fifo = pic32_spi_rx_word;
		pic32s->tx_fifo = pic32_spi_tx_word;
		buswidth = PIC32_BPW_16;
		dmawidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
		break;
	case 32:
		pic32s->rx_fifo = pic32_spi_rx_dword;
		pic32s->tx_fifo = pic32_spi_tx_dword;
		buswidth = PIC32_BPW_32;
		dmawidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
		break;
	default:
		/* not supported */
		return -EINVAL;
	}

	/* calculate maximum number of words fifos can hold */
	pic32s->fifo_n_elm = DIV_ROUND_UP(pic32s->fifo_n_byte,
					  bits_per_word / 8);
	/* set word size */
	v = readl(&pic32s->regs->ctrl);
	v &= ~(CTRL_BPW_MASK << CTRL_BPW_SHIFT);
	v |= buswidth << CTRL_BPW_SHIFT;
	writel(v, &pic32s->regs->ctrl);

	/* re-configure dma width, if required */
	if (test_bit(PIC32F_DMA_PREP, &pic32s->flags))
		pic32_spi_dma_config(pic32s, dmawidth);

	return 0;
}
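/*
 * With the 16-byte enhanced FIFO configured in pic32_spi_hw_init(),
 * fifo_n_elm works out to 16, 8 or 4 entries for 8-, 16- and 32-bit
 * words respectively (DIV_ROUND_UP(16, 1/2/4)).
 */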
static int pic32_spi_prepare_hardware(struct spi_master *master)
{
	struct pic32_spi *pic32s = spi_master_get_devdata(master);

	pic32_spi_enable(pic32s);

	return 0;
}
static int pic32_spi_prepare_message(struct spi_master *master,
				     struct spi_message *msg)
{
	struct pic32_spi *pic32s = spi_master_get_devdata(master);
	struct spi_device *spi = msg->spi;
	u32 val;

	/* set device specific bits_per_word */
	if (pic32s->bits_per_word != spi->bits_per_word) {
		pic32_spi_set_word_size(pic32s, spi->bits_per_word);
		pic32s->bits_per_word = spi->bits_per_word;
	}

	/* device specific speed change */
	if (pic32s->speed_hz != spi->max_speed_hz) {
		pic32_spi_set_clk_rate(pic32s, spi->max_speed_hz);
		pic32s->speed_hz = spi->max_speed_hz;
	}

	/* device specific mode change */
	if (pic32s->mode != spi->mode) {
		val = readl(&pic32s->regs->ctrl);
		/* active low */
		if (spi->mode & SPI_CPOL)
			val |= CTRL_CKP;
		else
			val &= ~CTRL_CKP;

		/* tx on rising edge */
		if (spi->mode & SPI_CPHA)
			val &= ~CTRL_CKE;
		else
			val |= CTRL_CKE;

		/* rx at end of tx */
		val |= CTRL_SMP;
		writel(val, &pic32s->regs->ctrl);
		pic32s->mode = spi->mode;
	}

	return 0;
}
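/*
 * Only SPI_MODE_0 and SPI_MODE_3 are advertised in master->mode_bits at
 * probe time, so the CPOL/CPHA handling above effectively toggles
 * CTRL_CKP and CTRL_CKE together: mode 0 clears CKP and sets CKE,
 * mode 3 sets CKP and clears CKE. CTRL_SMP is always set so rx data is
 * sampled at the end of the tx period.
 */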
static bool pic32_spi_can_dma(struct spi_master *master,
			      struct spi_device *spi,
			      struct spi_transfer *xfer)
{
	struct pic32_spi *pic32s = spi_master_get_devdata(master);

	/* skip using DMA on small size transfer to avoid overhead */
	return (xfer->len >= PIC32_DMA_LEN_MIN) &&
		test_bit(PIC32F_DMA_PREP, &pic32s->flags);
}
static int pic32_spi_one_transfer(struct spi_master *master,
				  struct spi_device *spi,
				  struct spi_transfer *transfer)
{
	struct pic32_spi *pic32s;
	bool dma_issued = false;
	unsigned long timeout;
	int ret;

	pic32s = spi_master_get_devdata(master);

	/* handle transfer specific word size change */
	if (transfer->bits_per_word &&
	    (transfer->bits_per_word != pic32s->bits_per_word)) {
		ret = pic32_spi_set_word_size(pic32s, transfer->bits_per_word);
		if (ret)
			return ret;
		pic32s->bits_per_word = transfer->bits_per_word;
	}

	/* handle transfer specific speed change */
	if (transfer->speed_hz && (transfer->speed_hz != pic32s->speed_hz)) {
		pic32_spi_set_clk_rate(pic32s, transfer->speed_hz);
		pic32s->speed_hz = transfer->speed_hz;
	}

	reinit_completion(&pic32s->xfer_done);

	/* transact by DMA mode */
	if (transfer->rx_sg.nents && transfer->tx_sg.nents) {
		ret = pic32_spi_dma_transfer(pic32s, transfer);
		if (ret) {
			dev_err(&spi->dev, "dma submit error\n");
			return ret;
		}

		/* DMA issued */
		dma_issued = true;
	} else {
		/* set current transfer information */
		pic32s->tx = (const void *)transfer->tx_buf;
		pic32s->rx = (const void *)transfer->rx_buf;
		pic32s->tx_end = pic32s->tx + transfer->len;
		pic32s->rx_end = pic32s->rx + transfer->len;
		pic32s->len = transfer->len;

		/* transact by interrupt driven PIO */
		enable_irq(pic32s->fault_irq);
		enable_irq(pic32s->rx_irq);
		enable_irq(pic32s->tx_irq);
	}

	/* wait for completion */
	timeout = wait_for_completion_timeout(&pic32s->xfer_done, 2 * HZ);
	if (timeout == 0) {
		dev_err(&spi->dev, "wait error/timedout\n");
		if (dma_issued) {
			dmaengine_terminate_all(master->dma_rx);
			dmaengine_terminate_all(master->dma_tx);
		}
		ret = -ETIMEDOUT;
	} else {
		ret = 0;
	}

	return ret;
}
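/*
 * Completion flow: in DMA mode, xfer_done is completed from
 * pic32_spi_dma_rx_notify() when the RX descriptor finishes; in PIO mode
 * it is completed from pic32_spi_rx_irq() once all expected data has been
 * read, or from pic32_err_stop() on a fault. The 2 second timeout above
 * guards against a transfer that never completes.
 */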
static int pic32_spi_unprepare_message(struct spi_master *master,
				       struct spi_message *msg)
{
	/* nothing to do */
	return 0;
}

static int pic32_spi_unprepare_hardware(struct spi_master *master)
{
	struct pic32_spi *pic32s = spi_master_get_devdata(master);

	pic32_spi_disable(pic32s);

	return 0;
}
/* This may be called multiple times by same spi dev */
static int pic32_spi_setup(struct spi_device *spi)
{
	if (!spi->max_speed_hz) {
		dev_err(&spi->dev, "No max speed HZ parameter\n");
		return -EINVAL;
	}

	/* The PIC32 SPI controller can drive /CS during a transfer based on
	 * the tx fifo fill-level: /CS stays asserted as long as the TX fifo
	 * is non-empty and is deasserted otherwise, indicating completion of
	 * the ongoing transfer. This can result in unreliable/erroneous SPI
	 * transactions, so /CS is always handled by toggling a GPIO instead.
	 */
	if (!gpio_is_valid(spi->cs_gpio))
		return -EINVAL;

	gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));

	return 0;
}

static void pic32_spi_cleanup(struct spi_device *spi)
{
	/* de-activate cs-gpio */
	gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
}
static int pic32_spi_dma_prep(struct pic32_spi *pic32s, struct device *dev)
{
	struct spi_master *master = pic32s->master;
	int ret = 0;

	master->dma_rx = dma_request_chan(dev, "spi-rx");
	if (IS_ERR(master->dma_rx)) {
		if (PTR_ERR(master->dma_rx) == -EPROBE_DEFER)
			ret = -EPROBE_DEFER;
		else
			dev_warn(dev, "RX channel not found.\n");

		master->dma_rx = NULL;
		goto out_err;
	}

	master->dma_tx = dma_request_chan(dev, "spi-tx");
	if (IS_ERR(master->dma_tx)) {
		if (PTR_ERR(master->dma_tx) == -EPROBE_DEFER)
			ret = -EPROBE_DEFER;
		else
			dev_warn(dev, "TX channel not found.\n");

		master->dma_tx = NULL;
		goto out_err;
	}

	if (pic32_spi_dma_config(pic32s, DMA_SLAVE_BUSWIDTH_1_BYTE))
		goto out_err;

	/* DMA channels allocated and prepared */
	set_bit(PIC32F_DMA_PREP, &pic32s->flags);

	return 0;

out_err:
	if (master->dma_rx) {
		dma_release_channel(master->dma_rx);
		master->dma_rx = NULL;
	}

	if (master->dma_tx) {
		dma_release_channel(master->dma_tx);
		master->dma_tx = NULL;
	}

	return ret;
}

static void pic32_spi_dma_unprep(struct pic32_spi *pic32s)
{
	if (!test_bit(PIC32F_DMA_PREP, &pic32s->flags))
		return;

	clear_bit(PIC32F_DMA_PREP, &pic32s->flags);
	if (pic32s->master->dma_rx)
		dma_release_channel(pic32s->master->dma_rx);

	if (pic32s->master->dma_tx)
		dma_release_channel(pic32s->master->dma_tx);
}
static void pic32_spi_hw_init(struct pic32_spi *pic32s)
{
	u32 ctrl;

	/* disable hardware */
	pic32_spi_disable(pic32s);

	ctrl = readl(&pic32s->regs->ctrl);

	/* enable enhanced fifo, 128 bits (16 bytes) deep */
	ctrl |= CTRL_ENHBUF;
	pic32s->fifo_n_byte = 16;

	/* disable framing mode */
	ctrl &= ~CTRL_FRMEN;

	/* enable master mode while disabled */
	ctrl |= CTRL_MSTEN;

	/* set tx fifo threshold interrupt */
	ctrl &= ~(0x3 << CTRL_TX_INT_SHIFT);
	ctrl |= (TX_FIFO_HALF_EMPTY << CTRL_TX_INT_SHIFT);

	/* set rx fifo threshold interrupt */
	ctrl &= ~(0x3 << CTRL_RX_INT_SHIFT);
	ctrl |= (RX_FIFO_NOT_EMPTY << CTRL_RX_INT_SHIFT);

	/* select clk source */
	ctrl &= ~CTRL_MCLKSEL;

	/* set manual /CS mode */
	ctrl &= ~CTRL_MSSEN;

	writel(ctrl, &pic32s->regs->ctrl);

	/* enable error reporting */
	ctrl = CTRL2_TX_UR_EN | CTRL2_RX_OV_EN | CTRL2_FRM_ERR_EN;
	writel(ctrl, &pic32s->regs->ctrl2_set);
}
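/*
 * Resulting baseline configuration: master mode, framing disabled,
 * 16-byte enhanced FIFO, peripheral clock as clock source, manual /CS,
 * TX interrupt on FIFO half-empty, RX interrupt on FIFO not-empty, and
 * under-run/over-run/frame-error interrupts enabled via CONTROL2.
 */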
static int pic32_spi_hw_probe(struct platform_device *pdev,
			      struct pic32_spi *pic32s)
{
	struct resource *mem;
	int ret;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pic32s->regs = devm_ioremap_resource(&pdev->dev, mem);
	if (IS_ERR(pic32s->regs))
		return PTR_ERR(pic32s->regs);

	pic32s->dma_base = mem->start;

	/* get irq resources: err-irq, rx-irq, tx-irq */
	pic32s->fault_irq = platform_get_irq_byname(pdev, "fault");
	if (pic32s->fault_irq < 0)
		return pic32s->fault_irq;

	pic32s->rx_irq = platform_get_irq_byname(pdev, "rx");
	if (pic32s->rx_irq < 0)
		return pic32s->rx_irq;

	pic32s->tx_irq = platform_get_irq_byname(pdev, "tx");
	if (pic32s->tx_irq < 0)
		return pic32s->tx_irq;

	/* get clock */
	pic32s->clk = devm_clk_get(&pdev->dev, "mck0");
	if (IS_ERR(pic32s->clk)) {
		dev_err(&pdev->dev, "clk not found\n");
		ret = PTR_ERR(pic32s->clk);
		goto err_unmap_mem;
	}

	ret = clk_prepare_enable(pic32s->clk);
	if (ret)
		goto err_unmap_mem;

	pic32_spi_hw_init(pic32s);

	return 0;

err_unmap_mem:
	dev_err(&pdev->dev, "%s failed, err %d\n", __func__, ret);
	return ret;
}
static int pic32_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct pic32_spi *pic32s;
	int ret;

	master = spi_alloc_master(&pdev->dev, sizeof(*pic32s));
	if (!master)
		return -ENOMEM;

	pic32s = spi_master_get_devdata(master);
	pic32s->master = master;

	ret = pic32_spi_hw_probe(pdev, pic32s);
	if (ret)
		goto err_master;

	master->dev.of_node = pdev->dev.of_node;
	master->mode_bits = SPI_MODE_3 | SPI_MODE_0 | SPI_CS_HIGH;
	master->num_chipselect = 1; /* single chip-select */
	master->max_speed_hz = clk_get_rate(pic32s->clk);
	master->setup = pic32_spi_setup;
	master->cleanup = pic32_spi_cleanup;
	master->flags = SPI_MASTER_MUST_TX | SPI_MASTER_MUST_RX;
	master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) |
				     SPI_BPW_MASK(32);
	master->transfer_one = pic32_spi_one_transfer;
	master->prepare_message = pic32_spi_prepare_message;
	master->unprepare_message = pic32_spi_unprepare_message;
	master->prepare_transfer_hardware = pic32_spi_prepare_hardware;
	master->unprepare_transfer_hardware = pic32_spi_unprepare_hardware;

	/* optional DMA support */
	ret = pic32_spi_dma_prep(pic32s, &pdev->dev);
	if (ret)
		goto err_bailout;

	if (test_bit(PIC32F_DMA_PREP, &pic32s->flags))
		master->can_dma = pic32_spi_can_dma;

	init_completion(&pic32s->xfer_done);
	pic32s->mode = -1;

	/* install irq handlers (with irq-disabled) */
	irq_set_status_flags(pic32s->fault_irq, IRQ_NOAUTOEN);
	ret = devm_request_irq(&pdev->dev, pic32s->fault_irq,
			       pic32_spi_fault_irq, IRQF_NO_THREAD,
			       dev_name(&pdev->dev), pic32s);
	if (ret < 0) {
		dev_err(&pdev->dev, "request fault-irq %d\n", pic32s->fault_irq);
		goto err_bailout;
	}

	/* receive interrupt handler */
	irq_set_status_flags(pic32s->rx_irq, IRQ_NOAUTOEN);
	ret = devm_request_irq(&pdev->dev, pic32s->rx_irq,
			       pic32_spi_rx_irq, IRQF_NO_THREAD,
			       dev_name(&pdev->dev), pic32s);
	if (ret < 0) {
		dev_err(&pdev->dev, "request rx-irq %d\n", pic32s->rx_irq);
		goto err_bailout;
	}

	/* transmit interrupt handler */
	irq_set_status_flags(pic32s->tx_irq, IRQ_NOAUTOEN);
	ret = devm_request_irq(&pdev->dev, pic32s->tx_irq,
			       pic32_spi_tx_irq, IRQF_NO_THREAD,
			       dev_name(&pdev->dev), pic32s);
	if (ret < 0) {
		dev_err(&pdev->dev, "request tx-irq %d\n", pic32s->tx_irq);
		goto err_bailout;
	}

	/* register master */
	ret = devm_spi_register_master(&pdev->dev, master);
	if (ret) {
		dev_err(&master->dev, "failed registering spi master\n");
		goto err_bailout;
	}

	platform_set_drvdata(pdev, pic32s);

	return 0;

err_bailout:
	pic32_spi_dma_unprep(pic32s);
	clk_disable_unprepare(pic32s->clk);
err_master:
	spi_master_put(master);
	return ret;
}
static int pic32_spi_remove(struct platform_device *pdev)
{
	struct pic32_spi *pic32s;

	pic32s = platform_get_drvdata(pdev);
	pic32_spi_disable(pic32s);
	clk_disable_unprepare(pic32s->clk);
	pic32_spi_dma_unprep(pic32s);

	return 0;
}

static const struct of_device_id pic32_spi_of_match[] = {
	{.compatible = "microchip,pic32mzda-spi",},
	{},
};
MODULE_DEVICE_TABLE(of, pic32_spi_of_match);

static struct platform_driver pic32_spi_driver = {
	.driver = {
		.name = "spi-pic32",
		.of_match_table = of_match_ptr(pic32_spi_of_match),
	},
	.probe = pic32_spi_probe,
	.remove = pic32_spi_remove,
};

module_platform_driver(pic32_spi_driver);

MODULE_AUTHOR("Purna Chandra Mandal <purna.mandal@microchip.com>");
MODULE_DESCRIPTION("Microchip SPI driver for PIC32 SPI controller.");
MODULE_LICENSE("GPL v2");