uniphier-mdmac.c

// SPDX-License-Identifier: GPL-2.0
//
// Copyright (C) 2018 Socionext Inc.
//   Author: Masahiro Yamada <yamada.masahiro@socionext.com>

#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "virt-dma.h"
/* registers common for all channels */
#define UNIPHIER_MDMAC_CMD		0x000	/* issue DMA start/abort */
#define   UNIPHIER_MDMAC_CMD_ABORT	BIT(31)	/* 1: abort, 0: start */

/* per-channel registers */
#define UNIPHIER_MDMAC_CH_OFFSET	0x100
#define UNIPHIER_MDMAC_CH_STRIDE	0x040
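/*
 * Each channel owns a 0x40-byte register window; channel N lives at
 * reg_base + 0x100 + 0x40 * N (see uniphier_mdmac_chan_init()).
 */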
#define UNIPHIER_MDMAC_CH_IRQ_STAT	0x010	/* current hw status (RO) */
#define UNIPHIER_MDMAC_CH_IRQ_REQ	0x014	/* latched STAT (WOC) */
#define UNIPHIER_MDMAC_CH_IRQ_EN	0x018	/* IRQ enable mask */
#define UNIPHIER_MDMAC_CH_IRQ_DET	0x01c	/* REQ & EN (RO) */
#define   UNIPHIER_MDMAC_CH_IRQ__ABORT		BIT(13)
#define   UNIPHIER_MDMAC_CH_IRQ__DONE		BIT(1)
#define UNIPHIER_MDMAC_CH_SRC_MODE	0x020	/* mode of source */
#define UNIPHIER_MDMAC_CH_DEST_MODE	0x024	/* mode of destination */
#define   UNIPHIER_MDMAC_CH_MODE__ADDR_INC	(0 << 4)
#define   UNIPHIER_MDMAC_CH_MODE__ADDR_DEC	(1 << 4)
#define   UNIPHIER_MDMAC_CH_MODE__ADDR_FIXED	(2 << 4)
#define UNIPHIER_MDMAC_CH_SRC_ADDR	0x028	/* source address */
#define UNIPHIER_MDMAC_CH_DEST_ADDR	0x02c	/* destination address */
#define UNIPHIER_MDMAC_CH_SIZE		0x030	/* transfer bytes */

#define UNIPHIER_MDMAC_SLAVE_BUSWIDTHS \
	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
	 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
	 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
	 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
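/*
 * One descriptor corresponds to one prep_slave_sg request; sg_cur is the
 * index of the scatterlist entry currently programmed into the hardware.
 */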
struct uniphier_mdmac_desc {
	struct virt_dma_desc vd;
	struct scatterlist *sgl;
	unsigned int sg_len;
	unsigned int sg_cur;
	enum dma_transfer_direction dir;
};

struct uniphier_mdmac_chan {
	struct virt_dma_chan vc;
	struct uniphier_mdmac_device *mdev;
	struct uniphier_mdmac_desc *md;
	void __iomem *reg_ch_base;
	unsigned int chan_id;
};
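/*
 * The channel count is not hardcoded in the driver; probe sizes the
 * flexible channels[] array from platform_irq_count(), one interrupt
 * resource per channel.
 */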
struct uniphier_mdmac_device {
	struct dma_device ddev;
	struct clk *clk;
	void __iomem *reg_base;
	struct uniphier_mdmac_chan channels[];
};

static struct uniphier_mdmac_chan *
to_uniphier_mdmac_chan(struct virt_dma_chan *vc)
{
	return container_of(vc, struct uniphier_mdmac_chan, vc);
}

static struct uniphier_mdmac_desc *
to_uniphier_mdmac_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct uniphier_mdmac_desc, vd);
}

/* mc->vc.lock must be held by caller */
static struct uniphier_mdmac_desc *
uniphier_mdmac_next_desc(struct uniphier_mdmac_chan *mc)
{
	struct virt_dma_desc *vd;

	vd = vchan_next_desc(&mc->vc);
	if (!vd) {
		mc->md = NULL;
		return NULL;
	}

	list_del(&vd->node);

	mc->md = to_uniphier_mdmac_desc(vd);

	return mc->md;
}
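/*
 * Program one scatterlist chunk into the channel registers and kick the
 * transfer. The device side is set to a fixed address mode (address 0),
 * while the memory side increments through the chunk; completion of the
 * chunk raises the DONE interrupt.
 */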
/* mc->vc.lock must be held by caller */
static void uniphier_mdmac_handle(struct uniphier_mdmac_chan *mc,
				  struct uniphier_mdmac_desc *md)
{
	struct uniphier_mdmac_device *mdev = mc->mdev;
	struct scatterlist *sg;
	u32 irq_flag = UNIPHIER_MDMAC_CH_IRQ__DONE;
	u32 src_mode, src_addr, dest_mode, dest_addr, chunk_size;

	sg = &md->sgl[md->sg_cur];

	if (md->dir == DMA_MEM_TO_DEV) {
		src_mode = UNIPHIER_MDMAC_CH_MODE__ADDR_INC;
		src_addr = sg_dma_address(sg);
		dest_mode = UNIPHIER_MDMAC_CH_MODE__ADDR_FIXED;
		dest_addr = 0;
	} else {
		src_mode = UNIPHIER_MDMAC_CH_MODE__ADDR_FIXED;
		src_addr = 0;
		dest_mode = UNIPHIER_MDMAC_CH_MODE__ADDR_INC;
		dest_addr = sg_dma_address(sg);
	}

	chunk_size = sg_dma_len(sg);

	writel(src_mode, mc->reg_ch_base + UNIPHIER_MDMAC_CH_SRC_MODE);
	writel(dest_mode, mc->reg_ch_base + UNIPHIER_MDMAC_CH_DEST_MODE);
	writel(src_addr, mc->reg_ch_base + UNIPHIER_MDMAC_CH_SRC_ADDR);
	writel(dest_addr, mc->reg_ch_base + UNIPHIER_MDMAC_CH_DEST_ADDR);
	writel(chunk_size, mc->reg_ch_base + UNIPHIER_MDMAC_CH_SIZE);

	/* write 1 to clear */
	writel(irq_flag, mc->reg_ch_base + UNIPHIER_MDMAC_CH_IRQ_REQ);

	writel(irq_flag, mc->reg_ch_base + UNIPHIER_MDMAC_CH_IRQ_EN);

	writel(BIT(mc->chan_id), mdev->reg_base + UNIPHIER_MDMAC_CMD);
}

/* mc->vc.lock must be held by caller */
static void uniphier_mdmac_start(struct uniphier_mdmac_chan *mc)
{
	struct uniphier_mdmac_desc *md;

	md = uniphier_mdmac_next_desc(mc);
	if (md)
		uniphier_mdmac_handle(mc, md);
}

/* mc->vc.lock must be held by caller */
static int uniphier_mdmac_abort(struct uniphier_mdmac_chan *mc)
{
	struct uniphier_mdmac_device *mdev = mc->mdev;
	u32 irq_flag = UNIPHIER_MDMAC_CH_IRQ__ABORT;
	u32 val;

	/* write 1 to clear */
	writel(irq_flag, mc->reg_ch_base + UNIPHIER_MDMAC_CH_IRQ_REQ);

	writel(UNIPHIER_MDMAC_CMD_ABORT | BIT(mc->chan_id),
	       mdev->reg_base + UNIPHIER_MDMAC_CMD);

	/*
	 * Abort should be accepted soon. We poll the bit here instead of
	 * waiting for the interrupt.
	 */
	return readl_poll_timeout(mc->reg_ch_base + UNIPHIER_MDMAC_CH_IRQ_REQ,
				  val, val & irq_flag, 0, 20);
}
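/*
 * Interrupt flow: acknowledge the latched flags, then decide whether this
 * was a normal chunk completion or an abort (mc->md is NULL while
 * aborting). On completion, advance to the next chunk, or complete the
 * cookie and start the next queued descriptor.
 */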
static irqreturn_t uniphier_mdmac_interrupt(int irq, void *dev_id)
{
	struct uniphier_mdmac_chan *mc = dev_id;
	struct uniphier_mdmac_desc *md;
	irqreturn_t ret = IRQ_HANDLED;
	u32 irq_stat;

	spin_lock(&mc->vc.lock);

	irq_stat = readl(mc->reg_ch_base + UNIPHIER_MDMAC_CH_IRQ_DET);

	/*
	 * Some channels share a single interrupt line. If the IRQ status is
	 * 0, this was probably triggered by a different channel.
	 */
	if (!irq_stat) {
		ret = IRQ_NONE;
		goto out;
	}

	/* write 1 to clear */
	writel(irq_stat, mc->reg_ch_base + UNIPHIER_MDMAC_CH_IRQ_REQ);

	/*
	 * The UNIPHIER_MDMAC_CH_IRQ__DONE interrupt is asserted even when the
	 * DMA is aborted. To distinguish normal completion from an abort,
	 * check mc->md. If it is NULL, we are aborting.
	 */
	md = mc->md;
	if (!md)
		goto out;

	md->sg_cur++;

	if (md->sg_cur >= md->sg_len) {
		vchan_cookie_complete(&md->vd);
		md = uniphier_mdmac_next_desc(mc);
		if (!md)
			goto out;
	}

	uniphier_mdmac_handle(mc, md);

out:
	spin_unlock(&mc->vc.lock);

	return ret;
}

static void uniphier_mdmac_free_chan_resources(struct dma_chan *chan)
{
	vchan_free_chan_resources(to_virt_chan(chan));
}

static struct dma_async_tx_descriptor *
uniphier_mdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
			     unsigned int sg_len,
			     enum dma_transfer_direction direction,
			     unsigned long flags, void *context)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	struct uniphier_mdmac_desc *md;

	if (!is_slave_direction(direction))
		return NULL;

	md = kzalloc(sizeof(*md), GFP_NOWAIT);
	if (!md)
		return NULL;

	md->sgl = sgl;
	md->sg_len = sg_len;
	md->dir = direction;

	return vchan_tx_prep(vc, &md->vd, flags);
}
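/*
 * Illustrative client-side usage (a sketch, not part of this driver): a
 * peripheral driver referencing this controller through its "dmas" DT
 * property would typically submit a mapped scatterlist via the generic
 * dmaengine API, e.g.:
 *
 *	chan = dma_request_chan(dev, "rx");
 *	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_DEV_TO_MEM,
 *				       DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */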
static int uniphier_mdmac_terminate_all(struct dma_chan *chan)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	struct uniphier_mdmac_chan *mc = to_uniphier_mdmac_chan(vc);
	unsigned long flags;
	int ret = 0;
	LIST_HEAD(head);

	spin_lock_irqsave(&vc->lock, flags);

	if (mc->md) {
		vchan_terminate_vdesc(&mc->md->vd);
		mc->md = NULL;
		ret = uniphier_mdmac_abort(mc);
	}
	vchan_get_all_descriptors(vc, &head);

	spin_unlock_irqrestore(&vc->lock, flags);

	vchan_dma_desc_free_list(vc, &head);

	return ret;
}

static void uniphier_mdmac_synchronize(struct dma_chan *chan)
{
	vchan_synchronize(to_virt_chan(chan));
}
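/*
 * The residue is the number of bytes still to be transferred: the CH_SIZE
 * readback for the in-flight chunk plus the full length of every chunk
 * that has not been programmed into the hardware yet.
 */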
static enum dma_status uniphier_mdmac_tx_status(struct dma_chan *chan,
						dma_cookie_t cookie,
						struct dma_tx_state *txstate)
{
	struct virt_dma_chan *vc;
	struct virt_dma_desc *vd;
	struct uniphier_mdmac_chan *mc;
	struct uniphier_mdmac_desc *md = NULL;
	enum dma_status stat;
	unsigned long flags;
	int i;

	stat = dma_cookie_status(chan, cookie, txstate);
	/* Return immediately if we do not need to compute the residue. */
	if (stat == DMA_COMPLETE || !txstate)
		return stat;

	vc = to_virt_chan(chan);

	spin_lock_irqsave(&vc->lock, flags);

	mc = to_uniphier_mdmac_chan(vc);

	if (mc->md && mc->md->vd.tx.cookie == cookie) {
		/* residue from the in-flight chunk */
		txstate->residue = readl(mc->reg_ch_base +
					 UNIPHIER_MDMAC_CH_SIZE);
		md = mc->md;
	}

	if (!md) {
		vd = vchan_find_desc(vc, cookie);
		if (vd)
			md = to_uniphier_mdmac_desc(vd);
	}

	if (md) {
		/* residue from the queued chunks */
		for (i = md->sg_cur; i < md->sg_len; i++)
			txstate->residue += sg_dma_len(&md->sgl[i]);
	}

	spin_unlock_irqrestore(&vc->lock, flags);

	return stat;
}

static void uniphier_mdmac_issue_pending(struct dma_chan *chan)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	struct uniphier_mdmac_chan *mc = to_uniphier_mdmac_chan(vc);
	unsigned long flags;

	spin_lock_irqsave(&vc->lock, flags);

	if (vchan_issue_pending(vc) && !mc->md)
		uniphier_mdmac_start(mc);

	spin_unlock_irqrestore(&vc->lock, flags);
}

static void uniphier_mdmac_desc_free(struct virt_dma_desc *vd)
{
	kfree(to_uniphier_mdmac_desc(vd));
}
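/*
 * Each channel has its own interrupt resource. The handler is registered
 * with IRQF_SHARED because several channels may sit on one line; the
 * handler returns IRQ_NONE when the event belongs to another channel.
 */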
static int uniphier_mdmac_chan_init(struct platform_device *pdev,
				    struct uniphier_mdmac_device *mdev,
				    int chan_id)
{
	struct device *dev = &pdev->dev;
	struct uniphier_mdmac_chan *mc = &mdev->channels[chan_id];
	char *irq_name;
	int irq, ret;

	irq = platform_get_irq(pdev, chan_id);
	if (irq < 0)
		return irq;

	irq_name = devm_kasprintf(dev, GFP_KERNEL, "uniphier-mio-dmac-ch%d",
				  chan_id);
	if (!irq_name)
		return -ENOMEM;

	ret = devm_request_irq(dev, irq, uniphier_mdmac_interrupt,
			       IRQF_SHARED, irq_name, mc);
	if (ret)
		return ret;

	mc->mdev = mdev;
	mc->reg_ch_base = mdev->reg_base + UNIPHIER_MDMAC_CH_OFFSET +
					UNIPHIER_MDMAC_CH_STRIDE * chan_id;
	mc->chan_id = chan_id;
	mc->vc.desc_free = uniphier_mdmac_desc_free;
	vchan_init(&mc->vc, &mdev->ddev);

	return 0;
}
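/*
 * The channel count is discovered from the number of interrupt resources,
 * so no dedicated DT property is needed. DMA_PRIVATE marks the channels as
 * private, so clients obtain them only through explicit requests resolved
 * by of_dma_xlate_by_chan_id.
 */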
static int uniphier_mdmac_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct uniphier_mdmac_device *mdev;
	struct dma_device *ddev;
	int nr_chans, ret, i;

	nr_chans = platform_irq_count(pdev);
	if (nr_chans < 0)
		return nr_chans;

	ret = dma_set_mask(dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	mdev = devm_kzalloc(dev, struct_size(mdev, channels, nr_chans),
			    GFP_KERNEL);
	if (!mdev)
		return -ENOMEM;

	mdev->reg_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(mdev->reg_base))
		return PTR_ERR(mdev->reg_base);

	mdev->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(mdev->clk)) {
		dev_err(dev, "failed to get clock\n");
		return PTR_ERR(mdev->clk);
	}

	ret = clk_prepare_enable(mdev->clk);
	if (ret)
		return ret;

	ddev = &mdev->ddev;
	ddev->dev = dev;
	dma_cap_set(DMA_PRIVATE, ddev->cap_mask);
	ddev->src_addr_widths = UNIPHIER_MDMAC_SLAVE_BUSWIDTHS;
	ddev->dst_addr_widths = UNIPHIER_MDMAC_SLAVE_BUSWIDTHS;
	ddev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
	ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
	ddev->device_free_chan_resources = uniphier_mdmac_free_chan_resources;
	ddev->device_prep_slave_sg = uniphier_mdmac_prep_slave_sg;
	ddev->device_terminate_all = uniphier_mdmac_terminate_all;
	ddev->device_synchronize = uniphier_mdmac_synchronize;
	ddev->device_tx_status = uniphier_mdmac_tx_status;
	ddev->device_issue_pending = uniphier_mdmac_issue_pending;
	INIT_LIST_HEAD(&ddev->channels);

	for (i = 0; i < nr_chans; i++) {
		ret = uniphier_mdmac_chan_init(pdev, mdev, i);
		if (ret)
			goto disable_clk;
	}

	ret = dma_async_device_register(ddev);
	if (ret)
		goto disable_clk;

	ret = of_dma_controller_register(dev->of_node, of_dma_xlate_by_chan_id,
					 ddev);
	if (ret)
		goto unregister_dmac;

	platform_set_drvdata(pdev, mdev);

	return 0;

unregister_dmac:
	dma_async_device_unregister(ddev);
disable_clk:
	clk_disable_unprepare(mdev->clk);

	return ret;
}

static int uniphier_mdmac_remove(struct platform_device *pdev)
{
	struct uniphier_mdmac_device *mdev = platform_get_drvdata(pdev);
	struct dma_chan *chan;
	int ret;

	/*
	 * Before reaching here, almost all descriptors have been freed by the
	 * ->device_free_chan_resources() hook. However, each channel might
	 * still be holding one descriptor that was in flight at that moment.
	 * Terminate it to make sure this hardware is no longer running. Then,
	 * free the channel resources once again to avoid a memory leak.
	 */
	list_for_each_entry(chan, &mdev->ddev.channels, device_node) {
		ret = dmaengine_terminate_sync(chan);
		if (ret)
			return ret;
		uniphier_mdmac_free_chan_resources(chan);
	}

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&mdev->ddev);
	clk_disable_unprepare(mdev->clk);

	return 0;
}

static const struct of_device_id uniphier_mdmac_match[] = {
	{ .compatible = "socionext,uniphier-mio-dmac" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, uniphier_mdmac_match);

static struct platform_driver uniphier_mdmac_driver = {
	.probe = uniphier_mdmac_probe,
	.remove = uniphier_mdmac_remove,
	.driver = {
		.name = "uniphier-mio-dmac",
		.of_match_table = uniphier_mdmac_match,
	},
};
module_platform_driver(uniphier_mdmac_driver);

MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>");
MODULE_DESCRIPTION("UniPhier MIO DMAC driver");
MODULE_LICENSE("GPL v2");