fsl-edma.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drivers/dma/fsl-edma.c
 *
 * Copyright 2013-2014 Freescale Semiconductor, Inc.
 *
 * Driver for the Freescale eDMA engine with flexible channel multiplexing
 * capability for DMA request sources. The eDMA block can be found on some
 * Vybrid and Layerscape SoCs.
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_dma.h>

#include "fsl-edma-common.h"

static void fsl_edma_synchronize(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);

	vchan_synchronize(&fsl_chan->vchan);
}
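
/*
 * Transfer-complete interrupt: read the interrupt status register, clear
 * each pending channel bit, complete or recycle the finished descriptor,
 * and kick off the next queued transfer if one is ready.
 */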
static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id)
{
	struct fsl_edma_engine *fsl_edma = dev_id;
	unsigned int intr, ch;
	struct edma_regs *regs = &fsl_edma->regs;
	struct fsl_edma_chan *fsl_chan;

	intr = edma_readl(fsl_edma, regs->intl);
	if (!intr)
		return IRQ_NONE;

	for (ch = 0; ch < fsl_edma->n_chans; ch++) {
		if (intr & (0x1 << ch)) {
			edma_writeb(fsl_edma, EDMA_CINT_CINT(ch), regs->cint);

			fsl_chan = &fsl_edma->chans[ch];

			spin_lock(&fsl_chan->vchan.lock);

			if (!fsl_chan->edesc) {
				/* terminate_all called before */
				spin_unlock(&fsl_chan->vchan.lock);
				continue;
			}

			if (!fsl_chan->edesc->iscyclic) {
				list_del(&fsl_chan->edesc->vdesc.node);
				vchan_cookie_complete(&fsl_chan->edesc->vdesc);
				fsl_chan->edesc = NULL;
				fsl_chan->status = DMA_COMPLETE;
				fsl_chan->idle = true;
			} else {
				vchan_cyclic_callback(&fsl_chan->edesc->vdesc);
			}

			if (!fsl_chan->edesc)
				fsl_edma_xfer_desc(fsl_chan);

			spin_unlock(&fsl_chan->vchan.lock);
		}
	}
	return IRQ_HANDLED;
}
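
/*
 * Error interrupt: stop the DMA request on every faulted channel, clear
 * its error status bit, and mark the channel as errored and idle.
 */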
static irqreturn_t fsl_edma_err_handler(int irq, void *dev_id)
{
	struct fsl_edma_engine *fsl_edma = dev_id;
	unsigned int err, ch;
	struct edma_regs *regs = &fsl_edma->regs;

	err = edma_readl(fsl_edma, regs->errl);
	if (!err)
		return IRQ_NONE;

	for (ch = 0; ch < fsl_edma->n_chans; ch++) {
		if (err & (0x1 << ch)) {
			fsl_edma_disable_request(&fsl_edma->chans[ch]);
			edma_writeb(fsl_edma, EDMA_CERR_CERR(ch), regs->cerr);
			fsl_edma->chans[ch].status = DMA_ERROR;
			fsl_edma->chans[ch].idle = true;
		}
	}
	return IRQ_HANDLED;
}
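
/*
 * Combined handler for SoCs that route the transfer-complete and error
 * interrupts through a single IRQ line: try the tx path first, then fall
 * back to the error path.
 */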
static irqreturn_t fsl_edma_irq_handler(int irq, void *dev_id)
{
	if (fsl_edma_tx_handler(irq, dev_id) == IRQ_HANDLED)
		return IRQ_HANDLED;

	return fsl_edma_err_handler(irq, dev_id);
}
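
/*
 * Translate a two-cell DT DMA specifier into a channel: args[0] selects
 * the DMAMUX group the channel must belong to, and args[1] is the request
 * (slave) ID programmed into the mux. A consumer node would reference
 * the controller roughly like this (a sketch; the exact phandle and
 * request number depend on the board's device tree):
 *
 *	dmas = <&edma0 0 29>;
 *	dma-names = "rx";
 */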
static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec,
		struct of_dma *ofdma)
{
	struct fsl_edma_engine *fsl_edma = ofdma->of_dma_data;
	struct dma_chan *chan, *_chan;
	struct fsl_edma_chan *fsl_chan;
	u32 dmamux_nr = fsl_edma->drvdata->dmamuxs;
	unsigned long chans_per_mux = fsl_edma->n_chans / dmamux_nr;

	if (dma_spec->args_count != 2)
		return NULL;

	mutex_lock(&fsl_edma->fsl_edma_mutex);
	list_for_each_entry_safe(chan, _chan, &fsl_edma->dma_dev.channels, device_node) {
		if (chan->client_count)
			continue;
		if ((chan->chan_id / chans_per_mux) == dma_spec->args[0]) {
			chan = dma_get_slave_channel(chan);
			if (chan) {
				chan->device->privatecnt++;
				fsl_chan = to_fsl_edma_chan(chan);
				fsl_chan->slave_id = dma_spec->args[1];
				fsl_edma_chan_mux(fsl_chan, fsl_chan->slave_id,
						true);
				mutex_unlock(&fsl_edma->fsl_edma_mutex);
				return chan;
			}
		}
	}
	mutex_unlock(&fsl_edma->fsl_edma_mutex);
	return NULL;
}
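
/*
 * v1 IRQ setup: look up the "edma-tx" and "edma-err" interrupts by name.
 * If both names map to the same line, register the combined handler;
 * otherwise register a dedicated handler for each.
 */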
static int
fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
{
	int ret;

	fsl_edma->txirq = platform_get_irq_byname(pdev, "edma-tx");
	if (fsl_edma->txirq < 0)
		return fsl_edma->txirq;

	fsl_edma->errirq = platform_get_irq_byname(pdev, "edma-err");
	if (fsl_edma->errirq < 0)
		return fsl_edma->errirq;

	if (fsl_edma->txirq == fsl_edma->errirq) {
		ret = devm_request_irq(&pdev->dev, fsl_edma->txirq,
				fsl_edma_irq_handler, 0, "eDMA", fsl_edma);
		if (ret) {
			dev_err(&pdev->dev, "Can't register eDMA IRQ.\n");
			return ret;
		}
	} else {
		ret = devm_request_irq(&pdev->dev, fsl_edma->txirq,
				fsl_edma_tx_handler, 0, "eDMA tx", fsl_edma);
		if (ret) {
			dev_err(&pdev->dev, "Can't register eDMA tx IRQ.\n");
			return ret;
		}

		ret = devm_request_irq(&pdev->dev, fsl_edma->errirq,
				fsl_edma_err_handler, 0, "eDMA err", fsl_edma);
		if (ret) {
			dev_err(&pdev->dev, "Can't register eDMA err IRQ.\n");
			return ret;
		}
	}

	return 0;
}
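
/*
 * v3 (i.MX7ULP-style) IRQ setup: interrupts are retrieved by index rather
 * than by name. The last interrupt in the list is the error interrupt;
 * the rest are per-channel transfer-complete interrupts.
 */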
static int
fsl_edma2_irq_init(struct platform_device *pdev,
		   struct fsl_edma_engine *fsl_edma)
{
	int i, ret, irq;
	int count;

	count = platform_irq_count(pdev);
	dev_dbg(&pdev->dev, "%s Found %d interrupts\n", __func__, count);
	if (count <= 2) {
		dev_err(&pdev->dev, "Interrupt count in DTS is not correct.\n");
		return -EINVAL;
	}
	/*
	 * On i.MX7ULP there are 16 independent channel interrupts plus one
	 * error interrupt. Two channels share each interrupt line, for
	 * example ch0/ch16, ch1/ch17, and so on. For now, simply request the
	 * IRQs without the IRQF_SHARED flag, since 16 channels are enough on
	 * i.MX7ULP, whose M4 domain owns some of the peripherals.
	 */
	for (i = 0; i < count; i++) {
		irq = platform_get_irq(pdev, i);
		if (irq < 0)
			return -ENXIO;

		sprintf(fsl_edma->chans[i].chan_name, "eDMA2-CH%02d", i);

		/* The last IRQ is for the eDMA error interrupt */
		if (i == count - 1)
			ret = devm_request_irq(&pdev->dev, irq,
						fsl_edma_err_handler,
						0, "eDMA2-ERR", fsl_edma);
		else
			ret = devm_request_irq(&pdev->dev, irq,
						fsl_edma_tx_handler, 0,
						fsl_edma->chans[i].chan_name,
						fsl_edma);
		if (ret)
			return ret;
	}

	return 0;
}

static void fsl_edma_irq_exit(
		struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
{
	if (fsl_edma->txirq == fsl_edma->errirq) {
		devm_free_irq(&pdev->dev, fsl_edma->txirq, fsl_edma);
	} else {
		devm_free_irq(&pdev->dev, fsl_edma->txirq, fsl_edma);
		devm_free_irq(&pdev->dev, fsl_edma->errirq, fsl_edma);
	}
}

static void fsl_disable_clocks(struct fsl_edma_engine *fsl_edma, int nr_clocks)
{
	int i;

	for (i = 0; i < nr_clocks; i++)
		clk_disable_unprepare(fsl_edma->muxclk[i]);
}

static struct fsl_edma_drvdata vf610_data = {
	.version = v1,
	.dmamuxs = DMAMUX_NR,
	.setup_irq = fsl_edma_irq_init,
};

static struct fsl_edma_drvdata ls1028a_data = {
	.version = v1,
	.dmamuxs = DMAMUX_NR,
	.mux_swap = true,
	.setup_irq = fsl_edma_irq_init,
};

static struct fsl_edma_drvdata imx7ulp_data = {
	.version = v3,
	.dmamuxs = 1,
	.has_dmaclk = true,
	.setup_irq = fsl_edma2_irq_init,
};

static const struct of_device_id fsl_edma_dt_ids[] = {
	{ .compatible = "fsl,vf610-edma", .data = &vf610_data},
	{ .compatible = "fsl,ls1028a-edma", .data = &ls1028a_data},
	{ .compatible = "fsl,imx7ulp-edma", .data = &imx7ulp_data},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_edma_dt_ids);
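
/*
 * Probe: read dma-channels from the DT, map the eDMA and DMAMUX register
 * regions, enable the (optional) block clock and the mux clocks, initialize
 * every virtual channel, hook up the per-variant IRQs, and register the
 * engine with the dmaengine core and the DT DMA helpers.
 */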
static int fsl_edma_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id =
			of_match_device(fsl_edma_dt_ids, &pdev->dev);
	struct device_node *np = pdev->dev.of_node;
	struct fsl_edma_engine *fsl_edma;
	const struct fsl_edma_drvdata *drvdata = NULL;
	struct fsl_edma_chan *fsl_chan;
	struct edma_regs *regs;
	struct resource *res;
	int len, chans;
	int ret, i;

	if (of_id)
		drvdata = of_id->data;
	if (!drvdata) {
		dev_err(&pdev->dev, "unable to find driver data\n");
		return -EINVAL;
	}

	ret = of_property_read_u32(np, "dma-channels", &chans);
	if (ret) {
		dev_err(&pdev->dev, "Can't get dma-channels.\n");
		return ret;
	}

	len = sizeof(*fsl_edma) + sizeof(*fsl_chan) * chans;
	fsl_edma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
	if (!fsl_edma)
		return -ENOMEM;

	fsl_edma->drvdata = drvdata;
	fsl_edma->n_chans = chans;
	mutex_init(&fsl_edma->fsl_edma_mutex);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	fsl_edma->membase = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(fsl_edma->membase))
		return PTR_ERR(fsl_edma->membase);

	fsl_edma_setup_regs(fsl_edma);
	regs = &fsl_edma->regs;

	if (drvdata->has_dmaclk) {
		fsl_edma->dmaclk = devm_clk_get(&pdev->dev, "dma");
		if (IS_ERR(fsl_edma->dmaclk)) {
			dev_err(&pdev->dev, "Missing DMA block clock.\n");
			return PTR_ERR(fsl_edma->dmaclk);
		}

		ret = clk_prepare_enable(fsl_edma->dmaclk);
		if (ret) {
			dev_err(&pdev->dev, "DMA clk block failed.\n");
			return ret;
		}
	}

	for (i = 0; i < fsl_edma->drvdata->dmamuxs; i++) {
		char clkname[32];

		res = platform_get_resource(pdev, IORESOURCE_MEM, 1 + i);
		fsl_edma->muxbase[i] = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(fsl_edma->muxbase[i])) {
			/* on error: disable all previously enabled clks */
			fsl_disable_clocks(fsl_edma, i);
			return PTR_ERR(fsl_edma->muxbase[i]);
		}

		sprintf(clkname, "dmamux%d", i);
		fsl_edma->muxclk[i] = devm_clk_get(&pdev->dev, clkname);
		if (IS_ERR(fsl_edma->muxclk[i])) {
			dev_err(&pdev->dev, "Missing DMAMUX block clock.\n");
			/* on error: disable all previously enabled clks */
			fsl_disable_clocks(fsl_edma, i);
			return PTR_ERR(fsl_edma->muxclk[i]);
		}

		ret = clk_prepare_enable(fsl_edma->muxclk[i]);
		if (ret)
			/* on error: disable all previously enabled clks */
			fsl_disable_clocks(fsl_edma, i);
	}

	fsl_edma->big_endian = of_property_read_bool(np, "big-endian");

	INIT_LIST_HEAD(&fsl_edma->dma_dev.channels);
	for (i = 0; i < fsl_edma->n_chans; i++) {
		struct fsl_edma_chan *fsl_chan = &fsl_edma->chans[i];

		fsl_chan->edma = fsl_edma;
		fsl_chan->pm_state = RUNNING;
		fsl_chan->slave_id = 0;
		fsl_chan->idle = true;
		fsl_chan->dma_dir = DMA_NONE;
		fsl_chan->vchan.desc_free = fsl_edma_free_desc;
		vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev);

		edma_writew(fsl_edma, 0x0, &regs->tcd[i].csr);
		fsl_edma_chan_mux(fsl_chan, 0, false);
	}

	edma_writel(fsl_edma, ~0, regs->intl);
	ret = fsl_edma->drvdata->setup_irq(pdev, fsl_edma);
	if (ret)
		return ret;

	dma_cap_set(DMA_PRIVATE, fsl_edma->dma_dev.cap_mask);
	dma_cap_set(DMA_SLAVE, fsl_edma->dma_dev.cap_mask);
	dma_cap_set(DMA_CYCLIC, fsl_edma->dma_dev.cap_mask);

	fsl_edma->dma_dev.dev = &pdev->dev;
	fsl_edma->dma_dev.device_alloc_chan_resources
		= fsl_edma_alloc_chan_resources;
	fsl_edma->dma_dev.device_free_chan_resources
		= fsl_edma_free_chan_resources;
	fsl_edma->dma_dev.device_tx_status = fsl_edma_tx_status;
	fsl_edma->dma_dev.device_prep_slave_sg = fsl_edma_prep_slave_sg;
	fsl_edma->dma_dev.device_prep_dma_cyclic = fsl_edma_prep_dma_cyclic;
	fsl_edma->dma_dev.device_config = fsl_edma_slave_config;
	fsl_edma->dma_dev.device_pause = fsl_edma_pause;
	fsl_edma->dma_dev.device_resume = fsl_edma_resume;
	fsl_edma->dma_dev.device_terminate_all = fsl_edma_terminate_all;
	fsl_edma->dma_dev.device_synchronize = fsl_edma_synchronize;
	fsl_edma->dma_dev.device_issue_pending = fsl_edma_issue_pending;

	fsl_edma->dma_dev.src_addr_widths = FSL_EDMA_BUSWIDTHS;
	fsl_edma->dma_dev.dst_addr_widths = FSL_EDMA_BUSWIDTHS;
	fsl_edma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);

	platform_set_drvdata(pdev, fsl_edma);

	ret = dma_async_device_register(&fsl_edma->dma_dev);
	if (ret) {
		dev_err(&pdev->dev,
			"Can't register Freescale eDMA engine. (%d)\n", ret);
		fsl_disable_clocks(fsl_edma, fsl_edma->drvdata->dmamuxs);
		return ret;
	}

	ret = of_dma_controller_register(np, fsl_edma_xlate, fsl_edma);
	if (ret) {
		dev_err(&pdev->dev,
			"Can't register Freescale eDMA of_dma. (%d)\n", ret);
		dma_async_device_unregister(&fsl_edma->dma_dev);
		fsl_disable_clocks(fsl_edma, fsl_edma->drvdata->dmamuxs);
		return ret;
	}

	/* enable round robin arbitration */
	edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);

	return 0;
}
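
/*
 * Remove: tear down in roughly the reverse order of probe: free the IRQs,
 * drain the virtual channels, unregister from the DT DMA helpers and the
 * dmaengine core, and gate the mux clocks.
 */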
static int fsl_edma_remove(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct fsl_edma_engine *fsl_edma = platform_get_drvdata(pdev);

	fsl_edma_irq_exit(pdev, fsl_edma);
	fsl_edma_cleanup_vchan(&fsl_edma->dma_dev);
	of_dma_controller_free(np);
	dma_async_device_unregister(&fsl_edma->dma_dev);
	fsl_disable_clocks(fsl_edma, fsl_edma->drvdata->dmamuxs);

	return 0;
}
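
/*
 * Late-suspend hook: force-disable any channel that is still active,
 * detach it from the mux, and set every channel's pm_state to SUSPENDED
 * under the vchan lock.
 */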
static int fsl_edma_suspend_late(struct device *dev)
{
	struct fsl_edma_engine *fsl_edma = dev_get_drvdata(dev);
	struct fsl_edma_chan *fsl_chan;
	unsigned long flags;
	int i;

	for (i = 0; i < fsl_edma->n_chans; i++) {
		fsl_chan = &fsl_edma->chans[i];
		spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
		/* Make sure the channel is idle, or force-disable it. */
		if (unlikely(!fsl_chan->idle)) {
			dev_warn(dev, "WARN: There is non-idle channel.");
			fsl_edma_disable_request(fsl_chan);
			fsl_edma_chan_mux(fsl_chan, 0, false);
		}

		fsl_chan->pm_state = SUSPENDED;
		spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	}

	return 0;
}
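
/*
 * Early-resume hook: restore each channel's mux routing and TCD control
 * word, mark the channels RUNNING again, and re-enable round-robin
 * arbitration in the control register.
 */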
static int fsl_edma_resume_early(struct device *dev)
{
	struct fsl_edma_engine *fsl_edma = dev_get_drvdata(dev);
	struct fsl_edma_chan *fsl_chan;
	struct edma_regs *regs = &fsl_edma->regs;
	int i;

	for (i = 0; i < fsl_edma->n_chans; i++) {
		fsl_chan = &fsl_edma->chans[i];
		fsl_chan->pm_state = RUNNING;
		edma_writew(fsl_edma, 0x0, &regs->tcd[i].csr);
		if (fsl_chan->slave_id != 0)
			fsl_edma_chan_mux(fsl_chan, fsl_chan->slave_id, true);
	}

	edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);

	return 0;
}

/*
 * The eDMA provides services to other devices, so it should suspend late
 * and resume early. By the time the eDMA suspends, all of its clients
 * should have stopped their DMA transfers and left their channels idle.
 */
static const struct dev_pm_ops fsl_edma_pm_ops = {
	.suspend_late   = fsl_edma_suspend_late,
	.resume_early   = fsl_edma_resume_early,
};

static struct platform_driver fsl_edma_driver = {
	.driver		= {
		.name	= "fsl-edma",
		.of_match_table = fsl_edma_dt_ids,
		.pm     = &fsl_edma_pm_ops,
	},
	.probe          = fsl_edma_probe,
	.remove		= fsl_edma_remove,
};

static int __init fsl_edma_init(void)
{
	return platform_driver_register(&fsl_edma_driver);
}
subsys_initcall(fsl_edma_init);

static void __exit fsl_edma_exit(void)
{
	platform_driver_unregister(&fsl_edma_driver);
}
module_exit(fsl_edma_exit);

MODULE_ALIAS("platform:fsl-edma");
MODULE_DESCRIPTION("Freescale eDMA engine driver");
MODULE_LICENSE("GPL v2");