// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (C) STMicroelectronics SA 2017
 * Author(s): M'boumba Cedric Madianga <cedric.madianga@gmail.com>
 *            Pierre-Yves Mordret <pierre-yves.mordret@st.com>
 *
 * DMA Router driver for STM32 DMA MUX
 *
 * Based on TI DMA Crossbar driver
 */
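
/*
 * Typical wiring (illustrative sketch only: node names, addresses and
 * numbers are placeholders, not taken from a particular board):
 *
 *	dmamux1: dma-router@40020800 {
 *		compatible = "st,stm32h7-dmamux";
 *		reg = <0x40020800 0x40>;
 *		#dma-cells = <3>;
 *		dma-requests = <128>;
 *		dma-masters = <&dma1 &dma2>;
 *	};
 *
 * Clients then request channels through the mux with a 3-cell
 * specifier, <mux request id, channel config, features>, matching the
 * args_count == 3 check in stm32_dmamux_route_allocate() below.
 */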

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#define STM32_DMAMUX_CCR(x)		(0x4 * (x))
#define STM32_DMAMUX_MAX_DMA_REQUESTS	32
#define STM32_DMAMUX_MAX_REQUESTS	255
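
/*
 * Each DMAMUX output channel x has a 32-bit channel configuration
 * register at offset 0x4 * x.  Writing an input request line number
 * into it routes that request to output channel x; the driver clears
 * it back to 0 when the route is freed.
 */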

/* Per-route data: one instance is allocated for each active route */
struct stm32_dmamux {
	u32 master;	/* Index of the DMA master in "dma-masters" */
	u32 request;	/* DMAMUX input request line */
	u32 chan_id;	/* DMAMUX output channel driving the master */
};

struct stm32_dmamux_data {
	struct dma_router dmarouter;
	struct clk *clk;
	void __iomem *iomem;
	u32 dma_requests; /* Number of DMA requests connected to DMAMUX */
	u32 dmamux_requests; /* Number of DMA requests routed toward DMAs */
	spinlock_t lock; /* Protects register access */
	unsigned long *dma_inuse; /* Used DMA channels */
	u32 ccr[STM32_DMAMUX_MAX_DMA_REQUESTS]; /* Used to back up the CCR
						 * registers in suspend
						 */
	u32 dma_reqs[]; /* Number of DMA requests per DMA master.
			 * [0] holds the number of DMA masters.
			 * To be kept at the very end of this structure.
			 */
};
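
/*
 * Example dma_reqs[] layout for two masters exposing 8 requests each:
 * { 2, 8, 8 } -- dma_reqs[0] holds the master count (set in probe) and
 * dma_reqs[1..] hold each master's "dma-requests" value.
 */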

static inline u32 stm32_dmamux_read(void __iomem *iomem, u32 reg)
{
	return readl_relaxed(iomem + reg);
}

static inline void stm32_dmamux_write(void __iomem *iomem, u32 reg, u32 val)
{
	writel_relaxed(val, iomem + reg);
}

static void stm32_dmamux_free(struct device *dev, void *route_data)
{
	struct stm32_dmamux_data *dmamux = dev_get_drvdata(dev);
	struct stm32_dmamux *mux = route_data;
	unsigned long flags;

	/* Clear dma request */
	spin_lock_irqsave(&dmamux->lock, flags);

	stm32_dmamux_write(dmamux->iomem, STM32_DMAMUX_CCR(mux->chan_id), 0);
	clear_bit(mux->chan_id, dmamux->dma_inuse);
	pm_runtime_put_sync(dev);

	spin_unlock_irqrestore(&dmamux->lock, flags);

	dev_dbg(dev, "Unmapping DMAMUX(%u) to DMA%u(%u)\n",
		mux->request, mux->master, mux->chan_id);

	kfree(mux);
}

static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec,
					 struct of_dma *ofdma)
{
	struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
	struct stm32_dmamux_data *dmamux = platform_get_drvdata(pdev);
	struct stm32_dmamux *mux;
	u32 i, min, max;
	int ret;
	unsigned long flags;

	if (dma_spec->args_count != 3) {
		dev_err(&pdev->dev, "invalid number of dma mux args\n");
		return ERR_PTR(-EINVAL);
	}

	if (dma_spec->args[0] > dmamux->dmamux_requests) {
		dev_err(&pdev->dev, "invalid mux request number: %d\n",
			dma_spec->args[0]);
		return ERR_PTR(-EINVAL);
	}

	mux = kzalloc(sizeof(*mux), GFP_KERNEL);
	if (!mux)
		return ERR_PTR(-ENOMEM);

	spin_lock_irqsave(&dmamux->lock, flags);
	mux->chan_id = find_first_zero_bit(dmamux->dma_inuse,
					   dmamux->dma_requests);

	if (mux->chan_id == dmamux->dma_requests) {
		spin_unlock_irqrestore(&dmamux->lock, flags);
		dev_err(&pdev->dev, "Run out of free DMA requests\n");
		ret = -ENOMEM;
		goto error_chan_id;
	}
	set_bit(mux->chan_id, dmamux->dma_inuse);
	spin_unlock_irqrestore(&dmamux->lock, flags);

	/* Look for DMA Master */
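	/*
	 * Output channels are assigned to the masters in order: channels
	 * [0, dma_reqs[1]) belong to master 0, the next dma_reqs[2] to
	 * master 1, and so on.  Walk the per-master ranges until chan_id
	 * falls within [min, max).
	 */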
	for (i = 1, min = 0, max = dmamux->dma_reqs[i];
	     i <= dmamux->dma_reqs[0];
	     min += dmamux->dma_reqs[i], max += dmamux->dma_reqs[++i])
		if (mux->chan_id < max)
			break;
	mux->master = i - 1;

	/* The of_node_put() will be done in the of_dma_router_xlate() function */
	dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", i - 1);
	if (!dma_spec->np) {
		dev_err(&pdev->dev, "can't get dma master\n");
		ret = -EINVAL;
		goto error;
	}

	/* Set dma request */
	spin_lock_irqsave(&dmamux->lock, flags);
	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret < 0) {
		spin_unlock_irqrestore(&dmamux->lock, flags);
		goto error;
	}
	spin_unlock_irqrestore(&dmamux->lock, flags);

	mux->request = dma_spec->args[0];

	/*
	 * Craft the 4-cell DMA spec handed to the master: shift the
	 * client's channel config and features up one cell, zero the
	 * master's request-line cell (the DMAMUX does the routing) and
	 * pass the channel id local to the selected master.
	 */
	dma_spec->args[3] = dma_spec->args[2];
	dma_spec->args[2] = dma_spec->args[1];
	dma_spec->args[1] = 0;
	dma_spec->args[0] = mux->chan_id - min;
	dma_spec->args_count = 4;
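
	/* Program the route: connect the input request to the output channel */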
	stm32_dmamux_write(dmamux->iomem, STM32_DMAMUX_CCR(mux->chan_id),
			   mux->request);
	dev_dbg(&pdev->dev, "Mapping DMAMUX(%u) to DMA%u(%u)\n",
		mux->request, mux->master, mux->chan_id);

	return mux;

error:
	clear_bit(mux->chan_id, dmamux->dma_inuse);

error_chan_id:
	kfree(mux);
	return ERR_PTR(ret);
}
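
/* DMA controllers the mux may route to, used to validate "dma-masters" */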
static const struct of_device_id stm32_stm32dma_master_match[] = {
	{ .compatible = "st,stm32-dma", },
	{},
};

static int stm32_dmamux_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	const struct of_device_id *match;
	struct device_node *dma_node;
	struct stm32_dmamux_data *stm32_dmamux;
	struct resource *res;
	void __iomem *iomem;
	struct reset_control *rst;
	int i, count, ret;
	u32 dma_req;

	if (!node)
		return -ENODEV;

	count = device_property_count_u32(&pdev->dev, "dma-masters");
	if (count < 0) {
		dev_err(&pdev->dev, "Can't get DMA master(s) node\n");
		return -ENODEV;
	}

	stm32_dmamux = devm_kzalloc(&pdev->dev, sizeof(*stm32_dmamux) +
				    sizeof(u32) * (count + 1), GFP_KERNEL);
	if (!stm32_dmamux)
		return -ENOMEM;
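
	/*
	 * Walk the "dma-masters" phandles: record each master's
	 * "dma-requests" count in dma_reqs[i] and sum them to size the
	 * output-channel bitmap.
	 */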
	dma_req = 0;
	for (i = 1; i <= count; i++) {
		dma_node = of_parse_phandle(node, "dma-masters", i - 1);

		match = of_match_node(stm32_stm32dma_master_match, dma_node);
		if (!match) {
			dev_err(&pdev->dev, "DMA master is not supported\n");
			of_node_put(dma_node);
			return -EINVAL;
		}

		if (of_property_read_u32(dma_node, "dma-requests",
					 &stm32_dmamux->dma_reqs[i])) {
			dev_info(&pdev->dev,
				 "Missing MUX output information, using %u.\n",
				 STM32_DMAMUX_MAX_DMA_REQUESTS);
			stm32_dmamux->dma_reqs[i] =
				STM32_DMAMUX_MAX_DMA_REQUESTS;
		}
		dma_req += stm32_dmamux->dma_reqs[i];
		of_node_put(dma_node);
	}

	if (dma_req > STM32_DMAMUX_MAX_DMA_REQUESTS) {
		dev_err(&pdev->dev, "Too many DMA Master Requests to manage\n");
		return -ENODEV;
	}

	stm32_dmamux->dma_requests = dma_req;
	stm32_dmamux->dma_reqs[0] = count;
	stm32_dmamux->dma_inuse = devm_kcalloc(&pdev->dev,
					       BITS_TO_LONGS(dma_req),
					       sizeof(unsigned long),
					       GFP_KERNEL);
	if (!stm32_dmamux->dma_inuse)
		return -ENOMEM;

	if (device_property_read_u32(&pdev->dev, "dma-requests",
				     &stm32_dmamux->dmamux_requests)) {
		stm32_dmamux->dmamux_requests = STM32_DMAMUX_MAX_REQUESTS;
		dev_warn(&pdev->dev, "DMAMUX defaulting on %u requests\n",
			 stm32_dmamux->dmamux_requests);
	}

	pm_runtime_get_noresume(&pdev->dev);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	iomem = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(iomem))
		return PTR_ERR(iomem);

	spin_lock_init(&stm32_dmamux->lock);

	stm32_dmamux->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(stm32_dmamux->clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(stm32_dmamux->clk),
				     "Missing clock controller\n");

	ret = clk_prepare_enable(stm32_dmamux->clk);
	if (ret < 0) {
		dev_err(&pdev->dev, "clk_prep_enable error: %d\n", ret);
		return ret;
	}

	rst = devm_reset_control_get(&pdev->dev, NULL);
	if (IS_ERR(rst)) {
		ret = PTR_ERR(rst);
		if (ret == -EPROBE_DEFER)
			goto err_clk;
	} else {
		reset_control_assert(rst);
		udelay(2);
		reset_control_deassert(rst);
	}

	stm32_dmamux->iomem = iomem;
	stm32_dmamux->dmarouter.dev = &pdev->dev;
	stm32_dmamux->dmarouter.route_free = stm32_dmamux_free;

	platform_set_drvdata(pdev, stm32_dmamux);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	pm_runtime_get_noresume(&pdev->dev);

	/* Reset the dmamux */
	for (i = 0; i < stm32_dmamux->dma_requests; i++)
		stm32_dmamux_write(stm32_dmamux->iomem, STM32_DMAMUX_CCR(i), 0);

	pm_runtime_put(&pdev->dev);

	ret = of_dma_router_register(node, stm32_dmamux_route_allocate,
				     &stm32_dmamux->dmarouter);
	if (ret)
		goto pm_disable;

	return 0;

pm_disable:
	pm_runtime_disable(&pdev->dev);
err_clk:
	clk_disable_unprepare(stm32_dmamux->clk);

	return ret;
}

#ifdef CONFIG_PM
static int stm32_dmamux_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev);

	clk_disable_unprepare(stm32_dmamux->clk);

	return 0;
}

static int stm32_dmamux_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev);
	int ret;

	ret = clk_prepare_enable(stm32_dmamux->clk);
	if (ret) {
		dev_err(&pdev->dev, "failed to prepare_enable clock\n");
		return ret;
	}

	return 0;
}
#endif

#ifdef CONFIG_PM_SLEEP
static int stm32_dmamux_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev);
	int i, ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;
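
	/* Back up every output channel's CCR so the routes survive suspend */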
	for (i = 0; i < stm32_dmamux->dma_requests; i++)
		stm32_dmamux->ccr[i] = stm32_dmamux_read(stm32_dmamux->iomem,
							 STM32_DMAMUX_CCR(i));

	pm_runtime_put_sync(dev);

	pm_runtime_force_suspend(dev);

	return 0;
}

static int stm32_dmamux_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev);
	int i, ret;

	ret = pm_runtime_force_resume(dev);
	if (ret < 0)
		return ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;
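
	/* Restore the routes saved by stm32_dmamux_suspend() */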
	for (i = 0; i < stm32_dmamux->dma_requests; i++)
		stm32_dmamux_write(stm32_dmamux->iomem, STM32_DMAMUX_CCR(i),
				   stm32_dmamux->ccr[i]);

	pm_runtime_put_sync(dev);

	return 0;
}
#endif

static const struct dev_pm_ops stm32_dmamux_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(stm32_dmamux_suspend, stm32_dmamux_resume)
	SET_RUNTIME_PM_OPS(stm32_dmamux_runtime_suspend,
			   stm32_dmamux_runtime_resume, NULL)
};

static const struct of_device_id stm32_dmamux_match[] = {
	{ .compatible = "st,stm32h7-dmamux" },
	{},
};

static struct platform_driver stm32_dmamux_driver = {
	.probe	= stm32_dmamux_probe,
	.driver = {
		.name = "stm32-dmamux",
		.of_match_table = stm32_dmamux_match,
		.pm = &stm32_dmamux_pm_ops,
	},
};

static int __init stm32_dmamux_init(void)
{
	return platform_driver_register(&stm32_dmamux_driver);
}
arch_initcall(stm32_dmamux_init);

MODULE_DESCRIPTION("DMA Router driver for STM32 DMA MUX");
MODULE_AUTHOR("M'boumba Cedric Madianga <cedric.madianga@gmail.com>");
MODULE_AUTHOR("Pierre-Yves Mordret <pierre-yves.mordret@st.com>");
MODULE_LICENSE("GPL v2");