mtk-uart-apdma.c

// SPDX-License-Identifier: GPL-2.0
/*
 * MediaTek UART APDMA driver.
 *
 * Copyright (c) 2019 MediaTek Inc.
 * Author: Long Cheng <long.cheng@mediatek.com>
 */

#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "../virt-dma.h"

/* The default number of virtual channels */
#define MTK_UART_APDMA_NR_VCHANS	8

#define VFF_EN_B		BIT(0)
#define VFF_STOP_B		BIT(0)
#define VFF_FLUSH_B		BIT(0)
#define VFF_4G_EN_B		BIT(0)
/* RX interrupt: valid size >= VFF threshold */
#define VFF_RX_INT_EN_B		(BIT(0) | BIT(1))
/* TX interrupt: left size >= VFF threshold */
#define VFF_TX_INT_EN_B		BIT(0)
#define VFF_WARM_RST_B		BIT(0)
#define VFF_RX_INT_CLR_B	(BIT(0) | BIT(1))
#define VFF_TX_INT_CLR_B	0
#define VFF_STOP_CLR_B		0
#define VFF_EN_CLR_B		0
#define VFF_INT_EN_CLR_B	0
#define VFF_4G_SUPPORT_CLR_B	0

/*
 * Interrupt trigger level for TX.
 * With a threshold of n, no polling is required to start TX;
 * otherwise VFF_FLUSH must be polled.
 */
#define VFF_TX_THRE(n)		(n)
/* Interrupt trigger level for RX */
#define VFF_RX_THRE(n)		((n) * 3 / 4)

#define VFF_RING_SIZE		0xffff
/* This bit is inverted each time the ring head wraps around */
#define VFF_RING_WRAP		0x10000

#define VFF_INT_FLAG		0x00
#define VFF_INT_EN		0x04
#define VFF_EN			0x08
#define VFF_RST			0x0c
#define VFF_STOP		0x10
#define VFF_FLUSH		0x14
#define VFF_ADDR		0x1c
#define VFF_LEN			0x24
#define VFF_THRE		0x28
#define VFF_WPT			0x2c
#define VFF_RPT			0x30
/* TX: the buffer size HW can read. RX: the buffer size SW can read. */
#define VFF_VALID_SIZE		0x3c
/* TX: the buffer size SW can write. RX: the buffer size HW can write. */
#define VFF_LEFT_SIZE		0x40
#define VFF_DEBUG_STATUS	0x50
#define VFF_4G_SUPPORT		0x54

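/*
 * VFF_WPT/VFF_RPT hold a ring offset in their low 16 bits (masked with
 * VFF_RING_SIZE) plus a wrap flag in bit 16 (VFF_RING_WRAP) that is
 * toggled each time the corresponding pointer wraps around the ring.
 */
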
struct mtk_uart_apdmadev {
        struct dma_device ddev;
        struct clk *clk;
        bool support_33bits;
        unsigned int dma_requests;
};

struct mtk_uart_apdma_desc {
        struct virt_dma_desc vd;
        dma_addr_t addr;
        unsigned int avail_len;
};

struct mtk_chan {
        struct virt_dma_chan vc;
        struct dma_slave_config cfg;
        struct mtk_uart_apdma_desc *desc;
        enum dma_transfer_direction dir;
        void __iomem *base;
        unsigned int irq;
        unsigned int rx_status;
};

static inline struct mtk_uart_apdmadev *
to_mtk_uart_apdma_dev(struct dma_device *d)
{
        return container_of(d, struct mtk_uart_apdmadev, ddev);
}

static inline struct mtk_chan *to_mtk_uart_apdma_chan(struct dma_chan *c)
{
        return container_of(c, struct mtk_chan, vc.chan);
}

static inline struct mtk_uart_apdma_desc *to_mtk_uart_apdma_desc
        (struct dma_async_tx_descriptor *t)
{
        return container_of(t, struct mtk_uart_apdma_desc, vd.tx);
}

static void mtk_uart_apdma_write(struct mtk_chan *c,
                                 unsigned int reg, unsigned int val)
{
        writel(val, c->base + reg);
}

static unsigned int mtk_uart_apdma_read(struct mtk_chan *c, unsigned int reg)
{
        return readl(c->base + reg);
}

static void mtk_uart_apdma_desc_free(struct virt_dma_desc *vd)
{
        kfree(container_of(vd, struct mtk_uart_apdma_desc, vd));
}

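/*
 * Program the TX virtual FIFO and advance the write pointer so the DMA
 * engine starts draining data towards the UART. On first use (VFF_LEN is
 * still zero) the ring address, length and threshold come from the slave
 * config; dst_port_window_size is the ring size.
 */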
static void mtk_uart_apdma_start_tx(struct mtk_chan *c)
{
        struct mtk_uart_apdmadev *mtkd =
                to_mtk_uart_apdma_dev(c->vc.chan.device);
        struct mtk_uart_apdma_desc *d = c->desc;
        unsigned int wpt, vff_sz;

        vff_sz = c->cfg.dst_port_window_size;
        if (!mtk_uart_apdma_read(c, VFF_LEN)) {
                mtk_uart_apdma_write(c, VFF_ADDR, d->addr);
                mtk_uart_apdma_write(c, VFF_LEN, vff_sz);
                mtk_uart_apdma_write(c, VFF_THRE, VFF_TX_THRE(vff_sz));
                mtk_uart_apdma_write(c, VFF_WPT, 0);
                mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B);

                if (mtkd->support_33bits)
                        mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_EN_B);
        }

        mtk_uart_apdma_write(c, VFF_EN, VFF_EN_B);
        if (mtk_uart_apdma_read(c, VFF_EN) != VFF_EN_B)
                dev_err(c->vc.chan.device->dev, "Enable TX fail\n");

        if (!mtk_uart_apdma_read(c, VFF_LEFT_SIZE)) {
                mtk_uart_apdma_write(c, VFF_INT_EN, VFF_TX_INT_EN_B);
                return;
        }

        wpt = mtk_uart_apdma_read(c, VFF_WPT);
        wpt += c->desc->avail_len;
        /* When the offset reaches the ring size, clear it and toggle the wrap bit */
        if ((wpt & VFF_RING_SIZE) == vff_sz)
                wpt = (wpt & VFF_RING_WRAP) ^ VFF_RING_WRAP;

        /* Let DMA start moving data */
        mtk_uart_apdma_write(c, VFF_WPT, wpt);

        /* HW clears this automatically when left size >= threshold */
        mtk_uart_apdma_write(c, VFF_INT_EN, VFF_TX_INT_EN_B);
        if (!mtk_uart_apdma_read(c, VFF_FLUSH))
                mtk_uart_apdma_write(c, VFF_FLUSH, VFF_FLUSH_B);
}

static void mtk_uart_apdma_start_rx(struct mtk_chan *c)
{
        struct mtk_uart_apdmadev *mtkd =
                to_mtk_uart_apdma_dev(c->vc.chan.device);
        struct mtk_uart_apdma_desc *d = c->desc;
        unsigned int vff_sz;

        vff_sz = c->cfg.src_port_window_size;
        if (!mtk_uart_apdma_read(c, VFF_LEN)) {
                mtk_uart_apdma_write(c, VFF_ADDR, d->addr);
                mtk_uart_apdma_write(c, VFF_LEN, vff_sz);
                mtk_uart_apdma_write(c, VFF_THRE, VFF_RX_THRE(vff_sz));
                mtk_uart_apdma_write(c, VFF_RPT, 0);
                mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_RX_INT_CLR_B);

                if (mtkd->support_33bits)
                        mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_EN_B);
        }

        mtk_uart_apdma_write(c, VFF_INT_EN, VFF_RX_INT_EN_B);
        mtk_uart_apdma_write(c, VFF_EN, VFF_EN_B);
        if (mtk_uart_apdma_read(c, VFF_EN) != VFF_EN_B)
                dev_err(c->vc.chan.device->dev, "Enable RX fail\n");
}

static void mtk_uart_apdma_tx_handler(struct mtk_chan *c)
{
        mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B);
        mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);
        mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B);
}

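/*
 * RX interrupt: work out how many bytes the hardware has written by
 * comparing the read and write pointers (accounting for the wrap bit),
 * record the unfilled remainder in rx_status for residue reporting, and
 * return the consumed space by moving VFF_RPT up to VFF_WPT.
 */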
static void mtk_uart_apdma_rx_handler(struct mtk_chan *c)
{
        struct mtk_uart_apdma_desc *d = c->desc;
        unsigned int len, wg, rg;
        int cnt;

        mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_RX_INT_CLR_B);

        if (!mtk_uart_apdma_read(c, VFF_VALID_SIZE))
                return;

        mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B);
        mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);

        len = c->cfg.src_port_window_size;
        rg = mtk_uart_apdma_read(c, VFF_RPT);
        wg = mtk_uart_apdma_read(c, VFF_WPT);
        cnt = (wg & VFF_RING_SIZE) - (rg & VFF_RING_SIZE);

        /*
         * The buffer is a ring buffer. If the wrap bits differ, the write
         * pointer has already started the next cycle.
         */
        if ((rg ^ wg) & VFF_RING_WRAP)
                cnt += len;

        c->rx_status = d->avail_len - cnt;
        mtk_uart_apdma_write(c, VFF_RPT, wg);
}

static void mtk_uart_apdma_chan_complete_handler(struct mtk_chan *c)
{
        struct mtk_uart_apdma_desc *d = c->desc;

        if (d) {
                list_del(&d->vd.node);
                vchan_cookie_complete(&d->vd);
                c->desc = NULL;
        }
}

static irqreturn_t mtk_uart_apdma_irq_handler(int irq, void *dev_id)
{
        struct dma_chan *chan = (struct dma_chan *)dev_id;
        struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&c->vc.lock, flags);
        if (c->dir == DMA_DEV_TO_MEM)
                mtk_uart_apdma_rx_handler(c);
        else if (c->dir == DMA_MEM_TO_DEV)
                mtk_uart_apdma_tx_handler(c);
        mtk_uart_apdma_chan_complete_handler(c);
        spin_unlock_irqrestore(&c->vc.lock, flags);

        return IRQ_HANDLED;
}

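/*
 * Channel setup: take a runtime-PM reference, warm-reset the virtual FIFO
 * and wait for VFF_EN to clear, then request the per-channel interrupt.
 */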
static int mtk_uart_apdma_alloc_chan_resources(struct dma_chan *chan)
{
        struct mtk_uart_apdmadev *mtkd = to_mtk_uart_apdma_dev(chan->device);
        struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
        unsigned int status;
        int ret;

        ret = pm_runtime_resume_and_get(mtkd->ddev.dev);
        if (ret < 0) {
                pm_runtime_put_noidle(chan->device->dev);
                return ret;
        }

        mtk_uart_apdma_write(c, VFF_ADDR, 0);
        mtk_uart_apdma_write(c, VFF_THRE, 0);
        mtk_uart_apdma_write(c, VFF_LEN, 0);
        mtk_uart_apdma_write(c, VFF_RST, VFF_WARM_RST_B);

        ret = readx_poll_timeout(readl, c->base + VFF_EN,
                                 status, !status, 10, 100);
        if (ret)
                goto err_pm;

        ret = request_irq(c->irq, mtk_uart_apdma_irq_handler,
                          IRQF_TRIGGER_NONE, KBUILD_MODNAME, chan);
        if (ret < 0) {
                dev_err(chan->device->dev, "Can't request dma IRQ\n");
                ret = -EINVAL;
                goto err_pm;
        }

        if (mtkd->support_33bits)
                mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_SUPPORT_CLR_B);

err_pm:
        pm_runtime_put_noidle(mtkd->ddev.dev);
        return ret;
}

static void mtk_uart_apdma_free_chan_resources(struct dma_chan *chan)
{
        struct mtk_uart_apdmadev *mtkd = to_mtk_uart_apdma_dev(chan->device);
        struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);

        free_irq(c->irq, chan);

        tasklet_kill(&c->vc.task);

        vchan_free_chan_resources(&c->vc);

        pm_runtime_put_sync(mtkd->ddev.dev);
}

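/*
 * Residue is only tracked for RX: rx_status is updated by the RX interrupt
 * handler, so the value reported here reflects the most recent RX transfer.
 */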
static enum dma_status mtk_uart_apdma_tx_status(struct dma_chan *chan,
                                                dma_cookie_t cookie,
                                                struct dma_tx_state *txstate)
{
        struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
        enum dma_status ret;

        ret = dma_cookie_status(chan, cookie, txstate);
        if (!txstate)
                return ret;

        dma_set_residue(txstate, c->rx_status);

        return ret;
}

/*
 * dmaengine_prep_slave_single() calls this function with sglen == 1:
 * the 8250 UART driver uses a single ring buffer and hands over one sg
 * entry at a time.
 */
static struct dma_async_tx_descriptor *mtk_uart_apdma_prep_slave_sg
        (struct dma_chan *chan, struct scatterlist *sgl,
         unsigned int sglen, enum dma_transfer_direction dir,
         unsigned long tx_flags, void *context)
{
        struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
        struct mtk_uart_apdma_desc *d;

        if (!is_slave_direction(dir) || sglen != 1)
                return NULL;

        /* Now allocate and setup the descriptor */
        d = kzalloc(sizeof(*d), GFP_NOWAIT);
        if (!d)
                return NULL;

        d->avail_len = sg_dma_len(sgl);
        d->addr = sg_dma_address(sgl);
        c->dir = dir;

        return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}

static void mtk_uart_apdma_issue_pending(struct dma_chan *chan)
{
        struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
        struct virt_dma_desc *vd;
        unsigned long flags;

        spin_lock_irqsave(&c->vc.lock, flags);
        if (vchan_issue_pending(&c->vc) && !c->desc) {
                vd = vchan_next_desc(&c->vc);
                c->desc = to_mtk_uart_apdma_desc(&vd->tx);

                if (c->dir == DMA_DEV_TO_MEM)
                        mtk_uart_apdma_start_rx(c);
                else if (c->dir == DMA_MEM_TO_DEV)
                        mtk_uart_apdma_start_tx(c);
        }

        spin_unlock_irqrestore(&c->vc.lock, flags);
}

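/*
 * The UART driver is expected to set src/dst_port_window_size in the slave
 * config to the ring buffer length; mtk_uart_apdma_start_rx()/_tx() program
 * VFF_LEN and VFF_THRE from those values.
 */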
static int mtk_uart_apdma_slave_config(struct dma_chan *chan,
                                       struct dma_slave_config *config)
{
        struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);

        memcpy(&c->cfg, config, sizeof(*config));

        return 0;
}

static int mtk_uart_apdma_terminate_all(struct dma_chan *chan)
{
        struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
        unsigned long flags;
        unsigned int status;
        LIST_HEAD(head);
        int ret;

        mtk_uart_apdma_write(c, VFF_FLUSH, VFF_FLUSH_B);

        ret = readx_poll_timeout(readl, c->base + VFF_FLUSH,
                                 status, status != VFF_FLUSH_B, 10, 100);
        if (ret)
                dev_err(c->vc.chan.device->dev, "flush: fail, status=0x%x\n",
                        mtk_uart_apdma_read(c, VFF_DEBUG_STATUS));

        /*
         * Stopping takes three steps:
         * 1. set VFF_STOP to 1
         * 2. wait for VFF_EN to become 0
         * 3. set VFF_STOP back to 0
         */
        mtk_uart_apdma_write(c, VFF_STOP, VFF_STOP_B);
        ret = readx_poll_timeout(readl, c->base + VFF_EN,
                                 status, !status, 10, 100);
        if (ret)
                dev_err(c->vc.chan.device->dev, "stop: fail, status=0x%x\n",
                        mtk_uart_apdma_read(c, VFF_DEBUG_STATUS));

        mtk_uart_apdma_write(c, VFF_STOP, VFF_STOP_CLR_B);
        mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);

        if (c->dir == DMA_DEV_TO_MEM)
                mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_RX_INT_CLR_B);
        else if (c->dir == DMA_MEM_TO_DEV)
                mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B);

        synchronize_irq(c->irq);

        spin_lock_irqsave(&c->vc.lock, flags);
        vchan_get_all_descriptors(&c->vc, &head);
        spin_unlock_irqrestore(&c->vc.lock, flags);

        vchan_dma_desc_free_list(&c->vc, &head);

        return 0;
}

static int mtk_uart_apdma_device_pause(struct dma_chan *chan)
{
        struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&c->vc.lock, flags);

        mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B);
        mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);

        synchronize_irq(c->irq);

        spin_unlock_irqrestore(&c->vc.lock, flags);

        return 0;
}

static void mtk_uart_apdma_free(struct mtk_uart_apdmadev *mtkd)
{
        while (!list_empty(&mtkd->ddev.channels)) {
                struct mtk_chan *c = list_first_entry(&mtkd->ddev.channels,
                        struct mtk_chan, vc.chan.device_node);

                list_del(&c->vc.chan.device_node);
                tasklet_kill(&c->vc.task);
        }
}

static const struct of_device_id mtk_uart_apdma_match[] = {
        { .compatible = "mediatek,mt6577-uart-dma", },
        { /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, mtk_uart_apdma_match);

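/*
 * Illustrative device-tree usage (a sketch inferred from this driver, not
 * copied from the binding document; addresses, interrupts and clock phandles
 * below are placeholders):
 *
 *	apdma: dma-controller@11000400 {
 *		compatible = "mediatek,mt6577-uart-dma";
 *		reg = <0x11000400 0x80>,
 *		      <0x11000480 0x80>;	(one register window per channel)
 *		interrupts = <...>, <...>;	(one interrupt per channel)
 *		clocks = <&pericfg ...>;
 *		dma-requests = <2>;		(defaults to 8 if absent)
 *		mediatek,dma-33bits;		(optional 33-bit addressing)
 *		#dma-cells = <1>;		(channel index, see of_dma_xlate_by_chan_id)
 *	};
 */
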
static int mtk_uart_apdma_probe(struct platform_device *pdev)
{
        struct device_node *np = pdev->dev.of_node;
        struct mtk_uart_apdmadev *mtkd;
        int bit_mask = 32, rc;
        struct mtk_chan *c;
        unsigned int i;

        mtkd = devm_kzalloc(&pdev->dev, sizeof(*mtkd), GFP_KERNEL);
        if (!mtkd)
                return -ENOMEM;

        mtkd->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(mtkd->clk)) {
                dev_err(&pdev->dev, "No clock specified\n");
                rc = PTR_ERR(mtkd->clk);
                return rc;
        }

        if (of_property_read_bool(np, "mediatek,dma-33bits"))
                mtkd->support_33bits = true;

        if (mtkd->support_33bits)
                bit_mask = 33;

        rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(bit_mask));
        if (rc)
                return rc;

        dma_cap_set(DMA_SLAVE, mtkd->ddev.cap_mask);
        mtkd->ddev.device_alloc_chan_resources =
                                mtk_uart_apdma_alloc_chan_resources;
        mtkd->ddev.device_free_chan_resources =
                                mtk_uart_apdma_free_chan_resources;
        mtkd->ddev.device_tx_status = mtk_uart_apdma_tx_status;
        mtkd->ddev.device_issue_pending = mtk_uart_apdma_issue_pending;
        mtkd->ddev.device_prep_slave_sg = mtk_uart_apdma_prep_slave_sg;
        mtkd->ddev.device_config = mtk_uart_apdma_slave_config;
        mtkd->ddev.device_pause = mtk_uart_apdma_device_pause;
        mtkd->ddev.device_terminate_all = mtk_uart_apdma_terminate_all;
        mtkd->ddev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE);
        mtkd->ddev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE);
        mtkd->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
        mtkd->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
        mtkd->ddev.dev = &pdev->dev;
        INIT_LIST_HEAD(&mtkd->ddev.channels);

        mtkd->dma_requests = MTK_UART_APDMA_NR_VCHANS;
        if (of_property_read_u32(np, "dma-requests", &mtkd->dma_requests)) {
                dev_info(&pdev->dev,
                         "Using %u as missing dma-requests property\n",
                         MTK_UART_APDMA_NR_VCHANS);
        }

        for (i = 0; i < mtkd->dma_requests; i++) {
                c = devm_kzalloc(mtkd->ddev.dev, sizeof(*c), GFP_KERNEL);
                if (!c) {
                        rc = -ENODEV;
                        goto err_no_dma;
                }

                c->base = devm_platform_ioremap_resource(pdev, i);
                if (IS_ERR(c->base)) {
                        rc = PTR_ERR(c->base);
                        goto err_no_dma;
                }
                c->vc.desc_free = mtk_uart_apdma_desc_free;
                vchan_init(&c->vc, &mtkd->ddev);

                rc = platform_get_irq(pdev, i);
                if (rc < 0)
                        goto err_no_dma;
                c->irq = rc;
        }

        pm_runtime_enable(&pdev->dev);
        pm_runtime_set_active(&pdev->dev);

        rc = dma_async_device_register(&mtkd->ddev);
        if (rc)
                goto rpm_disable;

        platform_set_drvdata(pdev, mtkd);

        /* Device-tree DMA controller registration */
        rc = of_dma_controller_register(np, of_dma_xlate_by_chan_id, mtkd);
        if (rc)
                goto dma_remove;

        return rc;

dma_remove:
        dma_async_device_unregister(&mtkd->ddev);
rpm_disable:
        pm_runtime_disable(&pdev->dev);
err_no_dma:
        mtk_uart_apdma_free(mtkd);
        return rc;
}

static int mtk_uart_apdma_remove(struct platform_device *pdev)
{
        struct mtk_uart_apdmadev *mtkd = platform_get_drvdata(pdev);

        of_dma_controller_free(pdev->dev.of_node);

        mtk_uart_apdma_free(mtkd);

        dma_async_device_unregister(&mtkd->ddev);

        pm_runtime_disable(&pdev->dev);

        return 0;
}

#ifdef CONFIG_PM_SLEEP
static int mtk_uart_apdma_suspend(struct device *dev)
{
        struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);

        if (!pm_runtime_suspended(dev))
                clk_disable_unprepare(mtkd->clk);

        return 0;
}

static int mtk_uart_apdma_resume(struct device *dev)
{
        int ret;
        struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);

        if (!pm_runtime_suspended(dev)) {
                ret = clk_prepare_enable(mtkd->clk);
                if (ret)
                        return ret;
        }

        return 0;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM
static int mtk_uart_apdma_runtime_suspend(struct device *dev)
{
        struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);

        clk_disable_unprepare(mtkd->clk);

        return 0;
}

static int mtk_uart_apdma_runtime_resume(struct device *dev)
{
        struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);

        return clk_prepare_enable(mtkd->clk);
}
#endif /* CONFIG_PM */

static const struct dev_pm_ops mtk_uart_apdma_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(mtk_uart_apdma_suspend, mtk_uart_apdma_resume)
        SET_RUNTIME_PM_OPS(mtk_uart_apdma_runtime_suspend,
                           mtk_uart_apdma_runtime_resume, NULL)
};

static struct platform_driver mtk_uart_apdma_driver = {
        .probe	= mtk_uart_apdma_probe,
        .remove	= mtk_uart_apdma_remove,
        .driver = {
                .name		= KBUILD_MODNAME,
                .pm		= &mtk_uart_apdma_pm_ops,
                .of_match_table = of_match_ptr(mtk_uart_apdma_match),
        },
};
module_platform_driver(mtk_uart_apdma_driver);

MODULE_DESCRIPTION("MediaTek UART APDMA Controller Driver");
MODULE_AUTHOR("Long Cheng <long.cheng@mediatek.com>");
MODULE_LICENSE("GPL v2");