// SPDX-License-Identifier: GPL-2.0+
/*
 * Freescale i.MX28 APBH DMA driver
 *
 * Copyright (C) 2011 Marek Vasut <marek.vasut@gmail.com>
 * on behalf of DENX Software Engineering GmbH
 *
 * Based on code from LTIB:
 * Copyright (C) 2010 Freescale Semiconductor, Inc. All Rights Reserved.
 * Copyright 2017 NXP
 *
 */
#include <cpu_func.h>
#include <asm/cache.h>
#include <linux/list.h>

#include <common.h>
#include <malloc.h>
#include <linux/errno.h>
#include <asm/io.h>
#include <asm/arch/clock.h>
#include <asm/arch/imx-regs.h>
#include <asm/arch/sys_proto.h>
#include <asm/mach-imx/dma.h>
#include <asm/mach-imx/regs-apbh.h>

static struct mxs_dma_chan mxs_dma_channels[MXS_MAX_DMA_CHANNELS];

/*
 * Test if the DMA channel is a valid channel
 */
int mxs_dma_validate_chan(int channel)
{
	struct mxs_dma_chan *pchan;

	if ((channel < 0) || (channel >= MXS_MAX_DMA_CHANNELS))
		return -EINVAL;

	pchan = mxs_dma_channels + channel;
	if (!(pchan->flags & MXS_DMA_FLAGS_ALLOCATED))
		return -EINVAL;

	return 0;
}

/*
 * Return the address of the command within a descriptor.
 */
static unsigned int mxs_dma_cmd_address(struct mxs_dma_desc *desc)
{
	return desc->address + offsetof(struct mxs_dma_desc, cmd);
}

/*
 * Read a DMA channel's hardware semaphore.
 *
 * As used by the MXS platform's DMA software, the DMA channel's hardware
 * semaphore reflects the number of DMA commands the hardware will process, but
 * has not yet finished. This is a volatile value read directly from hardware,
 * so it must be viewed as immediately stale.
 *
 * If the channel is not marked busy, or has finished processing all its
 * commands, this value should be zero.
 *
 * See mxs_dma_desc_append() for details on how DMA command blocks must be
 * configured to maintain the expected behavior of the semaphore's value.
 */
static int mxs_dma_read_semaphore(int channel)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	uint32_t tmp;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	tmp = readl(&apbh_regs->ch[channel].hw_apbh_ch_sema);

	tmp &= APBH_CHn_SEMA_PHORE_MASK;
	tmp >>= APBH_CHn_SEMA_PHORE_OFFSET;

	return tmp;
}

#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
void mxs_dma_flush_desc(struct mxs_dma_desc *desc)
{
	uint32_t addr;
	uint32_t size;

	addr = (uintptr_t)desc;
	size = roundup(sizeof(struct mxs_dma_desc), MXS_DMA_ALIGNMENT);

	flush_dcache_range(addr, addr + size);
}
#else
inline void mxs_dma_flush_desc(struct mxs_dma_desc *desc) {}
#endif
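
/*
 * The DMA engine fetches command descriptors directly from RAM, bypassing
 * the CPU's data cache, so a descriptor must be flushed with
 * mxs_dma_flush_desc() after the CPU last writes it and before the channel
 * is kicked; otherwise the hardware may fetch a stale command.
 */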

/*
 * Enable a DMA channel.
 *
 * If the given channel has any DMA descriptors on its active list, this
 * function causes the DMA hardware to begin processing them.
 *
 * This function marks the DMA channel as "busy," whether or not there are any
 * descriptors to process.
 */
static int mxs_dma_enable(int channel)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	unsigned int sem;
	struct mxs_dma_chan *pchan;
	struct mxs_dma_desc *pdesc;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	if (pchan->pending_num == 0) {
		pchan->flags |= MXS_DMA_FLAGS_BUSY;
		return 0;
	}

	pdesc = list_first_entry(&pchan->active, struct mxs_dma_desc, node);
	if (pdesc == NULL)
		return -EFAULT;

	if (pchan->flags & MXS_DMA_FLAGS_BUSY) {
		if (!(pdesc->cmd.data & MXS_DMA_DESC_CHAIN))
			return 0;

		sem = mxs_dma_read_semaphore(channel);
		if (sem == 0)
			return 0;

		if (sem == 1) {
			pdesc = list_entry(pdesc->node.next,
					   struct mxs_dma_desc, node);
			writel(mxs_dma_cmd_address(pdesc),
			       &apbh_regs->ch[channel].hw_apbh_ch_nxtcmdar);
		}
		writel(pchan->pending_num,
		       &apbh_regs->ch[channel].hw_apbh_ch_sema);
		pchan->active_num += pchan->pending_num;
		pchan->pending_num = 0;
	} else {
		pchan->active_num += pchan->pending_num;
		pchan->pending_num = 0;
		writel(mxs_dma_cmd_address(pdesc),
		       &apbh_regs->ch[channel].hw_apbh_ch_nxtcmdar);
		writel(pchan->active_num,
		       &apbh_regs->ch[channel].hw_apbh_ch_sema);
		writel(1 << (channel + APBH_CTRL0_CLKGATE_CHANNEL_OFFSET),
		       &apbh_regs->hw_apbh_ctrl0_clr);
	}

	pchan->flags |= MXS_DMA_FLAGS_BUSY;
	return 0;
}

/*
 * Disable a DMA channel.
 *
 * This function shuts down a DMA channel and marks it as "not busy." Any
 * descriptors on the active list are immediately moved to the head of the
 * "done" list, whether or not they have actually been processed by the
 * hardware. The "ready" flags of these descriptors are NOT cleared, so they
 * still appear to be active.
 *
 * This function immediately shuts down a DMA channel's hardware, aborting any
 * I/O that may be in progress, potentially leaving I/O hardware in an undefined
 * state. It is unwise to call this function if there is ANY chance the hardware
 * is still processing a command.
 */
static int mxs_dma_disable(int channel)
{
	struct mxs_dma_chan *pchan;
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	if (!(pchan->flags & MXS_DMA_FLAGS_BUSY))
		return -EINVAL;

	writel(1 << (channel + APBH_CTRL0_CLKGATE_CHANNEL_OFFSET),
	       &apbh_regs->hw_apbh_ctrl0_set);

	pchan->flags &= ~MXS_DMA_FLAGS_BUSY;
	pchan->active_num = 0;
	pchan->pending_num = 0;
	list_splice_init(&pchan->active, &pchan->done);

	return 0;
}

/*
 * Resets the DMA channel hardware.
 */
static int mxs_dma_reset(int channel)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	int ret;
#if defined(CONFIG_MX23)
	uint32_t setreg = (uint32_t)(&apbh_regs->hw_apbh_ctrl0_set);
	uint32_t offset = APBH_CTRL0_RESET_CHANNEL_OFFSET;
#elif defined(CONFIG_MX28) || defined(CONFIG_MX6) || defined(CONFIG_MX7) || \
	defined(CONFIG_IMX8) || defined(CONFIG_IMX8M)
	u32 setreg = (uintptr_t)(&apbh_regs->hw_apbh_channel_ctrl_set);
	u32 offset = APBH_CHANNEL_CTRL_RESET_CHANNEL_OFFSET;
#endif

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	writel(1 << (channel + offset), (uintptr_t)setreg);

	return 0;
}

/*
 * Enable or disable DMA interrupt.
 *
 * This function enables or disables the given DMA channel's ability to
 * interrupt the CPU.
 */
static int mxs_dma_enable_irq(int channel, int enable)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	if (enable)
		writel(1 << (channel + APBH_CTRL1_CH_CMDCMPLT_IRQ_EN_OFFSET),
		       &apbh_regs->hw_apbh_ctrl1_set);
	else
		writel(1 << (channel + APBH_CTRL1_CH_CMDCMPLT_IRQ_EN_OFFSET),
		       &apbh_regs->hw_apbh_ctrl1_clr);

	return 0;
}

/*
 * Clear DMA interrupt.
 *
 * The software that is using the DMA channel must register to receive its
 * interrupts and, when they arrive, must call this function to clear them.
 */
static int mxs_dma_ack_irq(int channel)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	writel(1 << channel, &apbh_regs->hw_apbh_ctrl1_clr);
	writel(1 << channel, &apbh_regs->hw_apbh_ctrl2_clr);

	return 0;
}

/*
 * Request to reserve a DMA channel
 */
static int mxs_dma_request(int channel)
{
	struct mxs_dma_chan *pchan;

	if ((channel < 0) || (channel >= MXS_MAX_DMA_CHANNELS))
		return -EINVAL;

	pchan = mxs_dma_channels + channel;
	if ((pchan->flags & MXS_DMA_FLAGS_VALID) != MXS_DMA_FLAGS_VALID)
		return -ENODEV;

	if (pchan->flags & MXS_DMA_FLAGS_ALLOCATED)
		return -EBUSY;

	pchan->flags |= MXS_DMA_FLAGS_ALLOCATED;
	pchan->active_num = 0;
	pchan->pending_num = 0;

	INIT_LIST_HEAD(&pchan->active);
	INIT_LIST_HEAD(&pchan->done);

	return 0;
}

/*
 * Release a DMA channel.
 *
 * This function releases a DMA channel from its current owner.
 *
 * The channel will NOT be released if it's marked "busy" (see
 * mxs_dma_enable()).
 */
int mxs_dma_release(int channel)
{
	struct mxs_dma_chan *pchan;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	if (pchan->flags & MXS_DMA_FLAGS_BUSY)
		return -EBUSY;

	pchan->dev = 0;
	pchan->active_num = 0;
	pchan->pending_num = 0;
	pchan->flags &= ~MXS_DMA_FLAGS_ALLOCATED;

	return 0;
}

/*
 * Allocate DMA descriptor
 */
struct mxs_dma_desc *mxs_dma_desc_alloc(void)
{
	struct mxs_dma_desc *pdesc;
	uint32_t size;

	size = roundup(sizeof(struct mxs_dma_desc), MXS_DMA_ALIGNMENT);
	pdesc = memalign(MXS_DMA_ALIGNMENT, size);

	if (pdesc == NULL)
		return NULL;

	memset(pdesc, 0, sizeof(*pdesc));
	pdesc->address = (dma_addr_t)pdesc;

	return pdesc;
}
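
/*
 * The allocation above is both aligned and padded to MXS_DMA_ALIGNMENT so
 * that each descriptor occupies its own set of cache lines; flushing or
 * invalidating a descriptor therefore cannot clobber unrelated data that
 * would otherwise share a cache line. MXS_DMA_ALIGNMENT is expected to be
 * at least the CPU's cache-line size.
 */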

/*
 * Free DMA descriptor
 */
void mxs_dma_desc_free(struct mxs_dma_desc *pdesc)
{
	if (pdesc == NULL)
		return;

	free(pdesc);
}

/*
 * Add a DMA descriptor to a channel.
 *
 * If the descriptor list for this channel is not empty, this function sets the
 * CHAIN bit and the NEXTCMD_ADDR fields in the last descriptor's DMA command so
 * it will chain to the new descriptor's command.
 *
 * Then, this function marks the new descriptor as "ready," adds it to the end
 * of the active descriptor list, and increments the count of pending
 * descriptors.
 *
 * The MXS platform DMA software imposes some rules on DMA commands to maintain
 * important invariants. These rules are NOT checked, but they must be carefully
 * applied by software that uses MXS DMA channels.
 *
 * Invariant:
 *     The DMA channel's hardware semaphore must reflect the number of DMA
 *     commands the hardware will process, but has not yet finished.
 *
 * Explanation:
 *     A DMA channel begins processing commands when its hardware semaphore is
 *     written with a value greater than zero, and it stops processing commands
 *     when the semaphore returns to zero.
 *
 *     When a channel finishes a DMA command, it will decrement its semaphore if
 *     the DECREMENT_SEMAPHORE bit is set in that command's flags bits.
 *
 *     In principle, it's not necessary for the DECREMENT_SEMAPHORE bit to be
 *     set, unless it suits the purposes of the software. For example, one could
 *     construct a series of five DMA commands, with the DECREMENT_SEMAPHORE
 *     bit set only in the last one. Then, setting the DMA channel's hardware
 *     semaphore to one would cause the entire series of five commands to be
 *     processed. However, this example would violate the invariant given above.
 *
 * Rule:
 *     ALL DMA commands MUST have the DECREMENT_SEMAPHORE bit set so that the
 *     DMA channel's hardware semaphore will be decremented EVERY time a
 *     command is processed.
 */
int mxs_dma_desc_append(int channel, struct mxs_dma_desc *pdesc)
{
	struct mxs_dma_chan *pchan;
	struct mxs_dma_desc *last;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	pdesc->cmd.next = mxs_dma_cmd_address(pdesc);
	pdesc->flags |= MXS_DMA_DESC_FIRST | MXS_DMA_DESC_LAST;

	if (!list_empty(&pchan->active)) {
		last = list_entry(pchan->active.prev, struct mxs_dma_desc,
				  node);

		pdesc->flags &= ~MXS_DMA_DESC_FIRST;
		last->flags &= ~MXS_DMA_DESC_LAST;

		last->cmd.next = mxs_dma_cmd_address(pdesc);
		last->cmd.data |= MXS_DMA_DESC_CHAIN;

		mxs_dma_flush_desc(last);
	}
	pdesc->flags |= MXS_DMA_DESC_READY;
	if (pdesc->flags & MXS_DMA_DESC_FIRST)
		pchan->pending_num++;
	list_add_tail(&pdesc->node, &pchan->active);

	mxs_dma_flush_desc(pdesc);

	return ret;
}
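
/*
 * Illustrative sketch (not part of the driver): one way a caller might
 * build a command that follows the DECREMENT_SEMAPHORE rule above. The
 * flag names are assumed to match <asm/mach-imx/dma.h>; "chan", "buf" and
 * "len" are hypothetical caller-side variables.
 *
 *	struct mxs_dma_desc *d = mxs_dma_desc_alloc();
 *
 *	if (!d)
 *		return -ENOMEM;
 *	d->cmd.data = MXS_DMA_DESC_COMMAND_DMA_WRITE |	(peripheral -> memory)
 *		      MXS_DMA_DESC_IRQ | MXS_DMA_DESC_DEC_SEM |
 *		      MXS_DMA_DESC_WAIT4END |
 *		      (len << MXS_DMA_DESC_BYTES_OFFSET);
 *	d->cmd.address = (dma_addr_t)buf;
 *	mxs_dma_desc_append(chan, d);
 */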

/*
 * Clean up processed DMA descriptors.
 *
 * This function removes processed DMA descriptors from the "active" list. Pass
 * in a non-NULL list head to get the descriptors moved to your list. Pass NULL
 * to get the descriptors moved to the channel's "done" list. Descriptors on
 * the "done" list can be retrieved with mxs_dma_get_finished().
 *
 * This function marks the DMA channel as "not busy" if no unprocessed
 * descriptors remain on the "active" list.
 */
static int mxs_dma_finish(int channel, struct list_head *head)
{
	int sem;
	struct mxs_dma_chan *pchan;
	struct list_head *p, *q;
	struct mxs_dma_desc *pdesc;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	sem = mxs_dma_read_semaphore(channel);
	if (sem < 0)
		return sem;

	if (sem == pchan->active_num)
		return 0;

	list_for_each_safe(p, q, &pchan->active) {
		if ((pchan->active_num) <= sem)
			break;

		pdesc = list_entry(p, struct mxs_dma_desc, node);
		pdesc->flags &= ~MXS_DMA_DESC_READY;

		if (head)
			list_move_tail(p, head);
		else
			list_move_tail(p, &pchan->done);

		if (pdesc->flags & MXS_DMA_DESC_LAST)
			pchan->active_num--;
	}

	if (sem == 0)
		pchan->flags &= ~MXS_DMA_FLAGS_BUSY;

	return 0;
}

/*
 * Wait for DMA channel to complete
 */
static int mxs_dma_wait_complete(uint32_t timeout, unsigned int chan)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	int ret;

	ret = mxs_dma_validate_chan(chan);
	if (ret)
		return ret;

	if (mxs_wait_mask_set(&apbh_regs->hw_apbh_ctrl1_reg,
			      1 << chan, timeout)) {
		ret = -ETIMEDOUT;
		mxs_dma_reset(chan);
	}

	return ret;
}
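
/*
 * The timeout is handed straight to mxs_wait_mask_set(), whose polling
 * loop in the MXS SoC code delays about 1 us per iteration, so the value
 * is effectively a timeout in microseconds; the 10000000 used by
 * mxs_dma_go() below is then roughly a 10 s budget.
 */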

/*
 * Execute the DMA channel
 */
int mxs_dma_go(int chan)
{
	uint32_t timeout = 10000000;
	int ret;

	LIST_HEAD(tmp_desc_list);

	mxs_dma_enable_irq(chan, 1);
	mxs_dma_enable(chan);

	/* Wait for DMA to finish. */
	ret = mxs_dma_wait_complete(timeout, chan);

	/* Clear out the descriptors we just ran. */
	mxs_dma_finish(chan, &tmp_desc_list);

	/* Shut the DMA channel down. */
	mxs_dma_ack_irq(chan);
	mxs_dma_reset(chan);
	mxs_dma_enable_irq(chan, 0);
	mxs_dma_disable(chan);

	return ret;
}

/*
 * Execute a continuously running circular DMA descriptor.
 * NOTE: This is not intended for general use, but rather
 *	 for the LCD driver in Smart-LCD mode. It allows
 *	 continuous triggering of the RUN bit there.
 */
void mxs_dma_circ_start(int chan, struct mxs_dma_desc *pdesc)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;

	mxs_dma_flush_desc(pdesc);

	mxs_dma_enable_irq(chan, 1);

	writel(mxs_dma_cmd_address(pdesc),
	       &apbh_regs->ch[chan].hw_apbh_ch_nxtcmdar);
	writel(1, &apbh_regs->ch[chan].hw_apbh_ch_sema);
	writel(1 << (chan + APBH_CTRL0_CLKGATE_CHANNEL_OFFSET),
	       &apbh_regs->hw_apbh_ctrl0_clr);
}

/*
 * Initialize the DMA hardware
 */
void mxs_dma_init(void)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;

	mxs_reset_block(&apbh_regs->hw_apbh_ctrl0_reg);

#ifdef CONFIG_APBH_DMA_BURST8
	writel(APBH_CTRL0_AHB_BURST8_EN,
	       &apbh_regs->hw_apbh_ctrl0_set);
#else
	writel(APBH_CTRL0_AHB_BURST8_EN,
	       &apbh_regs->hw_apbh_ctrl0_clr);
#endif

#ifdef CONFIG_APBH_DMA_BURST
	writel(APBH_CTRL0_APB_BURST_EN,
	       &apbh_regs->hw_apbh_ctrl0_set);
#else
	writel(APBH_CTRL0_APB_BURST_EN,
	       &apbh_regs->hw_apbh_ctrl0_clr);
#endif
}

int mxs_dma_init_channel(int channel)
{
	struct mxs_dma_chan *pchan;
	int ret;

	pchan = mxs_dma_channels + channel;
	pchan->flags = MXS_DMA_FLAGS_VALID;

	ret = mxs_dma_request(channel);

	if (ret) {
		printf("MXS DMA: Can't acquire DMA channel %i\n",
		       channel);
		return ret;
	}

	mxs_dma_reset(channel);
	mxs_dma_ack_irq(channel);

	return 0;
}
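
/*
 * Putting it together, a minimal one-shot transfer might look like the
 * sketch below. This is an illustration, not driver code; "chan", "d",
 * "buf" and "len" are hypothetical, and the descriptor flags are assumed
 * to come from <asm/mach-imx/dma.h> as in the sketch after
 * mxs_dma_desc_append() above.
 *
 *	mxs_dma_init();
 *	mxs_dma_init_channel(chan);
 *
 *	struct mxs_dma_desc *d = mxs_dma_desc_alloc();
 *	d->cmd.data = MXS_DMA_DESC_COMMAND_DMA_WRITE | MXS_DMA_DESC_IRQ |
 *		      MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END |
 *		      (len << MXS_DMA_DESC_BYTES_OFFSET);
 *	d->cmd.address = (dma_addr_t)buf;
 *	mxs_dma_desc_append(chan, d);
 *
 *	if (mxs_dma_go(chan))		(-ETIMEDOUT on timeout)
 *		puts("DMA timed out\n");
 *	mxs_dma_desc_free(d);
 *	mxs_dma_release(chan);
 */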