apbh_dma.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * Freescale i.MX28 APBH DMA driver
 *
 * Copyright (C) 2011 Marek Vasut <marek.vasut@gmail.com>
 * on behalf of DENX Software Engineering GmbH
 *
 * Based on code from LTIB:
 * Copyright (C) 2010 Freescale Semiconductor, Inc. All Rights Reserved.
 * Copyright 2017 NXP
 */

#include <cpu_func.h>
#include <linux/list.h>
#include <common.h>
#include <malloc.h>
#include <linux/errno.h>
#include <asm/io.h>
#include <asm/arch/clock.h>
#include <asm/arch/imx-regs.h>
#include <asm/arch/sys_proto.h>
#include <asm/mach-imx/dma.h>
#include <asm/mach-imx/regs-apbh.h>

static struct mxs_dma_chan mxs_dma_channels[MXS_MAX_DMA_CHANNELS];

/*
 * Test if the DMA channel is a valid channel
 */
int mxs_dma_validate_chan(int channel)
{
	struct mxs_dma_chan *pchan;

	if ((channel < 0) || (channel >= MXS_MAX_DMA_CHANNELS))
		return -EINVAL;

	pchan = mxs_dma_channels + channel;
	if (!(pchan->flags & MXS_DMA_FLAGS_ALLOCATED))
		return -EINVAL;

	return 0;
}

/*
 * Return the address of the command within a descriptor.
 */
static unsigned int mxs_dma_cmd_address(struct mxs_dma_desc *desc)
{
	return desc->address + offsetof(struct mxs_dma_desc, cmd);
}

/*
 * Read a DMA channel's hardware semaphore.
 *
 * As used by the MXS platform's DMA software, the DMA channel's hardware
 * semaphore reflects the number of DMA commands the hardware will process, but
 * has not yet finished. This is a volatile value read directly from hardware,
 * so it must be viewed as immediately stale.
 *
 * If the channel is not marked busy, or has finished processing all its
 * commands, this value should be zero.
 *
 * See mxs_dma_desc_append() for details on how DMA command blocks must be
 * configured to maintain the expected behavior of the semaphore's value.
 */
static int mxs_dma_read_semaphore(int channel)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	uint32_t tmp;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	tmp = readl(&apbh_regs->ch[channel].hw_apbh_ch_sema);

	tmp &= APBH_CHn_SEMA_PHORE_MASK;
	tmp >>= APBH_CHn_SEMA_PHORE_OFFSET;

	return tmp;
}
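
/*
 * Illustrative only (not part of the driver): because the semaphore counts
 * commands the hardware has not yet finished, a caller could in principle
 * drain a channel by polling it down to zero:
 *
 *	while (mxs_dma_read_semaphore(channel) > 0)
 *		;
 *
 * The driver itself instead waits on the command-complete IRQ status; see
 * mxs_dma_wait_complete() below.
 */

/*
 * Flush a DMA descriptor out to memory.
 *
 * The APBH engine fetches descriptors directly from memory, bypassing the
 * CPU data cache, so a descriptor must be flushed after the CPU modifies it.
 */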
#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
void mxs_dma_flush_desc(struct mxs_dma_desc *desc)
{
	uint32_t addr;
	uint32_t size;

	addr = (uintptr_t)desc;
	size = roundup(sizeof(struct mxs_dma_desc), MXS_DMA_ALIGNMENT);

	flush_dcache_range(addr, addr + size);
}
#else
inline void mxs_dma_flush_desc(struct mxs_dma_desc *desc) {}
#endif

/*
 * Enable a DMA channel.
 *
 * If the given channel has any DMA descriptors on its active list, this
 * function causes the DMA hardware to begin processing them.
 *
 * This function marks the DMA channel as "busy," whether or not there are any
 * descriptors to process.
 */
static int mxs_dma_enable(int channel)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	unsigned int sem;
	struct mxs_dma_chan *pchan;
	struct mxs_dma_desc *pdesc;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	if (pchan->pending_num == 0) {
		pchan->flags |= MXS_DMA_FLAGS_BUSY;
		return 0;
	}

	pdesc = list_first_entry(&pchan->active, struct mxs_dma_desc, node);
	if (pdesc == NULL)
		return -EFAULT;

	if (pchan->flags & MXS_DMA_FLAGS_BUSY) {
		/* Channel is already running; extend the chain in flight. */
		if (!(pdesc->cmd.data & MXS_DMA_DESC_CHAIN))
			return 0;

		sem = mxs_dma_read_semaphore(channel);
		if (sem == 0)
			return 0;

		if (sem == 1) {
			/*
			 * Hardware is on its last command; point NXTCMDAR at
			 * the next chained command before raising the
			 * semaphore.
			 */
			pdesc = list_entry(pdesc->node.next,
					   struct mxs_dma_desc, node);
			writel(mxs_dma_cmd_address(pdesc),
			       &apbh_regs->ch[channel].hw_apbh_ch_nxtcmdar);
		}

		writel(pchan->pending_num,
		       &apbh_regs->ch[channel].hw_apbh_ch_sema);
		pchan->active_num += pchan->pending_num;
		pchan->pending_num = 0;
	} else {
		/*
		 * Channel is idle: program the first command address, set
		 * the semaphore, and ungate the channel clock to start it.
		 */
		pchan->active_num += pchan->pending_num;
		pchan->pending_num = 0;
		writel(mxs_dma_cmd_address(pdesc),
		       &apbh_regs->ch[channel].hw_apbh_ch_nxtcmdar);
		writel(pchan->active_num,
		       &apbh_regs->ch[channel].hw_apbh_ch_sema);
		writel(1 << (channel + APBH_CTRL0_CLKGATE_CHANNEL_OFFSET),
		       &apbh_regs->hw_apbh_ctrl0_clr);
	}

	pchan->flags |= MXS_DMA_FLAGS_BUSY;

	return 0;
}

/*
 * Disable a DMA channel.
 *
 * This function shuts down a DMA channel and marks it as "not busy." Any
 * descriptors on the active list are immediately moved to the head of the
 * "done" list, whether or not they have actually been processed by the
 * hardware. The "ready" flags of these descriptors are NOT cleared, so they
 * still appear to be active.
 *
 * This function immediately shuts down a DMA channel's hardware, aborting any
 * I/O that may be in progress, potentially leaving I/O hardware in an
 * undefined state. It is unwise to call this function if there is ANY chance
 * the hardware is still processing a command.
 */
static int mxs_dma_disable(int channel)
{
	struct mxs_dma_chan *pchan;
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	if (!(pchan->flags & MXS_DMA_FLAGS_BUSY))
		return -EINVAL;

	writel(1 << (channel + APBH_CTRL0_CLKGATE_CHANNEL_OFFSET),
	       &apbh_regs->hw_apbh_ctrl0_set);

	pchan->flags &= ~MXS_DMA_FLAGS_BUSY;
	pchan->active_num = 0;
	pchan->pending_num = 0;
	list_splice_init(&pchan->active, &pchan->done);

	return 0;
}

/*
 * Resets the DMA channel hardware.
 */
static int mxs_dma_reset(int channel)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	int ret;
#if defined(CONFIG_MX23)
	uint32_t setreg = (uintptr_t)(&apbh_regs->hw_apbh_ctrl0_set);
	uint32_t offset = APBH_CTRL0_RESET_CHANNEL_OFFSET;
#elif defined(CONFIG_MX28) || defined(CONFIG_MX6) || defined(CONFIG_MX7) || \
	defined(CONFIG_IMX8) || defined(CONFIG_IMX8M)
	u32 setreg = (uintptr_t)(&apbh_regs->hw_apbh_channel_ctrl_set);
	u32 offset = APBH_CHANNEL_CTRL_RESET_CHANNEL_OFFSET;
#endif

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	writel(1 << (channel + offset), (uintptr_t)setreg);

	return 0;
}

/*
 * Enable or disable DMA interrupt.
 *
 * This function enables or disables the given DMA channel's command-complete
 * interrupt to the CPU.
 */
static int mxs_dma_enable_irq(int channel, int enable)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	if (enable)
		writel(1 << (channel + APBH_CTRL1_CH_CMDCMPLT_IRQ_EN_OFFSET),
		       &apbh_regs->hw_apbh_ctrl1_set);
	else
		writel(1 << (channel + APBH_CTRL1_CH_CMDCMPLT_IRQ_EN_OFFSET),
		       &apbh_regs->hw_apbh_ctrl1_clr);

	return 0;
}

/*
 * Clear DMA interrupt.
 *
 * The software that is using the DMA channel must register to receive its
 * interrupts and, when they arrive, must call this function to clear them.
 */
static int mxs_dma_ack_irq(int channel)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	writel(1 << channel, &apbh_regs->hw_apbh_ctrl1_clr);
	writel(1 << channel, &apbh_regs->hw_apbh_ctrl2_clr);

	return 0;
}

/*
 * Request to reserve a DMA channel
 */
static int mxs_dma_request(int channel)
{
	struct mxs_dma_chan *pchan;

	if ((channel < 0) || (channel >= MXS_MAX_DMA_CHANNELS))
		return -EINVAL;

	pchan = mxs_dma_channels + channel;
	if ((pchan->flags & MXS_DMA_FLAGS_VALID) != MXS_DMA_FLAGS_VALID)
		return -ENODEV;

	if (pchan->flags & MXS_DMA_FLAGS_ALLOCATED)
		return -EBUSY;

	pchan->flags |= MXS_DMA_FLAGS_ALLOCATED;
	pchan->active_num = 0;
	pchan->pending_num = 0;

	INIT_LIST_HEAD(&pchan->active);
	INIT_LIST_HEAD(&pchan->done);

	return 0;
}

/*
 * Release a DMA channel.
 *
 * This function releases a DMA channel from its current owner.
 *
 * The channel will NOT be released if it's marked "busy" (see
 * mxs_dma_enable()).
 */
int mxs_dma_release(int channel)
{
	struct mxs_dma_chan *pchan;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	if (pchan->flags & MXS_DMA_FLAGS_BUSY)
		return -EBUSY;

	pchan->dev = 0;
	pchan->active_num = 0;
	pchan->pending_num = 0;
	pchan->flags &= ~MXS_DMA_FLAGS_ALLOCATED;

	return 0;
}

/*
 * Allocate DMA descriptor
 */
struct mxs_dma_desc *mxs_dma_desc_alloc(void)
{
	struct mxs_dma_desc *pdesc;
	uint32_t size;

	size = roundup(sizeof(struct mxs_dma_desc), MXS_DMA_ALIGNMENT);
	pdesc = memalign(MXS_DMA_ALIGNMENT, size);

	if (pdesc == NULL)
		return NULL;

	memset(pdesc, 0, sizeof(*pdesc));
	pdesc->address = (dma_addr_t)pdesc;

	return pdesc;
}

/*
 * Free DMA descriptor
 */
void mxs_dma_desc_free(struct mxs_dma_desc *pdesc)
{
	if (pdesc == NULL)
		return;

	free(pdesc);
}

/*
 * Add a DMA descriptor to a channel.
 *
 * If the descriptor list for this channel is not empty, this function sets the
 * CHAIN bit and the NEXTCMD_ADDR fields in the last descriptor's DMA command so
 * it will chain to the new descriptor's command.
 *
 * Then, this function marks the new descriptor as "ready," adds it to the end
 * of the active descriptor list, and increments the count of pending
 * descriptors.
 *
 * The MXS platform DMA software imposes some rules on DMA commands to maintain
 * important invariants. These rules are NOT checked, but they must be carefully
 * applied by software that uses MXS DMA channels.
 *
 * Invariant:
 *     The DMA channel's hardware semaphore must reflect the number of DMA
 *     commands the hardware will process, but has not yet finished.
 *
 * Explanation:
 *     A DMA channel begins processing commands when its hardware semaphore is
 *     written with a value greater than zero, and it stops processing commands
 *     when the semaphore returns to zero.
 *
 *     When a channel finishes a DMA command, it will decrement its semaphore
 *     if the DECREMENT_SEMAPHORE bit is set in that command's flags bits.
 *
 *     In principle, it's not necessary for the DECREMENT_SEMAPHORE bit to be
 *     set, unless it suits the purposes of the software. For example, one
 *     could construct a series of five DMA commands, with the
 *     DECREMENT_SEMAPHORE bit set only in the last one. Then, setting the DMA
 *     channel's hardware semaphore to one would cause the entire series of
 *     five commands to be processed. However, this example would violate the
 *     invariant given above.
 *
 * Rule:
 *     ALL DMA commands MUST have the DECREMENT_SEMAPHORE bit set so that the
 *     DMA channel's hardware semaphore will be decremented EVERY time a
 *     command is processed.
 */
int mxs_dma_desc_append(int channel, struct mxs_dma_desc *pdesc)
{
	struct mxs_dma_chan *pchan;
	struct mxs_dma_desc *last;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	pdesc->cmd.next = mxs_dma_cmd_address(pdesc);
	pdesc->flags |= MXS_DMA_DESC_FIRST | MXS_DMA_DESC_LAST;

	if (!list_empty(&pchan->active)) {
		last = list_entry(pchan->active.prev, struct mxs_dma_desc,
				  node);

		pdesc->flags &= ~MXS_DMA_DESC_FIRST;
		last->flags &= ~MXS_DMA_DESC_LAST;

		last->cmd.next = mxs_dma_cmd_address(pdesc);
		last->cmd.data |= MXS_DMA_DESC_CHAIN;

		mxs_dma_flush_desc(last);
	}
	pdesc->flags |= MXS_DMA_DESC_READY;
	if (pdesc->flags & MXS_DMA_DESC_FIRST)
		pchan->pending_num++;
	list_add_tail(&pdesc->node, &pchan->active);

	mxs_dma_flush_desc(pdesc);

	return ret;
}
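
/*
 * Usage sketch (illustrative only, not part of the driver): appending two
 * commands to one channel. The MXS_DMA_DESC_* flags come from
 * <asm/mach-imx/dma.h>; the command choice here is an assumption. Note that
 * DEC_SEM is set on both commands, per the Rule above, and that the CHAIN
 * bit on the earlier command is set by mxs_dma_desc_append() itself:
 *
 *	struct mxs_dma_desc *d1 = mxs_dma_desc_alloc();
 *	struct mxs_dma_desc *d2 = mxs_dma_desc_alloc();
 *
 *	d1->cmd.data = MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_DEC_SEM;
 *	d2->cmd.data = MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_DEC_SEM |
 *		       MXS_DMA_DESC_IRQ | MXS_DMA_DESC_WAIT4END;
 *
 *	mxs_dma_desc_append(channel, d1);	// d1 is FIRST and LAST
 *	mxs_dma_desc_append(channel, d2);	// clears LAST on d1, chains d1->d2
 */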

/*
 * Clean up processed DMA descriptors.
 *
 * This function removes processed DMA descriptors from the "active" list. Pass
 * in a non-NULL list head to get the descriptors moved to your list. Pass NULL
 * to get the descriptors moved to the channel's "done" list. Descriptors on
 * the "done" list can be retrieved with mxs_dma_get_finished().
 *
 * This function marks the DMA channel as "not busy" if no unprocessed
 * descriptors remain on the "active" list.
 */
static int mxs_dma_finish(int channel, struct list_head *head)
{
	int sem;
	struct mxs_dma_chan *pchan;
	struct list_head *p, *q;
	struct mxs_dma_desc *pdesc;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	sem = mxs_dma_read_semaphore(channel);
	if (sem < 0)
		return sem;

	if (sem == pchan->active_num)
		return 0;

	list_for_each_safe(p, q, &pchan->active) {
		if ((pchan->active_num) <= sem)
			break;

		pdesc = list_entry(p, struct mxs_dma_desc, node);
		pdesc->flags &= ~MXS_DMA_DESC_READY;

		if (head)
			list_move_tail(p, head);
		else
			list_move_tail(p, &pchan->done);

		if (pdesc->flags & MXS_DMA_DESC_LAST)
			pchan->active_num--;
	}

	if (sem == 0)
		pchan->flags &= ~MXS_DMA_FLAGS_BUSY;

	return 0;
}

/*
 * Wait for DMA channel to complete
 */
static int mxs_dma_wait_complete(uint32_t timeout, unsigned int chan)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	int ret;

	ret = mxs_dma_validate_chan(chan);
	if (ret)
		return ret;

	if (mxs_wait_mask_set(&apbh_regs->hw_apbh_ctrl1_reg,
			      1 << chan, timeout)) {
		ret = -ETIMEDOUT;
		mxs_dma_reset(chan);
	}

	return ret;
}

/*
 * Execute the DMA channel: kick off the pending descriptors, wait for
 * completion, then clean up and shut the channel down.
 */
int mxs_dma_go(int chan)
{
	uint32_t timeout = 10000000;
	int ret;

	LIST_HEAD(tmp_desc_list);

	mxs_dma_enable_irq(chan, 1);
	mxs_dma_enable(chan);

	/* Wait for DMA to finish. */
	ret = mxs_dma_wait_complete(timeout, chan);

	/* Clear out the descriptors we just ran. */
	mxs_dma_finish(chan, &tmp_desc_list);

	/* Shut the DMA channel down. */
	mxs_dma_ack_irq(chan);
	mxs_dma_reset(chan);
	mxs_dma_enable_irq(chan, 0);
	mxs_dma_disable(chan);

	return ret;
}
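
/*
 * Lifecycle sketch (illustrative only; the command flags are an assumption,
 * the calls are this file's API): run a single command synchronously.
 *
 *	struct mxs_dma_desc *d = mxs_dma_desc_alloc();
 *	int ret;
 *
 *	if (!d)
 *		return -ENOMEM;
 *	d->cmd.data = MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_DEC_SEM |
 *		      MXS_DMA_DESC_IRQ | MXS_DMA_DESC_WAIT4END;
 *	mxs_dma_desc_append(channel, d);
 *	ret = mxs_dma_go(channel);	// enable, wait, ack, reset, disable
 *	mxs_dma_desc_free(d);
 */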

/*
 * Execute a continuously running circular DMA descriptor.
 * NOTE: This is not intended for general use, but rather
 * for the LCD driver in Smart-LCD mode. It allows
 * continuous triggering of the RUN bit there.
 */
void mxs_dma_circ_start(int chan, struct mxs_dma_desc *pdesc)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;

	mxs_dma_flush_desc(pdesc);

	mxs_dma_enable_irq(chan, 1);

	writel(mxs_dma_cmd_address(pdesc),
	       &apbh_regs->ch[chan].hw_apbh_ch_nxtcmdar);
	writel(1, &apbh_regs->ch[chan].hw_apbh_ch_sema);
	writel(1 << (chan + APBH_CTRL0_CLKGATE_CHANNEL_OFFSET),
	       &apbh_regs->hw_apbh_ctrl0_clr);
}

/*
 * Initialize the DMA hardware
 */
void mxs_dma_init(void)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;

	mxs_reset_block(&apbh_regs->hw_apbh_ctrl0_reg);

#ifdef CONFIG_APBH_DMA_BURST8
	writel(APBH_CTRL0_AHB_BURST8_EN,
	       &apbh_regs->hw_apbh_ctrl0_set);
#else
	writel(APBH_CTRL0_AHB_BURST8_EN,
	       &apbh_regs->hw_apbh_ctrl0_clr);
#endif

#ifdef CONFIG_APBH_DMA_BURST
	writel(APBH_CTRL0_APB_BURST_EN,
	       &apbh_regs->hw_apbh_ctrl0_set);
#else
	writel(APBH_CTRL0_APB_BURST_EN,
	       &apbh_regs->hw_apbh_ctrl0_clr);
#endif
}

int mxs_dma_init_channel(int channel)
{
	struct mxs_dma_chan *pchan;
	int ret;

	pchan = mxs_dma_channels + channel;
	pchan->flags = MXS_DMA_FLAGS_VALID;

	ret = mxs_dma_request(channel);

	if (ret) {
		printf("MXS DMA: Can't acquire DMA channel %i\n",
		       channel);
		return ret;
	}

	mxs_dma_reset(channel);
	mxs_dma_ack_irq(channel);

	return 0;
}
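
/*
 * Bring-up sketch (illustrative only; MXS_DMA_CHANNEL_AHB_APBH_GPMI0 is one
 * of the channel IDs defined in <asm/mach-imx/dma.h>):
 *
 *	mxs_dma_init();
 *	ret = mxs_dma_init_channel(MXS_DMA_CHANNEL_AHB_APBH_GPMI0);
 *	if (ret)
 *		return ret;
 *	// The channel is now reset and IRQ-acked, ready for
 *	// mxs_dma_desc_append() followed by mxs_dma_go().
 */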