ti-edma3.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * Enhanced Direct Memory Access (EDMA3) Controller
 *
 * (C) Copyright 2014
 * Texas Instruments Incorporated, <www.ti.com>
 *
 * Author: Ivan Khoronzhuk <ivan.khoronzhuk@ti.com>
 */

#include <asm/cache.h>
#include <asm/io.h>
#include <common.h>
#include <dm.h>
#include <dma-uclass.h>
#include <asm/omap_common.h>
#include <asm/ti-common/ti-edma3.h>

#define EDMA3_SL_BASE(slot)		(0x4000 + ((slot) << 5))
#define EDMA3_SL_MAX_NUM		512
#define EDMA3_SLOPT_FIFO_WIDTH_MASK	(0x7 << 8)

#define EDMA3_QCHMAP(ch)		0x0200 + ((ch) << 2)
#define EDMA3_CHMAP_PARSET_MASK		0x1ff
#define EDMA3_CHMAP_PARSET_SHIFT	0x5
#define EDMA3_CHMAP_TRIGWORD_SHIFT	0x2

#define EDMA3_QEMCR			0x314
#define EDMA3_IPR			0x1068
#define EDMA3_IPRH			0x106c
#define EDMA3_ICR			0x1070
#define EDMA3_ICRH			0x1074
#define EDMA3_QEECR			0x1088
#define EDMA3_QEESR			0x108c
#define EDMA3_QSECR			0x1094

#define EDMA_FILL_BUFFER_SIZE		512

struct ti_edma3_priv {
	u32 base;
};

static u8 edma_fill_buffer[EDMA_FILL_BUFFER_SIZE] __aligned(ARCH_DMA_MINALIGN);

/**
 * qedma3_start - start qdma on a channel
 * @base: base address of edma
 * @cfg: pointer to struct edma3_channel_config where you set the slot
 * number to associate with, the chnum, which corresponds to your quick
 * channel number 0-7, the complete code (transfer complete code) and
 * the trigger slot word, which has to correspond to the word number in
 * struct edma3_slot_layout used to generate the trigger event.
 * (A usage sketch follows this function.)
 */
void qedma3_start(u32 base, struct edma3_channel_config *cfg)
{
	u32 qchmap;

	/* Clear the pending int bit */
	if (cfg->complete_code < 32)
		__raw_writel(1 << cfg->complete_code, base + EDMA3_ICR);
	else
		__raw_writel(1 << (cfg->complete_code - 32), base + EDMA3_ICRH);

	/* Map parameter set and trigger word 7 to quick channel */
	qchmap = ((EDMA3_CHMAP_PARSET_MASK & cfg->slot)
		  << EDMA3_CHMAP_PARSET_SHIFT) |
		 (cfg->trigger_slot_word << EDMA3_CHMAP_TRIGWORD_SHIFT);
	__raw_writel(qchmap, base + EDMA3_QCHMAP(cfg->chnum));

	/* Clear missed event if set */
	__raw_writel(1 << cfg->chnum, base + EDMA3_QSECR);
	__raw_writel(1 << cfg->chnum, base + EDMA3_QEMCR);

	/* Enable qdma channel event */
	__raw_writel(1 << cfg->chnum, base + EDMA3_QEESR);
}
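
/*
 * Illustrative sketch (edma3_base and dst_addr are assumed values, not
 * defined in this driver): starting a QDMA transfer from PaRAM slot 1 on
 * quick channel 0, with transfer-complete code 0 and the destination word
 * of the slot used as trigger word.  __edma3_transfer() below does the
 * same thing internally.
 *
 *	struct edma3_channel_config cfg = {
 *		.slot			= 1,
 *		.chnum			= 0,
 *		.complete_code		= 0,
 *		.trigger_slot_word	= EDMA3_TWORD(dst),
 *	};
 *
 *	qedma3_start(edma3_base, &cfg);
 *	edma3_set_dest_addr(edma3_base, cfg.slot, dst_addr);
 */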

/**
 * edma3_set_dest - set initial DMA destination address in parameter RAM slot
 * @base: base address of edma
 * @slot: parameter RAM slot being configured
 * @dst: physical address of destination (memory, controller FIFO, etc)
 * @mode: INCR, except in very rare cases
 * @width: ignored unless @mode is FIFO; when FIFO it specifies the
 * width to use when addressing the fifo (e.g. W8BIT, W32BIT)
 *
 * Note that the destination address is modified during the DMA transfer
 * according to edma3_set_dest_index().
 */
void edma3_set_dest(u32 base, int slot, u32 dst, enum edma3_address_mode mode,
		    enum edma3_fifo_width width)
{
	u32 opt;
	struct edma3_slot_layout *rg;

	rg = (struct edma3_slot_layout *)(base + EDMA3_SL_BASE(slot));

	opt = __raw_readl(&rg->opt);
	if (mode == FIFO)
		opt = (opt & EDMA3_SLOPT_FIFO_WIDTH_MASK) |
			(EDMA3_SLOPT_DST_ADDR_CONST_MODE |
			 EDMA3_SLOPT_FIFO_WIDTH_SET(width));
	else
		opt &= ~EDMA3_SLOPT_DST_ADDR_CONST_MODE;

	__raw_writel(opt, &rg->opt);
	__raw_writel(dst, &rg->dst);
}

/**
 * edma3_set_dest_index - configure DMA destination address indexing
 * @base: base address of edma
 * @slot: parameter RAM slot being configured
 * @bidx: byte offset between destination arrays in a frame
 * @cidx: byte offset between destination frames in a block
 *
 * Offsets are specified to support either contiguous or discontiguous
 * memory transfers, or repeated access to a hardware register, as needed.
 * When accessing hardware registers, both offsets are normally zero.
 */
void edma3_set_dest_index(u32 base, unsigned slot, int bidx, int cidx)
{
	u32 src_dst_bidx;
	u32 src_dst_cidx;
	struct edma3_slot_layout *rg;

	rg = (struct edma3_slot_layout *)(base + EDMA3_SL_BASE(slot));

	src_dst_bidx = __raw_readl(&rg->src_dst_bidx);
	src_dst_cidx = __raw_readl(&rg->src_dst_cidx);

	__raw_writel((src_dst_bidx & 0x0000ffff) | (bidx << 16),
		     &rg->src_dst_bidx);
	__raw_writel((src_dst_cidx & 0x0000ffff) | (cidx << 16),
		     &rg->src_dst_cidx);
}

/**
 * edma3_set_dest_addr - set destination address for slot only
 */
void edma3_set_dest_addr(u32 base, int slot, u32 dst)
{
	struct edma3_slot_layout *rg;

	rg = (struct edma3_slot_layout *)(base + EDMA3_SL_BASE(slot));
	__raw_writel(dst, &rg->dst);
}

/**
 * edma3_set_src - set initial DMA source address in parameter RAM slot
 * @base: base address of edma
 * @slot: parameter RAM slot being configured
 * @src: physical address of source (memory, controller FIFO, etc)
 * @mode: INCR, except in very rare cases
 * @width: ignored unless @mode is FIFO; when FIFO it specifies the
 * width to use when addressing the fifo (e.g. W8BIT, W32BIT)
 *
 * Note that the source address is modified during the DMA transfer
 * according to edma3_set_src_index().
 */
void edma3_set_src(u32 base, int slot, u32 src, enum edma3_address_mode mode,
		   enum edma3_fifo_width width)
{
	u32 opt;
	struct edma3_slot_layout *rg;

	rg = (struct edma3_slot_layout *)(base + EDMA3_SL_BASE(slot));

	opt = __raw_readl(&rg->opt);
	if (mode == FIFO)
		opt = (opt & EDMA3_SLOPT_FIFO_WIDTH_MASK) |
			(EDMA3_SLOPT_DST_ADDR_CONST_MODE |
			 EDMA3_SLOPT_FIFO_WIDTH_SET(width));
	else
		opt &= ~EDMA3_SLOPT_DST_ADDR_CONST_MODE;

	__raw_writel(opt, &rg->opt);
	__raw_writel(src, &rg->src);
}

/**
 * edma3_set_src_index - configure DMA source address indexing
 * @base: base address of edma
 * @slot: parameter RAM slot being configured
 * @bidx: byte offset between source arrays in a frame
 * @cidx: byte offset between source frames in a block
 *
 * Offsets are specified to support either contiguous or discontiguous
 * memory transfers, or repeated access to a hardware register, as needed.
 * When accessing hardware registers, both offsets are normally zero.
 */
void edma3_set_src_index(u32 base, unsigned slot, int bidx, int cidx)
{
	u32 src_dst_bidx;
	u32 src_dst_cidx;
	struct edma3_slot_layout *rg;

	rg = (struct edma3_slot_layout *)(base + EDMA3_SL_BASE(slot));

	src_dst_bidx = __raw_readl(&rg->src_dst_bidx);
	src_dst_cidx = __raw_readl(&rg->src_dst_cidx);

	__raw_writel((src_dst_bidx & 0xffff0000) | bidx,
		     &rg->src_dst_bidx);
	__raw_writel((src_dst_cidx & 0xffff0000) | cidx,
		     &rg->src_dst_cidx);
}

/**
 * edma3_set_src_addr - set source address for slot only
 */
void edma3_set_src_addr(u32 base, int slot, u32 src)
{
	struct edma3_slot_layout *rg;

	rg = (struct edma3_slot_layout *)(base + EDMA3_SL_BASE(slot));
	__raw_writel(src, &rg->src);
}

/**
 * edma3_set_transfer_params - configure DMA transfer parameters
 * @base: base address of edma
 * @slot: parameter RAM slot being configured
 * @acnt: how many bytes per array (at least one)
 * @bcnt: how many arrays per frame (at least one)
 * @ccnt: how many frames per block (at least one)
 * @bcnt_rld: used only for A-Synchronized transfers; this specifies
 * the value to reload into bcnt when it decrements to zero
 * @sync_mode: ASYNC or ABSYNC
 *
 * See the EDMA3 documentation to understand how to configure and link
 * transfers using the fields in PaRAM slots. If you are not doing it
 * all at once with edma3_write_slot(), you will use this routine
 * plus two calls each for source and destination, setting the initial
 * address and saying how to index that address.  (A minimal sequence of
 * that kind is sketched after this function.)
 *
 * An example of an A-Synchronized transfer is a serial link using a
 * single word shift register. In that case, @acnt would be equal to
 * that word size; the serial controller issues a DMA synchronization
 * event to transfer each word, and memory access by the DMA transfer
 * controller will be word-at-a-time.
 *
 * An example of an AB-Synchronized transfer is a device using a FIFO.
 * In that case, @acnt equals the FIFO width and @bcnt equals its depth.
 * The controller with the FIFO issues DMA synchronization events when
 * the FIFO threshold is reached, and the DMA transfer controller will
 * transfer one frame to (or from) the FIFO. It will probably use
 * efficient burst modes to access memory.
 */
void edma3_set_transfer_params(u32 base, int slot, int acnt,
			       int bcnt, int ccnt, u16 bcnt_rld,
			       enum edma3_sync_dimension sync_mode)
{
	u32 opt;
	u32 link_bcntrld;
	struct edma3_slot_layout *rg;

	rg = (struct edma3_slot_layout *)(base + EDMA3_SL_BASE(slot));

	link_bcntrld = __raw_readl(&rg->link_bcntrld);
	__raw_writel((bcnt_rld << 16) | (0x0000ffff & link_bcntrld),
		     &rg->link_bcntrld);

	opt = __raw_readl(&rg->opt);
	if (sync_mode == ASYNC)
		__raw_writel(opt & ~EDMA3_SLOPT_AB_SYNC, &rg->opt);
	else
		__raw_writel(opt | EDMA3_SLOPT_AB_SYNC, &rg->opt);

	/* Set the ACNT, BCNT, CCNT registers */
	__raw_writel((bcnt << 16) | (acnt & 0xffff), &rg->a_b_cnt);
	__raw_writel(0xffff & ccnt, &rg->ccnt);
}
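
/*
 * Illustrative sketch of the "small pieces" sequence described above, for an
 * AB-synchronized transfer from memory to a peripheral FIFO (base, slot,
 * fifo_addr, buf, acnt and bcnt are assumed values, not defined in this
 * driver):
 *
 *	edma3_set_src(base, slot, (u32)buf, INCR, W8BIT);
 *	edma3_set_src_index(base, slot, acnt, 0);
 *	edma3_set_dest(base, slot, fifo_addr, FIFO, W32BIT);
 *	edma3_set_dest_index(base, slot, 0, 0);
 *	edma3_set_transfer_params(base, slot, acnt, bcnt, 1, 0, ABSYNC);
 */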

/**
 * edma3_write_slot - write parameter RAM data for slot
 * @base: base address of edma
 * @slot: number of parameter RAM slot being modified
 * @param: data to be written into parameter RAM slot
 *
 * Use this to assign all parameters of a transfer at once. This
 * allows more efficient setup of transfers than issuing multiple
 * calls to set up those parameters in small pieces, and provides
 * complete control over all transfer options.
 */
void edma3_write_slot(u32 base, int slot, struct edma3_slot_layout *param)
{
	int i;
	u32 *p = (u32 *)param;
	u32 *addr = (u32 *)(base + EDMA3_SL_BASE(slot));

	/* copy the whole slot, one 32-bit word at a time */
	for (i = 0; i < sizeof(struct edma3_slot_layout) / 4; i++)
		__raw_writel(*p++, addr++);
}

/**
 * edma3_read_slot - read parameter RAM data from slot
 * @base: base address of edma
 * @slot: number of parameter RAM slot being copied
 * @param: where to store copy of parameter RAM data
 *
 * Use this to read data from a parameter RAM slot, perhaps to
 * save them as a template for later reuse.
 */
void edma3_read_slot(u32 base, int slot, struct edma3_slot_layout *param)
{
	int i;
	u32 *p = (u32 *)param;
	u32 *addr = (u32 *)(base + EDMA3_SL_BASE(slot));

	/* copy the whole slot, one 32-bit word at a time */
	for (i = 0; i < sizeof(struct edma3_slot_layout) / 4; i++)
		*p++ = __raw_readl(addr++);
}
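
/**
 * edma3_slot_configure - configure a parameter RAM slot from a config struct
 * @base: base address of edma
 * @slot: parameter RAM slot being configured
 * @cfg: pointer to struct edma3_slot_config holding the OPT word, source
 * and destination addresses, counts, indexes, link and reload values
 *
 * Writes every field of the PaRAM slot in one go, packing the 16-bit
 * count/index pairs into their shared 32-bit registers.
 */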
void edma3_slot_configure(u32 base, int slot, struct edma3_slot_config *cfg)
{
	struct edma3_slot_layout *rg;

	rg = (struct edma3_slot_layout *)(base + EDMA3_SL_BASE(slot));

	__raw_writel(cfg->opt, &rg->opt);
	__raw_writel(cfg->src, &rg->src);
	__raw_writel((cfg->bcnt << 16) | (cfg->acnt & 0xffff), &rg->a_b_cnt);
	__raw_writel(cfg->dst, &rg->dst);
	__raw_writel((cfg->dst_bidx << 16) |
		     (cfg->src_bidx & 0xffff), &rg->src_dst_bidx);
	__raw_writel((cfg->bcntrld << 16) |
		     (cfg->link & 0xffff), &rg->link_bcntrld);
	__raw_writel((cfg->dst_cidx << 16) |
		     (cfg->src_cidx & 0xffff), &rg->src_dst_cidx);
	__raw_writel(0xffff & cfg->ccnt, &rg->ccnt);
}

/**
 * edma3_check_for_transfer - check whether the transfer has completed by
 * checking the interrupt pending bit; clear the pending bit if complete
 * @base: base address of edma
 * @cfg: pointer to the struct edma3_channel_config that was passed
 * to qedma3_start() when you started the qdma channel
 *
 * Return 0 if complete, 1 if not.  (A polling sketch follows this function.)
 */
int edma3_check_for_transfer(u32 base, struct edma3_channel_config *cfg)
{
	u32 inum;
	u32 ipr_base;
	u32 icr_base;

	if (cfg->complete_code < 32) {
		ipr_base = base + EDMA3_IPR;
		icr_base = base + EDMA3_ICR;
		inum = 1 << cfg->complete_code;
	} else {
		ipr_base = base + EDMA3_IPRH;
		icr_base = base + EDMA3_ICRH;
		inum = 1 << (cfg->complete_code - 32);
	}

	/* check complete interrupt */
	if (!(__raw_readl(ipr_base) & inum))
		return 1;

	/* clean up the pending int bit */
	__raw_writel(inum, icr_base);

	return 0;
}
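
/*
 * Illustrative sketch (base is an assumed controller address; cfg is the same
 * struct edma3_channel_config that was passed to qedma3_start()): busy-wait
 * for completion, then release the channel.  This mirrors the loop used by
 * __edma3_transfer() below.
 *
 *	while (edma3_check_for_transfer(base, &cfg))
 *		;
 *	qedma3_stop(base, &cfg);
 */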

/**
 * qedma3_stop - stop qdma on the channel passed
 * @base: base address of edma
 * @cfg: pointer to the struct edma3_channel_config that was passed
 * to qedma3_start() when you started the qdma channel
 */
void qedma3_stop(u32 base, struct edma3_channel_config *cfg)
{
	/* Disable qdma channel event */
	__raw_writel(1 << cfg->chnum, base + EDMA3_QEECR);

	/* clean up the interrupt indication */
	if (cfg->complete_code < 32)
		__raw_writel(1 << cfg->complete_code, base + EDMA3_ICR);
	else
		__raw_writel(1 << (cfg->complete_code - 32), base + EDMA3_ICRH);

	/* Clear missed event if set */
	__raw_writel(1 << cfg->chnum, base + EDMA3_QSECR);
	__raw_writel(1 << cfg->chnum, base + EDMA3_QEMCR);

	/* Clear the channel map */
	__raw_writel(0, base + EDMA3_QCHMAP(cfg->chnum));
}
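
/**
 * __edma3_transfer - copy a buffer with a manually triggered QDMA transfer
 * @edma3_base_addr: base address of edma
 * @edma_slot_num: parameter RAM slot to use
 * @dst: destination buffer
 * @src: source buffer
 * @len: total number of bytes to move
 * @s_len: length of the source buffer; when @len > @s_len the same source
 * buffer is re-read for every array (used by __edma3_fill() below)
 *
 * The transfer is split into arrays of @s_len (or at most 0x7FFF) bytes:
 * ACNT holds the array size and BCNT the number of arrays, with the
 * destination index advancing by one array per event and the source index
 * advancing only for a plain copy.  Writing the destination address into
 * the slot provides the trigger word, and completion is polled with
 * edma3_check_for_transfer().  Any remainder that does not fill a whole
 * array is moved by a second, single-array transfer.
 */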
void __edma3_transfer(unsigned long edma3_base_addr, unsigned int edma_slot_num,
		      void *dst, void *src, size_t len, size_t s_len)
{
	struct edma3_slot_config slot;
	struct edma3_channel_config edma_channel;
	int b_cnt_value = 1;
	int rem_bytes = 0;
	int a_cnt_value = len;
	unsigned int addr = (unsigned int) (dst);
	unsigned int max_acnt = 0x7FFFU;

	if (len > s_len) {
		b_cnt_value = (len / s_len);
		rem_bytes = (len % s_len);
		a_cnt_value = s_len;
	} else if (len > max_acnt) {
		b_cnt_value = (len / max_acnt);
		rem_bytes = (len % max_acnt);
		a_cnt_value = max_acnt;
	}

	slot.opt = 0;
	slot.src = ((unsigned int) src);
	slot.acnt = a_cnt_value;
	slot.bcnt = b_cnt_value;
	slot.ccnt = 1;
	if (len == s_len)
		slot.src_bidx = a_cnt_value;
	else
		slot.src_bidx = 0;
	slot.dst_bidx = a_cnt_value;
	slot.src_cidx = 0;
	slot.dst_cidx = 0;
	slot.link = EDMA3_PARSET_NULL_LINK;
	slot.bcntrld = 0;
	slot.opt = EDMA3_SLOPT_TRANS_COMP_INT_ENB |
		   EDMA3_SLOPT_COMP_CODE(0) |
		   EDMA3_SLOPT_STATIC | EDMA3_SLOPT_AB_SYNC;

	edma3_slot_configure(edma3_base_addr, edma_slot_num, &slot);
	edma_channel.slot = edma_slot_num;
	edma_channel.chnum = 0;
	edma_channel.complete_code = 0;
	/* set event trigger to dst update */
	edma_channel.trigger_slot_word = EDMA3_TWORD(dst);

	qedma3_start(edma3_base_addr, &edma_channel);
	edma3_set_dest_addr(edma3_base_addr, edma_channel.slot, addr);

	while (edma3_check_for_transfer(edma3_base_addr, &edma_channel))
		;
	qedma3_stop(edma3_base_addr, &edma_channel);

	if (rem_bytes != 0) {
		slot.opt = 0;
		if (len == s_len)
			slot.src = (b_cnt_value * max_acnt) +
				   ((unsigned int) src);
		else
			slot.src = (unsigned int) src;
		slot.acnt = rem_bytes;
		slot.bcnt = 1;
		slot.ccnt = 1;
		slot.src_bidx = rem_bytes;
		slot.dst_bidx = rem_bytes;
		slot.src_cidx = 0;
		slot.dst_cidx = 0;
		slot.link = EDMA3_PARSET_NULL_LINK;
		slot.bcntrld = 0;
		slot.opt = EDMA3_SLOPT_TRANS_COMP_INT_ENB |
			   EDMA3_SLOPT_COMP_CODE(0) |
			   EDMA3_SLOPT_STATIC | EDMA3_SLOPT_AB_SYNC;
		edma3_slot_configure(edma3_base_addr, edma_slot_num, &slot);
		edma_channel.slot = edma_slot_num;
		edma_channel.chnum = 0;
		edma_channel.complete_code = 0;
		/* set event trigger to dst update */
		edma_channel.trigger_slot_word = EDMA3_TWORD(dst);

		qedma3_start(edma3_base_addr, &edma_channel);
		edma3_set_dest_addr(edma3_base_addr, edma_channel.slot, addr +
				    (max_acnt * b_cnt_value));
		while (edma3_check_for_transfer(edma3_base_addr, &edma_channel))
			;
		qedma3_stop(edma3_base_addr, &edma_channel);
	}
}
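
/**
 * __edma3_fill - fill memory with a byte value using EDMA3
 * @edma3_base_addr: base address of edma
 * @edma_slot_num: parameter RAM slot to use
 * @dst: destination buffer
 * @val: byte value to write
 * @len: number of bytes to fill
 *
 * A small static buffer is pre-filled with @val and then copied repeatedly:
 * each call to __edma3_transfer() re-reads that buffer for every array, so
 * at most EDMA_FILL_BUFFER_SIZE * 65535 bytes are filled per iteration.
 */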
void __edma3_fill(unsigned long edma3_base_addr, unsigned int edma_slot_num,
		  void *dst, u8 val, size_t len)
{
	int xfer_len;
	int max_xfer = EDMA_FILL_BUFFER_SIZE * 65535;

	memset((void *)edma_fill_buffer, val, sizeof(edma_fill_buffer));

	while (len) {
		xfer_len = len;
		if (xfer_len > max_xfer)
			xfer_len = max_xfer;

		__edma3_transfer(edma3_base_addr, edma_slot_num, dst,
				 edma_fill_buffer, xfer_len,
				 EDMA_FILL_BUFFER_SIZE);
		len -= xfer_len;
		dst += xfer_len;
	}
}

#ifndef CONFIG_DMA
void edma3_transfer(unsigned long edma3_base_addr, unsigned int edma_slot_num,
		    void *dst, void *src, size_t len)
{
	__edma3_transfer(edma3_base_addr, edma_slot_num, dst, src, len, len);
}

void edma3_fill(unsigned long edma3_base_addr, unsigned int edma_slot_num,
		void *dst, u8 val, size_t len)
{
	__edma3_fill(edma3_base_addr, edma_slot_num, dst, val, len);
}

#else
static int ti_edma3_transfer(struct udevice *dev, int direction, void *dst,
			     void *src, size_t len)
{
	struct ti_edma3_priv *priv = dev_get_priv(dev);

	/* enable edma3 clocks */
	enable_edma3_clocks();

	switch (direction) {
	case DMA_MEM_TO_MEM:
		__edma3_transfer(priv->base, 1, dst, src, len, len);
		break;
	default:
		pr_err("Transfer type not implemented in DMA driver\n");
		break;
	}

	/* disable edma3 clocks */
	disable_edma3_clocks();

	return 0;
}

static int ti_edma3_of_to_plat(struct udevice *dev)
{
	struct ti_edma3_priv *priv = dev_get_priv(dev);

	priv->base = dev_read_addr(dev);

	return 0;
}

static int ti_edma3_probe(struct udevice *dev)
{
	struct dma_dev_priv *uc_priv = dev_get_uclass_priv(dev);

	uc_priv->supported = DMA_SUPPORTS_MEM_TO_MEM;

	return 0;
}

static const struct dma_ops ti_edma3_ops = {
	.transfer	= ti_edma3_transfer,
};

static const struct udevice_id ti_edma3_ids[] = {
	{ .compatible = "ti,edma3" },
	{ }
};
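
/*
 * Illustrative sketch (node name, unit address and register size are
 * assumptions, not taken from this driver): a device tree node that binds
 * to the "ti,edma3" compatible above, with its "reg" base ending up in
 * ti_edma3_priv.base via ti_edma3_of_to_plat():
 *
 *	edma: edma@40000000 {
 *		compatible = "ti,edma3";
 *		reg = <0x40000000 0x10000>;
 *	};
 */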

U_BOOT_DRIVER(ti_edma3) = {
	.name		= "ti_edma3",
	.id		= UCLASS_DMA,
	.of_match	= ti_edma3_ids,
	.ops		= &ti_edma3_ops,
	.of_to_plat	= ti_edma3_of_to_plat,
	.probe		= ti_edma3_probe,
	.priv_auto	= sizeof(struct ti_edma3_priv),
};
#endif /* CONFIG_DMA */