fsl-edma-common.c

// SPDX-License-Identifier: GPL-2.0+
//
// Copyright (c) 2013-2014 Freescale Semiconductor, Inc
// Copyright (c) 2017 Sysam, Angelo Dureghello <angelo@sysam.it>

#include <linux/dmapool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include "fsl-edma-common.h"

#define EDMA_CR			0x00
#define EDMA_ES			0x04
#define EDMA_ERQ		0x0C
#define EDMA_EEI		0x14
#define EDMA_SERQ		0x1B
#define EDMA_CERQ		0x1A
#define EDMA_SEEI		0x19
#define EDMA_CEEI		0x18
#define EDMA_CINT		0x1F
#define EDMA_CERR		0x1E
#define EDMA_SSRT		0x1D
#define EDMA_CDNE		0x1C
#define EDMA_INTR		0x24
#define EDMA_ERR		0x2C

#define EDMA64_ERQH		0x08
#define EDMA64_EEIH		0x10
#define EDMA64_SERQ		0x18
#define EDMA64_CERQ		0x19
#define EDMA64_SEEI		0x1a
#define EDMA64_CEEI		0x1b
#define EDMA64_CINT		0x1c
#define EDMA64_CERR		0x1d
#define EDMA64_SSRT		0x1e
#define EDMA64_CDNE		0x1f
#define EDMA64_INTH		0x20
#define EDMA64_INTL		0x24
#define EDMA64_ERRH		0x28
#define EDMA64_ERRL		0x2c

#define EDMA_TCD		0x1000

static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
{
	struct edma_regs *regs = &fsl_chan->edma->regs;
	u32 ch = fsl_chan->vchan.chan.chan_id;

	if (fsl_chan->edma->drvdata->version == v1) {
		edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), regs->seei);
		edma_writeb(fsl_chan->edma, ch, regs->serq);
	} else {
		/* ColdFire is big endian, and accesses natively
		 * big endian I/O peripherals
		 */
		iowrite8(EDMA_SEEI_SEEI(ch), regs->seei);
		iowrite8(ch, regs->serq);
	}
}

void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
{
	struct edma_regs *regs = &fsl_chan->edma->regs;
	u32 ch = fsl_chan->vchan.chan.chan_id;

	if (fsl_chan->edma->drvdata->version == v1) {
		edma_writeb(fsl_chan->edma, ch, regs->cerq);
		edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), regs->ceei);
	} else {
		/* ColdFire is big endian, and accesses natively
		 * big endian I/O peripherals
		 */
		iowrite8(ch, regs->cerq);
		iowrite8(EDMA_CEEI_CEEI(ch), regs->ceei);
	}
}
EXPORT_SYMBOL_GPL(fsl_edma_disable_request);

static void mux_configure8(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
			   u32 off, u32 slot, bool enable)
{
	u8 val8;

	if (enable)
		val8 = EDMAMUX_CHCFG_ENBL | slot;
	else
		val8 = EDMAMUX_CHCFG_DIS;

	iowrite8(val8, addr + off);
}

static void mux_configure32(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
			    u32 off, u32 slot, bool enable)
{
	u32 val;

	if (enable)
		val = EDMAMUX_CHCFG_ENBL << 24 | slot;
	else
		val = EDMAMUX_CHCFG_DIS;

	iowrite32(val, addr + off * 4);
}

void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
		       unsigned int slot, bool enable)
{
	u32 ch = fsl_chan->vchan.chan.chan_id;
	void __iomem *muxaddr;
	unsigned int chans_per_mux, ch_off;
	int endian_diff[4] = {3, 1, -1, -3};
	u32 dmamux_nr = fsl_chan->edma->drvdata->dmamuxs;

	chans_per_mux = fsl_chan->edma->n_chans / dmamux_nr;
	ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;

	if (fsl_chan->edma->drvdata->mux_swap)
		ch_off += endian_diff[ch_off % 4];

	muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
	slot = EDMAMUX_CHCFG_SOURCE(slot);

	if (fsl_chan->edma->drvdata->version == v3)
		mux_configure32(fsl_chan, muxaddr, ch_off, slot, enable);
	else
		mux_configure8(fsl_chan, muxaddr, ch_off, slot, enable);
}
EXPORT_SYMBOL_GPL(fsl_edma_chan_mux);
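
/*
 * Worked example of the mux routing above (the numbers are illustrative,
 * not taken from a specific SoC): with n_chans = 32 and drvdata->dmamuxs = 2,
 * chans_per_mux is 16, so channel 17 programs muxbase[17 / 16] = muxbase[1]
 * at register offset 17 % 16 = 1.  When drvdata->mux_swap is set, the offset
 * is additionally byte-swapped within each 32-bit group via endian_diff[],
 * e.g. offset 1 becomes 1 + endian_diff[1] = 2.
 */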

static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width)
{
	switch (addr_width) {
	case 1:
		return EDMA_TCD_ATTR_SSIZE_8BIT | EDMA_TCD_ATTR_DSIZE_8BIT;
	case 2:
		return EDMA_TCD_ATTR_SSIZE_16BIT | EDMA_TCD_ATTR_DSIZE_16BIT;
	case 4:
		return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
	case 8:
		return EDMA_TCD_ATTR_SSIZE_64BIT | EDMA_TCD_ATTR_DSIZE_64BIT;
	default:
		return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
	}
}
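
/*
 * The switch above keys off the numeric value of enum dma_slave_buswidth
 * (DMA_SLAVE_BUSWIDTH_1_BYTE == 1, ..._4_BYTES == 4, and so on), so for
 * example a 4-byte peripheral register width selects 32-bit source and
 * destination transfer sizes; anything unrecognized falls back to 32-bit.
 */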

void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
{
	struct fsl_edma_desc *fsl_desc;
	int i;

	fsl_desc = to_fsl_edma_desc(vdesc);
	for (i = 0; i < fsl_desc->n_tcds; i++)
		dma_pool_free(fsl_desc->echan->tcd_pool, fsl_desc->tcd[i].vtcd,
			      fsl_desc->tcd[i].ptcd);
	kfree(fsl_desc);
}
EXPORT_SYMBOL_GPL(fsl_edma_free_desc);

int fsl_edma_terminate_all(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	fsl_edma_disable_request(fsl_chan);
	fsl_chan->edesc = NULL;
	fsl_chan->idle = true;
	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
	return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_terminate_all);

int fsl_edma_pause(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	if (fsl_chan->edesc) {
		fsl_edma_disable_request(fsl_chan);
		fsl_chan->status = DMA_PAUSED;
		fsl_chan->idle = true;
	}
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_pause);

int fsl_edma_resume(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	if (fsl_chan->edesc) {
		fsl_edma_enable_request(fsl_chan);
		fsl_chan->status = DMA_IN_PROGRESS;
		fsl_chan->idle = false;
	}
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_resume);

static void fsl_edma_unprep_slave_dma(struct fsl_edma_chan *fsl_chan)
{
	if (fsl_chan->dma_dir != DMA_NONE)
		dma_unmap_resource(fsl_chan->vchan.chan.device->dev,
				   fsl_chan->dma_dev_addr,
				   fsl_chan->dma_dev_size,
				   fsl_chan->dma_dir, 0);
	fsl_chan->dma_dir = DMA_NONE;
}

static bool fsl_edma_prep_slave_dma(struct fsl_edma_chan *fsl_chan,
				    enum dma_transfer_direction dir)
{
	struct device *dev = fsl_chan->vchan.chan.device->dev;
	enum dma_data_direction dma_dir;
	phys_addr_t addr = 0;
	u32 size = 0;

	switch (dir) {
	case DMA_MEM_TO_DEV:
		dma_dir = DMA_FROM_DEVICE;
		addr = fsl_chan->cfg.dst_addr;
		size = fsl_chan->cfg.dst_maxburst;
		break;
	case DMA_DEV_TO_MEM:
		dma_dir = DMA_TO_DEVICE;
		addr = fsl_chan->cfg.src_addr;
		size = fsl_chan->cfg.src_maxburst;
		break;
	default:
		dma_dir = DMA_NONE;
		break;
	}

	/* Already mapped for this config? */
	if (fsl_chan->dma_dir == dma_dir)
		return true;

	fsl_edma_unprep_slave_dma(fsl_chan);

	fsl_chan->dma_dev_addr = dma_map_resource(dev, addr, size, dma_dir, 0);
	if (dma_mapping_error(dev, fsl_chan->dma_dev_addr))
		return false;
	fsl_chan->dma_dev_size = size;
	fsl_chan->dma_dir = dma_dir;

	return true;
}

int fsl_edma_slave_config(struct dma_chan *chan,
			  struct dma_slave_config *cfg)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);

	memcpy(&fsl_chan->cfg, cfg, sizeof(*cfg));
	fsl_edma_unprep_slave_dma(fsl_chan);

	return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_slave_config);
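
/*
 * Minimal client-side sketch of how a peripheral driver typically feeds
 * this callback through the generic dmaengine API; the peripheral address,
 * width and burst values below are made-up placeholders, not taken from
 * this driver:
 *
 *	struct dma_slave_config cfg = {
 *		.src_addr	= 0x4006a000,	// hypothetical RX FIFO address
 *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE,
 *		.src_maxburst	= 1,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 *
 * The core routes that call to the channel's device_config hook, i.e. to
 * fsl_edma_slave_config() above, which caches the config and drops any
 * stale device-address mapping.
 */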

static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
				    struct virt_dma_desc *vdesc, bool in_progress)
{
	struct fsl_edma_desc *edesc = fsl_chan->edesc;
	struct edma_regs *regs = &fsl_chan->edma->regs;
	u32 ch = fsl_chan->vchan.chan.chan_id;
	enum dma_transfer_direction dir = edesc->dirn;
	dma_addr_t cur_addr, dma_addr;
	size_t len, size;
	int i;

	/* calculate the total size in this desc */
	for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++)
		len += le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
			* le16_to_cpu(edesc->tcd[i].vtcd->biter);

	if (!in_progress)
		return len;

	if (dir == DMA_MEM_TO_DEV)
		cur_addr = edma_readl(fsl_chan->edma, &regs->tcd[ch].saddr);
	else
		cur_addr = edma_readl(fsl_chan->edma, &regs->tcd[ch].daddr);

	/* figure out the finished and calculate the residue */
	for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
		size = le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
			* le16_to_cpu(edesc->tcd[i].vtcd->biter);
		if (dir == DMA_MEM_TO_DEV)
			dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->saddr);
		else
			dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->daddr);

		len -= size;
		if (cur_addr >= dma_addr && cur_addr < dma_addr + size) {
			len += dma_addr + size - cur_addr;
			break;
		}
	}

	return len;
}
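
/*
 * Residue arithmetic, illustrated with made-up numbers: a descriptor with
 * two TCDs of nbytes = 4 and biter = 8 covers 2 * 32 = 64 bytes in total.
 * If the engine is currently 12 bytes into the second TCD, the loop above
 * subtracts 32 for each TCD and then adds back
 * dma_addr + size - cur_addr = 32 - 12 = 20, so the reported residue is the
 * 20 bytes that have not been transferred yet.
 */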

enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
				   dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	unsigned long flags;

	status = dma_cookie_status(chan, cookie, txstate);
	if (status == DMA_COMPLETE)
		return status;

	if (!txstate)
		return fsl_chan->status;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	vdesc = vchan_find_desc(&fsl_chan->vchan, cookie);
	if (fsl_chan->edesc && cookie == fsl_chan->edesc->vdesc.tx.cookie)
		txstate->residue =
			fsl_edma_desc_residue(fsl_chan, vdesc, true);
	else if (vdesc)
		txstate->residue =
			fsl_edma_desc_residue(fsl_chan, vdesc, false);
	else
		txstate->residue = 0;

	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

	return fsl_chan->status;
}
EXPORT_SYMBOL_GPL(fsl_edma_tx_status);

static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
				  struct fsl_edma_hw_tcd *tcd)
{
	struct fsl_edma_engine *edma = fsl_chan->edma;
	struct edma_regs *regs = &fsl_chan->edma->regs;
	u32 ch = fsl_chan->vchan.chan.chan_id;

	/*
	 * TCD parameters are stored in struct fsl_edma_hw_tcd in little
	 * endian format. However, we need to load the TCD registers in
	 * big- or little-endian obeying the eDMA engine model endian,
	 * and this is performed from specific edma_write functions
	 */
	edma_writew(edma, 0, &regs->tcd[ch].csr);

	edma_writel(edma, (s32)tcd->saddr, &regs->tcd[ch].saddr);
	edma_writel(edma, (s32)tcd->daddr, &regs->tcd[ch].daddr);

	edma_writew(edma, (s16)tcd->attr, &regs->tcd[ch].attr);
	edma_writew(edma, tcd->soff, &regs->tcd[ch].soff);

	edma_writel(edma, (s32)tcd->nbytes, &regs->tcd[ch].nbytes);
	edma_writel(edma, (s32)tcd->slast, &regs->tcd[ch].slast);

	edma_writew(edma, (s16)tcd->citer, &regs->tcd[ch].citer);
	edma_writew(edma, (s16)tcd->biter, &regs->tcd[ch].biter);
	edma_writew(edma, (s16)tcd->doff, &regs->tcd[ch].doff);

	edma_writel(edma, (s32)tcd->dlast_sga,
		    &regs->tcd[ch].dlast_sga);

	edma_writew(edma, (s16)tcd->csr, &regs->tcd[ch].csr);
}

static inline
void fsl_edma_fill_tcd(struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
		       u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
		       u16 biter, u16 doff, u32 dlast_sga, bool major_int,
		       bool disable_req, bool enable_sg)
{
	u16 csr = 0;

	/*
	 * eDMA hardware SGs require the TCDs to be stored in little
	 * endian format irrespective of the register endian model.
	 * So we put the value in little endian in memory, waiting
	 * for fsl_edma_set_tcd_regs doing the swap.
	 */
	tcd->saddr = cpu_to_le32(src);
	tcd->daddr = cpu_to_le32(dst);

	tcd->attr = cpu_to_le16(attr);
	tcd->soff = cpu_to_le16(soff);

	tcd->nbytes = cpu_to_le32(nbytes);
	tcd->slast = cpu_to_le32(slast);

	tcd->citer = cpu_to_le16(EDMA_TCD_CITER_CITER(citer));
	tcd->doff = cpu_to_le16(doff);

	tcd->dlast_sga = cpu_to_le32(dlast_sga);

	tcd->biter = cpu_to_le16(EDMA_TCD_BITER_BITER(biter));

	if (major_int)
		csr |= EDMA_TCD_CSR_INT_MAJOR;

	if (disable_req)
		csr |= EDMA_TCD_CSR_D_REQ;

	if (enable_sg)
		csr |= EDMA_TCD_CSR_E_SG;

	tcd->csr = cpu_to_le16(csr);
}

static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
						 int sg_len)
{
	struct fsl_edma_desc *fsl_desc;
	int i;

	fsl_desc = kzalloc(struct_size(fsl_desc, tcd, sg_len), GFP_NOWAIT);
	if (!fsl_desc)
		return NULL;

	fsl_desc->echan = fsl_chan;
	fsl_desc->n_tcds = sg_len;
	for (i = 0; i < sg_len; i++) {
		fsl_desc->tcd[i].vtcd = dma_pool_alloc(fsl_chan->tcd_pool,
					GFP_NOWAIT, &fsl_desc->tcd[i].ptcd);
		if (!fsl_desc->tcd[i].vtcd)
			goto err;
	}
	return fsl_desc;

err:
	while (--i >= 0)
		dma_pool_free(fsl_chan->tcd_pool, fsl_desc->tcd[i].vtcd,
			      fsl_desc->tcd[i].ptcd);
	kfree(fsl_desc);
	return NULL;
}

struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_desc *fsl_desc;
	dma_addr_t dma_buf_next;
	int sg_len, i;
	u32 src_addr, dst_addr, last_sg, nbytes;
	u16 soff, doff, iter;

	if (!is_slave_direction(direction))
		return NULL;

	if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
		return NULL;

	sg_len = buf_len / period_len;

	fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
	if (!fsl_desc)
		return NULL;
	fsl_desc->iscyclic = true;
	fsl_desc->dirn = direction;

	dma_buf_next = dma_addr;
	if (direction == DMA_MEM_TO_DEV) {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
		nbytes = fsl_chan->cfg.dst_addr_width *
			fsl_chan->cfg.dst_maxburst;
	} else {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
		nbytes = fsl_chan->cfg.src_addr_width *
			fsl_chan->cfg.src_maxburst;
	}

	iter = period_len / nbytes;

	for (i = 0; i < sg_len; i++) {
		if (dma_buf_next >= dma_addr + buf_len)
			dma_buf_next = dma_addr;

		/* get next sg's physical address */
		last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;

		if (direction == DMA_MEM_TO_DEV) {
			src_addr = dma_buf_next;
			dst_addr = fsl_chan->dma_dev_addr;
			soff = fsl_chan->cfg.dst_addr_width;
			doff = 0;
		} else {
			src_addr = fsl_chan->dma_dev_addr;
			dst_addr = dma_buf_next;
			soff = 0;
			doff = fsl_chan->cfg.src_addr_width;
		}

		fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr, dst_addr,
				  fsl_chan->attr, soff, nbytes, 0, iter,
				  iter, doff, last_sg, true, false, true);
		dma_buf_next += period_len;
	}

	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
EXPORT_SYMBOL_GPL(fsl_edma_prep_dma_cyclic);
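
/*
 * Typical use of the cyclic path, sketched from the client side (the buffer
 * sizes are arbitrary): an audio- or UART-style driver maps a 4 KiB ring
 * buffer and asks for 512-byte periods, i.e. sg_len = 4096 / 512 = 8 TCDs.
 * Each TCD raises a major-loop interrupt and chains to the next one via
 * dlast_sga, with the last TCD pointing back to the first:
 *
 *	desc = dmaengine_prep_dma_cyclic(chan, buf_dma, 4096, 512,
 *					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	if (desc) {
 *		desc->callback = my_period_elapsed;	// hypothetical callback
 *		dmaengine_submit(desc);
 *		dma_async_issue_pending(chan);
 *	}
 */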

struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_desc *fsl_desc;
	struct scatterlist *sg;
	u32 src_addr, dst_addr, last_sg, nbytes;
	u16 soff, doff, iter;
	int i;

	if (!is_slave_direction(direction))
		return NULL;

	if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
		return NULL;

	fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
	if (!fsl_desc)
		return NULL;
	fsl_desc->iscyclic = false;
	fsl_desc->dirn = direction;

	if (direction == DMA_MEM_TO_DEV) {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
		nbytes = fsl_chan->cfg.dst_addr_width *
			fsl_chan->cfg.dst_maxburst;
	} else {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
		nbytes = fsl_chan->cfg.src_addr_width *
			fsl_chan->cfg.src_maxburst;
	}

	for_each_sg(sgl, sg, sg_len, i) {
		/* get next sg's physical address */
		last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;

		if (direction == DMA_MEM_TO_DEV) {
			src_addr = sg_dma_address(sg);
			dst_addr = fsl_chan->dma_dev_addr;
			soff = fsl_chan->cfg.dst_addr_width;
			doff = 0;
		} else {
			src_addr = fsl_chan->dma_dev_addr;
			dst_addr = sg_dma_address(sg);
			soff = 0;
			doff = fsl_chan->cfg.src_addr_width;
		}

		iter = sg_dma_len(sg) / nbytes;
		if (i < sg_len - 1) {
			last_sg = fsl_desc->tcd[(i + 1)].ptcd;
			fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
					  dst_addr, fsl_chan->attr, soff,
					  nbytes, 0, iter, iter, doff, last_sg,
					  false, false, true);
		} else {
			last_sg = 0;
			fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
					  dst_addr, fsl_chan->attr, soff,
					  nbytes, 0, iter, iter, doff, last_sg,
					  true, true, false);
		}
	}

	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
EXPORT_SYMBOL_GPL(fsl_edma_prep_slave_sg);
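
/*
 * Note on the TCD chaining above: every scatterlist element except the last
 * is filled with enable_sg = true, so the engine fetches the next TCD from
 * dlast_sga on its own, while the final element sets major_int and
 * disable_req instead, so the hardware request is cleared and the completion
 * interrupt fires once the whole scatterlist has been walked.
 */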

void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
{
	struct virt_dma_desc *vdesc;

	lockdep_assert_held(&fsl_chan->vchan.lock);

	vdesc = vchan_next_desc(&fsl_chan->vchan);
	if (!vdesc)
		return;
	fsl_chan->edesc = to_fsl_edma_desc(vdesc);
	fsl_edma_set_tcd_regs(fsl_chan, fsl_chan->edesc->tcd[0].vtcd);
	fsl_edma_enable_request(fsl_chan);
	fsl_chan->status = DMA_IN_PROGRESS;
	fsl_chan->idle = false;
}
EXPORT_SYMBOL_GPL(fsl_edma_xfer_desc);

void fsl_edma_issue_pending(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);

	if (unlikely(fsl_chan->pm_state != RUNNING)) {
		spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
		/* cannot submit due to suspend */
		return;
	}

	if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->edesc)
		fsl_edma_xfer_desc(fsl_chan);

	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
}
EXPORT_SYMBOL_GPL(fsl_edma_issue_pending);
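
/*
 * The expected call order from a dmaengine client, sketched with the
 * standard dmaengine API (not specific to this driver): configure, prepare,
 * submit, then kick the channel so fsl_edma_issue_pending() can start the
 * first descriptor:
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
 *				       DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);		// -> fsl_edma_issue_pending()
 */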

int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);

	fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
				sizeof(struct fsl_edma_hw_tcd),
				32, 0);
	return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_alloc_chan_resources);

void fsl_edma_free_chan_resources(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	fsl_edma_disable_request(fsl_chan);
	fsl_edma_chan_mux(fsl_chan, 0, false);
	fsl_chan->edesc = NULL;
	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
	fsl_edma_unprep_slave_dma(fsl_chan);
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
	dma_pool_destroy(fsl_chan->tcd_pool);
	fsl_chan->tcd_pool = NULL;
}
EXPORT_SYMBOL_GPL(fsl_edma_free_chan_resources);

void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
{
	struct fsl_edma_chan *chan, *_chan;

	list_for_each_entry_safe(chan, _chan,
				 &dmadev->channels, vchan.chan.device_node) {
		list_del(&chan->vchan.chan.device_node);
		tasklet_kill(&chan->vchan.task);
	}
}
EXPORT_SYMBOL_GPL(fsl_edma_cleanup_vchan);

/*
 * On the 32 channels Vybrid/mpc577x edma version (here called "v1"),
 * register offsets are different compared to ColdFire mcf5441x 64 channels
 * edma (here called "v2").
 *
 * This function sets up register offsets as per proper declared version
 * so must be called in xxx_edma_probe() just after setting the
 * edma "version" and "membase" appropriately.
 */
void fsl_edma_setup_regs(struct fsl_edma_engine *edma)
{
	edma->regs.cr = edma->membase + EDMA_CR;
	edma->regs.es = edma->membase + EDMA_ES;
	edma->regs.erql = edma->membase + EDMA_ERQ;
	edma->regs.eeil = edma->membase + EDMA_EEI;

	edma->regs.serq = edma->membase + ((edma->drvdata->version == v2) ?
			EDMA64_SERQ : EDMA_SERQ);
	edma->regs.cerq = edma->membase + ((edma->drvdata->version == v2) ?
			EDMA64_CERQ : EDMA_CERQ);
	edma->regs.seei = edma->membase + ((edma->drvdata->version == v2) ?
			EDMA64_SEEI : EDMA_SEEI);
	edma->regs.ceei = edma->membase + ((edma->drvdata->version == v2) ?
			EDMA64_CEEI : EDMA_CEEI);
	edma->regs.cint = edma->membase + ((edma->drvdata->version == v2) ?
			EDMA64_CINT : EDMA_CINT);
	edma->regs.cerr = edma->membase + ((edma->drvdata->version == v2) ?
			EDMA64_CERR : EDMA_CERR);
	edma->regs.ssrt = edma->membase + ((edma->drvdata->version == v2) ?
			EDMA64_SSRT : EDMA_SSRT);
	edma->regs.cdne = edma->membase + ((edma->drvdata->version == v2) ?
			EDMA64_CDNE : EDMA_CDNE);
	edma->regs.intl = edma->membase + ((edma->drvdata->version == v2) ?
			EDMA64_INTL : EDMA_INTR);
	edma->regs.errl = edma->membase + ((edma->drvdata->version == v2) ?
			EDMA64_ERRL : EDMA_ERR);

	if (edma->drvdata->version == v2) {
		edma->regs.erqh = edma->membase + EDMA64_ERQH;
		edma->regs.eeih = edma->membase + EDMA64_EEIH;
		edma->regs.errh = edma->membase + EDMA64_ERRH;
		edma->regs.inth = edma->membase + EDMA64_INTH;
	}

	edma->regs.tcd = edma->membase + EDMA_TCD;
}
EXPORT_SYMBOL_GPL(fsl_edma_setup_regs);
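
/*
 * Probe-time usage, roughly (a hedged sketch; the real sequence lives in the
 * platform front-ends such as fsl-edma.c and mcf-edma.c): the probe routine
 * maps the controller, fills in membase and drvdata, and only then derives
 * the per-register pointers used throughout this file:
 *
 *	fsl_edma->membase = devm_platform_ioremap_resource(pdev, 0);
 *	fsl_edma->drvdata = drvdata;	// selects the v1/v2/v3 register layout
 *	fsl_edma_setup_regs(fsl_edma);
 */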

MODULE_LICENSE("GPL v2");