tsi721_dma.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * DMA Engine support for Tsi721 PCIExpress-to-SRIO bridge
 *
 * Copyright (c) 2011-2014 Integrated Device Technology, Inc.
 * Alexandre Bounine <alexandre.bounine@idt.com>
 */

#include <linux/io.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kfifo.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include "../../dma/dmaengine.h"
#include "tsi721.h"

#ifdef CONFIG_PCI_MSI
static irqreturn_t tsi721_bdma_msix(int irq, void *ptr);
#endif
static int tsi721_submit_sg(struct tsi721_tx_desc *desc);

static unsigned int dma_desc_per_channel = 128;
module_param(dma_desc_per_channel, uint, S_IRUGO);
MODULE_PARM_DESC(dma_desc_per_channel,
		 "Number of DMA descriptors per channel (default: 128)");

static unsigned int dma_txqueue_sz = 16;
module_param(dma_txqueue_sz, uint, S_IRUGO);
MODULE_PARM_DESC(dma_txqueue_sz,
		 "DMA Transactions Queue Size (default: 16)");

static u8 dma_sel = 0x7f;
module_param(dma_sel, byte, S_IRUGO);
MODULE_PARM_DESC(dma_sel,
		 "DMA Channel Selection Mask (default: 0x7f = all)");

static inline struct tsi721_bdma_chan *to_tsi721_chan(struct dma_chan *chan)
{
	return container_of(chan, struct tsi721_bdma_chan, dchan);
}

static inline struct tsi721_device *to_tsi721(struct dma_device *ddev)
{
	return container_of(ddev, struct rio_mport, dma)->priv;
}

static inline
struct tsi721_tx_desc *to_tsi721_desc(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct tsi721_tx_desc, txd);
}
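
/*
 * tsi721_bdma_ch_init - set up hardware resources for one BDMA channel.
 *
 * Allocates a coherent ring of bd_num buffer descriptors plus one extra
 * DTYPE3 link descriptor that points back to the ring start, allocates the
 * descriptor status FIFO (at least TSI721_DMA_MINSTSSZ entries, rounded up
 * to a power of two), programs both base addresses into the channel
 * registers and, in MSI-X mode, requests the per-channel DONE and INT
 * vectors. Returns 0 on success, -ENOMEM on allocation failure, or -EIO
 * if an MSI-X vector cannot be requested.
 */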
static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan, int bd_num)
{
	struct tsi721_dma_desc *bd_ptr;
	struct device *dev = bdma_chan->dchan.device->dev;
	u64 *sts_ptr;
	dma_addr_t bd_phys;
	dma_addr_t sts_phys;
	int sts_size;
#ifdef CONFIG_PCI_MSI
	struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);
#endif

	tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d", bdma_chan->id);

	/*
	 * Allocate space for DMA descriptors
	 * (add an extra element for link descriptor)
	 */
	bd_ptr = dma_alloc_coherent(dev,
				    (bd_num + 1) * sizeof(struct tsi721_dma_desc),
				    &bd_phys, GFP_ATOMIC);
	if (!bd_ptr)
		return -ENOMEM;

	bdma_chan->bd_num = bd_num;
	bdma_chan->bd_phys = bd_phys;
	bdma_chan->bd_base = bd_ptr;

	tsi_debug(DMA, &bdma_chan->dchan.dev->device,
		  "DMAC%d descriptors @ %p (phys = %pad)",
		  bdma_chan->id, bd_ptr, &bd_phys);

	/* Allocate space for descriptor status FIFO */
	sts_size = ((bd_num + 1) >= TSI721_DMA_MINSTSSZ) ?
		   (bd_num + 1) : TSI721_DMA_MINSTSSZ;
	sts_size = roundup_pow_of_two(sts_size);
	sts_ptr = dma_alloc_coherent(dev,
				     sts_size * sizeof(struct tsi721_dma_sts),
				     &sts_phys, GFP_ATOMIC);
	if (!sts_ptr) {
		/* Free space allocated for DMA descriptors */
		dma_free_coherent(dev,
				  (bd_num + 1) * sizeof(struct tsi721_dma_desc),
				  bd_ptr, bd_phys);
		bdma_chan->bd_base = NULL;
		return -ENOMEM;
	}

	bdma_chan->sts_phys = sts_phys;
	bdma_chan->sts_base = sts_ptr;
	bdma_chan->sts_size = sts_size;

	tsi_debug(DMA, &bdma_chan->dchan.dev->device,
		  "DMAC%d desc status FIFO @ %p (phys = %pad) size=0x%x",
		  bdma_chan->id, sts_ptr, &sts_phys, sts_size);

	/* Initialize DMA descriptors ring using added link descriptor */
	bd_ptr[bd_num].type_id = cpu_to_le32(DTYPE3 << 29);
	bd_ptr[bd_num].next_lo = cpu_to_le32((u64)bd_phys &
					     TSI721_DMAC_DPTRL_MASK);
	bd_ptr[bd_num].next_hi = cpu_to_le32((u64)bd_phys >> 32);

	/* Setup DMA descriptor pointers */
	iowrite32(((u64)bd_phys >> 32),
		  bdma_chan->regs + TSI721_DMAC_DPTRH);
	iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK),
		  bdma_chan->regs + TSI721_DMAC_DPTRL);

	/* Setup descriptor status FIFO */
	iowrite32(((u64)sts_phys >> 32),
		  bdma_chan->regs + TSI721_DMAC_DSBH);
	iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK),
		  bdma_chan->regs + TSI721_DMAC_DSBL);
	iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size),
		  bdma_chan->regs + TSI721_DMAC_DSSZ);

	/* Clear interrupt bits */
	iowrite32(TSI721_DMAC_INT_ALL,
		  bdma_chan->regs + TSI721_DMAC_INT);

	ioread32(bdma_chan->regs + TSI721_DMAC_INT);

#ifdef CONFIG_PCI_MSI
	/* Request interrupt service if we are in MSI-X mode */
	if (priv->flags & TSI721_USING_MSIX) {
		int rc, idx;

		idx = TSI721_VECT_DMA0_DONE + bdma_chan->id;
		rc = request_irq(priv->msix[idx].vector, tsi721_bdma_msix, 0,
				 priv->msix[idx].irq_name, (void *)bdma_chan);
		if (rc) {
			tsi_debug(DMA, &bdma_chan->dchan.dev->device,
				  "Unable to get MSI-X for DMAC%d-DONE",
				  bdma_chan->id);
			goto err_out;
		}

		idx = TSI721_VECT_DMA0_INT + bdma_chan->id;
		rc = request_irq(priv->msix[idx].vector, tsi721_bdma_msix, 0,
				 priv->msix[idx].irq_name, (void *)bdma_chan);
		if (rc) {
			tsi_debug(DMA, &bdma_chan->dchan.dev->device,
				  "Unable to get MSI-X for DMAC%d-INT",
				  bdma_chan->id);
			free_irq(priv->msix[TSI721_VECT_DMA0_DONE +
					    bdma_chan->id].vector,
				 (void *)bdma_chan);
		}

err_out:
		if (rc) {
			/* Free space allocated for DMA descriptors */
			dma_free_coherent(dev,
					  (bd_num + 1) * sizeof(struct tsi721_dma_desc),
					  bd_ptr, bd_phys);
			bdma_chan->bd_base = NULL;

			/* Free space allocated for status descriptors */
			dma_free_coherent(dev,
					  sts_size * sizeof(struct tsi721_dma_sts),
					  sts_ptr, sts_phys);
			bdma_chan->sts_base = NULL;

			return -EIO;
		}
	}
#endif /* CONFIG_PCI_MSI */

	/* Toggle DMA channel initialization */
	iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL);
	ioread32(bdma_chan->regs + TSI721_DMAC_CTL);
	bdma_chan->wr_count = bdma_chan->wr_count_next = 0;
	bdma_chan->sts_rdptr = 0;
	udelay(10);

	return 0;
}
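
/*
 * tsi721_bdma_ch_free - release hardware resources of one BDMA channel.
 *
 * Fails with -EFAULT if the channel is still running. Otherwise puts the
 * channel back into init state, releases its MSI-X vectors (if used) and
 * frees the descriptor ring and status FIFO. Calling it on a channel that
 * was never initialized is a no-op.
 */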
static int tsi721_bdma_ch_free(struct tsi721_bdma_chan *bdma_chan)
{
	u32 ch_stat;
#ifdef CONFIG_PCI_MSI
	struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);
#endif

	if (!bdma_chan->bd_base)
		return 0;

	/* Check if DMA channel still running */
	ch_stat = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
	if (ch_stat & TSI721_DMAC_STS_RUN)
		return -EFAULT;

	/* Put DMA channel into init state */
	iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL);

#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX) {
		free_irq(priv->msix[TSI721_VECT_DMA0_DONE +
				    bdma_chan->id].vector, (void *)bdma_chan);
		free_irq(priv->msix[TSI721_VECT_DMA0_INT +
				    bdma_chan->id].vector, (void *)bdma_chan);
	}
#endif /* CONFIG_PCI_MSI */

	/* Free space allocated for DMA descriptors */
	dma_free_coherent(bdma_chan->dchan.device->dev,
			  (bdma_chan->bd_num + 1) * sizeof(struct tsi721_dma_desc),
			  bdma_chan->bd_base, bdma_chan->bd_phys);
	bdma_chan->bd_base = NULL;

	/* Free space allocated for status FIFO */
	dma_free_coherent(bdma_chan->dchan.device->dev,
			  bdma_chan->sts_size * sizeof(struct tsi721_dma_sts),
			  bdma_chan->sts_base, bdma_chan->sts_phys);
	bdma_chan->sts_base = NULL;
	return 0;
}

static void
tsi721_bdma_interrupt_enable(struct tsi721_bdma_chan *bdma_chan, int enable)
{
	if (enable) {
		/* Clear pending BDMA channel interrupts */
		iowrite32(TSI721_DMAC_INT_ALL,
			  bdma_chan->regs + TSI721_DMAC_INT);
		ioread32(bdma_chan->regs + TSI721_DMAC_INT);
		/* Enable BDMA channel interrupts */
		iowrite32(TSI721_DMAC_INT_ALL,
			  bdma_chan->regs + TSI721_DMAC_INTE);
	} else {
		/* Disable BDMA channel interrupts */
		iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
		/* Clear pending BDMA channel interrupts */
		iowrite32(TSI721_DMAC_INT_ALL,
			  bdma_chan->regs + TSI721_DMAC_INT);
	}
}

static bool tsi721_dma_is_idle(struct tsi721_bdma_chan *bdma_chan)
{
	u32 sts;

	sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
	return ((sts & TSI721_DMAC_STS_RUN) == 0);
}
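
/*
 * tsi721_bdma_handler - top half for BDMA channel events signaled through
 * the shared Tsi721 interrupt. It only masks further channel interrupts
 * and defers all processing to the channel tasklet, which re-enables the
 * interrupts once it is done.
 */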
void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan)
{
	/* Disable BDMA channel interrupts */
	iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
	if (bdma_chan->active)
		tasklet_hi_schedule(&bdma_chan->tasklet);
}

#ifdef CONFIG_PCI_MSI
/**
 * tsi721_bdma_msix - MSI-X interrupt handler for BDMA channels
 * @irq: Linux interrupt number
 * @ptr: Pointer to interrupt-specific data (BDMA channel structure)
 *
 * Handles BDMA channel interrupts signaled using MSI-X.
 */
static irqreturn_t tsi721_bdma_msix(int irq, void *ptr)
{
	struct tsi721_bdma_chan *bdma_chan = ptr;

	if (bdma_chan->active)
		tasklet_hi_schedule(&bdma_chan->tasklet);
	return IRQ_HANDLED;
}
#endif /* CONFIG_PCI_MSI */

/* Must be called with the spinlock held */
static void tsi721_start_dma(struct tsi721_bdma_chan *bdma_chan)
{
	if (!tsi721_dma_is_idle(bdma_chan)) {
		tsi_err(&bdma_chan->dchan.dev->device,
			"DMAC%d Attempt to start non-idle channel",
			bdma_chan->id);
		return;
	}

	if (bdma_chan->wr_count == bdma_chan->wr_count_next) {
		tsi_err(&bdma_chan->dchan.dev->device,
			"DMAC%d Attempt to start DMA with no BDs ready %d",
			bdma_chan->id, task_pid_nr(current));
		return;
	}

	tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d (wrc=%d) %d",
		  bdma_chan->id, bdma_chan->wr_count_next,
		  task_pid_nr(current));

	iowrite32(bdma_chan->wr_count_next,
		  bdma_chan->regs + TSI721_DMAC_DWRCNT);
	ioread32(bdma_chan->regs + TSI721_DMAC_DWRCNT);

	bdma_chan->wr_count = bdma_chan->wr_count_next;
}
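
/*
 * Type 1 (data transfer) descriptor layout, as encoded below: the target
 * destID and request type go into type_id; the two least-significant bits
 * of the 66-bit RapidIO address land in bcount[31:30] together with the
 * system size; the remaining address bits, including the two bits from
 * rio_addr_u, form the raddr_lo/raddr_hi pair. The byte count itself is
 * filled in later by tsi721_desc_fill_end().
 */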
static int
tsi721_desc_fill_init(struct tsi721_tx_desc *desc,
		      struct tsi721_dma_desc *bd_ptr,
		      struct scatterlist *sg, u32 sys_size)
{
	u64 rio_addr;

	if (!bd_ptr)
		return -EINVAL;

	/* Initialize DMA descriptor */
	bd_ptr->type_id = cpu_to_le32((DTYPE1 << 29) |
				      (desc->rtype << 19) | desc->destid);
	bd_ptr->bcount = cpu_to_le32(((desc->rio_addr & 0x3) << 30) |
				     (sys_size << 26));
	rio_addr = (desc->rio_addr >> 2) |
		   ((u64)(desc->rio_addr_u & 0x3) << 62);
	bd_ptr->raddr_lo = cpu_to_le32(rio_addr & 0xffffffff);
	bd_ptr->raddr_hi = cpu_to_le32(rio_addr >> 32);
	bd_ptr->t1.bufptr_lo = cpu_to_le32(
				(u64)sg_dma_address(sg) & 0xffffffff);
	bd_ptr->t1.bufptr_hi = cpu_to_le32((u64)sg_dma_address(sg) >> 32);
	bd_ptr->t1.s_dist = 0;
	bd_ptr->t1.s_size = 0;

	return 0;
}

static int
tsi721_desc_fill_end(struct tsi721_dma_desc *bd_ptr, u32 bcount, bool interrupt)
{
	if (!bd_ptr)
		return -EINVAL;

	/* Update DMA descriptor */
	if (interrupt)
		bd_ptr->type_id |= cpu_to_le32(TSI721_DMAD_IOF);
	bd_ptr->bcount |= cpu_to_le32(bcount & TSI721_DMAD_BCOUNT1);

	return 0;
}

static void tsi721_dma_tx_err(struct tsi721_bdma_chan *bdma_chan,
			      struct tsi721_tx_desc *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->txd;
	dma_async_tx_callback callback = txd->callback;
	void *param = txd->callback_param;

	list_move(&desc->desc_node, &bdma_chan->free_list);

	if (callback)
		callback(param);
}

static void tsi721_clr_stat(struct tsi721_bdma_chan *bdma_chan)
{
	u32 srd_ptr;
	u64 *sts_ptr;
	int i, j;

	/* Check and clear descriptor status FIFO entries */
	srd_ptr = bdma_chan->sts_rdptr;
	sts_ptr = bdma_chan->sts_base;
	j = srd_ptr * 8;
	while (sts_ptr[j]) {
		for (i = 0; i < 8 && sts_ptr[j]; i++, j++)
			sts_ptr[j] = 0;

		++srd_ptr;
		srd_ptr %= bdma_chan->sts_size;
		j = srd_ptr * 8;
	}

	iowrite32(srd_ptr, bdma_chan->regs + TSI721_DMAC_DSRP);
	bdma_chan->sts_rdptr = srd_ptr;
}
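
/*
 * tsi721_submit_sg - convert the SG list of a transaction into hardware
 * buffer descriptors. Contiguous SG entries are merged into a single
 * descriptor as long as the total stays within TSI721_BDMA_MAX_BCOUNT,
 * and the write index silently skips the link descriptor at the end of
 * the ring. If the ring fills up before the SG list is exhausted, the
 * remainder is left in the transaction descriptor so that the tasklet
 * can resubmit it once the hardware catches up.
 */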
/* Must be called with the channel spinlock held */
static int tsi721_submit_sg(struct tsi721_tx_desc *desc)
{
	struct dma_chan *dchan = desc->txd.chan;
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	u32 sys_size;
	u64 rio_addr;
	dma_addr_t next_addr;
	u32 bcount;
	struct scatterlist *sg;
	unsigned int i;
	int err = 0;
	struct tsi721_dma_desc *bd_ptr = NULL;
	u32 idx, rd_idx;
	u32 add_count = 0;
	struct device *ch_dev = &dchan->dev->device;

	if (!tsi721_dma_is_idle(bdma_chan)) {
		tsi_err(ch_dev, "DMAC%d ERR: Attempt to use non-idle channel",
			bdma_chan->id);
		return -EIO;
	}

	/*
	 * Fill DMA channel's hardware buffer descriptors.
	 * (NOTE: RapidIO destination address is limited to 64 bits for now)
	 */
	rio_addr = desc->rio_addr;
	next_addr = -1;
	bcount = 0;
	sys_size = dma_to_mport(dchan->device)->sys_size;

	rd_idx = ioread32(bdma_chan->regs + TSI721_DMAC_DRDCNT);
	rd_idx %= (bdma_chan->bd_num + 1);

	idx = bdma_chan->wr_count_next % (bdma_chan->bd_num + 1);
	if (idx == bdma_chan->bd_num) {
		/* wrap around link descriptor */
		idx = 0;
		add_count++;
	}

	tsi_debug(DMA, ch_dev, "DMAC%d BD ring status: rdi=%d wri=%d",
		  bdma_chan->id, rd_idx, idx);

	for_each_sg(desc->sg, sg, desc->sg_len, i) {

		tsi_debug(DMAV, ch_dev, "DMAC%d sg%d/%d addr: 0x%llx len: %d",
			  bdma_chan->id, i, desc->sg_len,
			  (unsigned long long)sg_dma_address(sg), sg_dma_len(sg));

		if (sg_dma_len(sg) > TSI721_BDMA_MAX_BCOUNT) {
			tsi_err(ch_dev, "DMAC%d SG entry %d is too large",
				bdma_chan->id, i);
			err = -EINVAL;
			break;
		}

		/*
		 * If this sg entry forms contiguous block with previous one,
		 * try to merge it into existing DMA descriptor
		 */
		if (next_addr == sg_dma_address(sg) &&
		    bcount + sg_dma_len(sg) <= TSI721_BDMA_MAX_BCOUNT) {
			/* Adjust byte count of the descriptor */
			bcount += sg_dma_len(sg);
			goto entry_done;
		} else if (next_addr != -1) {
			/* Finalize descriptor using total byte count value */
			tsi721_desc_fill_end(bd_ptr, bcount, 0);
			tsi_debug(DMAV, ch_dev, "DMAC%d prev desc final len: %d",
				  bdma_chan->id, bcount);
		}

		desc->rio_addr = rio_addr;

		if (i && idx == rd_idx) {
			tsi_debug(DMAV, ch_dev,
				  "DMAC%d HW descriptor ring is full @ %d",
				  bdma_chan->id, i);
			desc->sg = sg;
			desc->sg_len -= i;
			break;
		}

		bd_ptr = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[idx];
		err = tsi721_desc_fill_init(desc, bd_ptr, sg, sys_size);
		if (err) {
			tsi_err(ch_dev, "Failed to build desc: err=%d", err);
			break;
		}

		tsi_debug(DMAV, ch_dev, "DMAC%d bd_ptr = %p did=%d raddr=0x%llx",
			  bdma_chan->id, bd_ptr, desc->destid, desc->rio_addr);

		next_addr = sg_dma_address(sg);
		bcount = sg_dma_len(sg);

		add_count++;
		if (++idx == bdma_chan->bd_num) {
			/* wrap around link descriptor */
			idx = 0;
			add_count++;
		}

entry_done:
		if (sg_is_last(sg)) {
			tsi721_desc_fill_end(bd_ptr, bcount, 0);
			tsi_debug(DMAV, ch_dev,
				  "DMAC%d last desc final len: %d",
				  bdma_chan->id, bcount);
			desc->sg_len = 0;
		} else {
			rio_addr += sg_dma_len(sg);
			next_addr += sg_dma_len(sg);
		}
	}

	if (!err)
		bdma_chan->wr_count_next += add_count;

	return err;
}
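
/*
 * tsi721_advance_work - start a new portion of DMA work if the channel is
 * idle: either continue the partially submitted transaction passed in, or
 * pull the next transaction from the pending queue. Must be called with
 * the channel spinlock held.
 */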
static void tsi721_advance_work(struct tsi721_bdma_chan *bdma_chan,
				struct tsi721_tx_desc *desc)
{
	int err;

	tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d", bdma_chan->id);

	if (!tsi721_dma_is_idle(bdma_chan))
		return;

	/*
	 * If there is no data transfer in progress, fetch new descriptor from
	 * the pending queue.
	 */
	if (!desc && !bdma_chan->active_tx && !list_empty(&bdma_chan->queue)) {
		desc = list_first_entry(&bdma_chan->queue,
					struct tsi721_tx_desc, desc_node);
		list_del_init((&desc->desc_node));
		bdma_chan->active_tx = desc;
	}

	if (desc) {
		err = tsi721_submit_sg(desc);
		if (!err)
			tsi721_start_dma(bdma_chan);
		else {
			tsi721_dma_tx_err(bdma_chan, desc);
			tsi_debug(DMA, &bdma_chan->dchan.dev->device,
				  "DMAC%d ERR: tsi721_submit_sg failed with err=%d",
				  bdma_chan->id, err);
		}
	}

	tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d Exit",
		  bdma_chan->id);
}
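
/*
 * tsi721_dma_tasklet - bottom half for BDMA channel interrupts. On an
 * error interrupt it waits for the hardware abort to complete, restores
 * the descriptor ring and status FIFO pointers, and completes the active
 * transaction with DMA_ERROR. On a DONE/IOFDONE interrupt it either
 * completes the active transaction and invokes its callback, or resubmits
 * the unfinished part of its SG list. Channel interrupts are re-enabled
 * on exit.
 */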
static void tsi721_dma_tasklet(unsigned long data)
{
	struct tsi721_bdma_chan *bdma_chan = (struct tsi721_bdma_chan *)data;
	u32 dmac_int, dmac_sts;

	dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT);
	tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d_INT = 0x%x",
		  bdma_chan->id, dmac_int);
	/* Clear channel interrupts */
	iowrite32(dmac_int, bdma_chan->regs + TSI721_DMAC_INT);

	if (dmac_int & TSI721_DMAC_INT_ERR) {
		int i = 10000;
		struct tsi721_tx_desc *desc;

		desc = bdma_chan->active_tx;
		dmac_sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
		tsi_err(&bdma_chan->dchan.dev->device,
			"DMAC%d_STS = 0x%x did=%d raddr=0x%llx",
			bdma_chan->id, dmac_sts, desc->destid, desc->rio_addr);

		/* Re-initialize DMA channel if possible */
		if ((dmac_sts & TSI721_DMAC_STS_ABORT) == 0)
			goto err_out;

		tsi721_clr_stat(bdma_chan);

		spin_lock(&bdma_chan->lock);

		/* Put DMA channel into init state */
		iowrite32(TSI721_DMAC_CTL_INIT,
			  bdma_chan->regs + TSI721_DMAC_CTL);
		do {
			udelay(1);
			dmac_sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
			i--;
		} while ((dmac_sts & TSI721_DMAC_STS_ABORT) && i);

		if (dmac_sts & TSI721_DMAC_STS_ABORT) {
			tsi_err(&bdma_chan->dchan.dev->device,
				"Failed to re-initiate DMAC%d", bdma_chan->id);
			spin_unlock(&bdma_chan->lock);
			goto err_out;
		}

		/* Setup DMA descriptor pointers */
		iowrite32(((u64)bdma_chan->bd_phys >> 32),
			  bdma_chan->regs + TSI721_DMAC_DPTRH);
		iowrite32(((u64)bdma_chan->bd_phys & TSI721_DMAC_DPTRL_MASK),
			  bdma_chan->regs + TSI721_DMAC_DPTRL);

		/* Setup descriptor status FIFO */
		iowrite32(((u64)bdma_chan->sts_phys >> 32),
			  bdma_chan->regs + TSI721_DMAC_DSBH);
		iowrite32(((u64)bdma_chan->sts_phys & TSI721_DMAC_DSBL_MASK),
			  bdma_chan->regs + TSI721_DMAC_DSBL);
		iowrite32(TSI721_DMAC_DSSZ_SIZE(bdma_chan->sts_size),
			  bdma_chan->regs + TSI721_DMAC_DSSZ);

		/* Clear interrupt bits */
		iowrite32(TSI721_DMAC_INT_ALL,
			  bdma_chan->regs + TSI721_DMAC_INT);

		ioread32(bdma_chan->regs + TSI721_DMAC_INT);

		bdma_chan->wr_count = bdma_chan->wr_count_next = 0;
		bdma_chan->sts_rdptr = 0;
		udelay(10);

		desc = bdma_chan->active_tx;
		desc->status = DMA_ERROR;
		dma_cookie_complete(&desc->txd);
		list_add(&desc->desc_node, &bdma_chan->free_list);
		bdma_chan->active_tx = NULL;
		if (bdma_chan->active)
			tsi721_advance_work(bdma_chan, NULL);
		spin_unlock(&bdma_chan->lock);
	}

	if (dmac_int & TSI721_DMAC_INT_STFULL) {
		tsi_err(&bdma_chan->dchan.dev->device,
			"DMAC%d descriptor status FIFO is full",
			bdma_chan->id);
	}

	if (dmac_int & (TSI721_DMAC_INT_DONE | TSI721_DMAC_INT_IOFDONE)) {
		struct tsi721_tx_desc *desc;

		tsi721_clr_stat(bdma_chan);
		spin_lock(&bdma_chan->lock);
		desc = bdma_chan->active_tx;

		if (desc->sg_len == 0) {
			dma_async_tx_callback callback = NULL;
			void *param = NULL;

			desc->status = DMA_COMPLETE;
			dma_cookie_complete(&desc->txd);
			if (desc->txd.flags & DMA_PREP_INTERRUPT) {
				callback = desc->txd.callback;
				param = desc->txd.callback_param;
			}
			list_add(&desc->desc_node, &bdma_chan->free_list);
			bdma_chan->active_tx = NULL;
			if (bdma_chan->active)
				tsi721_advance_work(bdma_chan, NULL);
			spin_unlock(&bdma_chan->lock);
			if (callback)
				callback(param);
		} else {
			if (bdma_chan->active)
				tsi721_advance_work(bdma_chan,
						    bdma_chan->active_tx);
			spin_unlock(&bdma_chan->lock);
		}
	}
err_out:
	/* Re-Enable BDMA channel interrupts */
	iowrite32(TSI721_DMAC_INT_ALL, bdma_chan->regs + TSI721_DMAC_INTE);
}

static dma_cookie_t tsi721_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct tsi721_tx_desc *desc = to_tsi721_desc(txd);
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(txd->chan);
	dma_cookie_t cookie;

	/* Check if the descriptor is detached from any lists */
	if (!list_empty(&desc->desc_node)) {
		tsi_err(&bdma_chan->dchan.dev->device,
			"DMAC%d wrong state of descriptor %p",
			bdma_chan->id, txd);
		return -EIO;
	}

	spin_lock_bh(&bdma_chan->lock);

	if (!bdma_chan->active) {
		spin_unlock_bh(&bdma_chan->lock);
		return -ENODEV;
	}

	cookie = dma_cookie_assign(txd);
	desc->status = DMA_IN_PROGRESS;
	list_add_tail(&desc->desc_node, &bdma_chan->queue);
	tsi721_advance_work(bdma_chan, NULL);

	spin_unlock_bh(&bdma_chan->lock);
	return cookie;
}
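
/*
 * tsi721_alloc_chan_resources - dmaengine hook that materializes the
 * channel on first use: initializes the BDMA hardware with
 * dma_desc_per_channel buffer descriptors, allocates dma_txqueue_sz
 * software transaction descriptors for the free list, and enables the
 * channel interrupts. Returns the number of available transaction slots.
 */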
static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	struct tsi721_tx_desc *desc;
	int i;

	tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id);

	if (bdma_chan->bd_base)
		return dma_txqueue_sz;

	/* Initialize BDMA channel */
	if (tsi721_bdma_ch_init(bdma_chan, dma_desc_per_channel)) {
		tsi_err(&dchan->dev->device, "Unable to initialize DMAC%d",
			bdma_chan->id);
		return -ENODEV;
	}

	/* Allocate queue of transaction descriptors */
	desc = kcalloc(dma_txqueue_sz, sizeof(struct tsi721_tx_desc),
		       GFP_ATOMIC);
	if (!desc) {
		tsi721_bdma_ch_free(bdma_chan);
		return -ENOMEM;
	}

	bdma_chan->tx_desc = desc;

	for (i = 0; i < dma_txqueue_sz; i++) {
		dma_async_tx_descriptor_init(&desc[i].txd, dchan);
		desc[i].txd.tx_submit = tsi721_tx_submit;
		desc[i].txd.flags = DMA_CTRL_ACK;
		list_add(&desc[i].desc_node, &bdma_chan->free_list);
	}

	dma_cookie_init(dchan);

	bdma_chan->active = true;
	tsi721_bdma_interrupt_enable(bdma_chan, 1);

	return dma_txqueue_sz;
}
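
/*
 * Wait for any interrupt handler still running on behalf of this channel:
 * both per-channel MSI-X vectors in MSI-X mode, or the shared PCI
 * interrupt otherwise.
 */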
static void tsi721_sync_dma_irq(struct tsi721_bdma_chan *bdma_chan)
{
	struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);

#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX) {
		synchronize_irq(priv->msix[TSI721_VECT_DMA0_DONE +
					   bdma_chan->id].vector);
		synchronize_irq(priv->msix[TSI721_VECT_DMA0_INT +
					   bdma_chan->id].vector);
	} else
#endif
	synchronize_irq(priv->pdev->irq);
}

static void tsi721_free_chan_resources(struct dma_chan *dchan)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);

	tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id);

	if (!bdma_chan->bd_base)
		return;

	tsi721_bdma_interrupt_enable(bdma_chan, 0);
	bdma_chan->active = false;
	tsi721_sync_dma_irq(bdma_chan);
	tasklet_kill(&bdma_chan->tasklet);
	INIT_LIST_HEAD(&bdma_chan->free_list);
	kfree(bdma_chan->tx_desc);
	tsi721_bdma_ch_free(bdma_chan);
}

static
enum dma_status tsi721_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
				 struct dma_tx_state *txstate)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	enum dma_status status;

	spin_lock_bh(&bdma_chan->lock);
	status = dma_cookie_status(dchan, cookie, txstate);
	spin_unlock_bh(&bdma_chan->lock);
	return status;
}

static void tsi721_issue_pending(struct dma_chan *dchan)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);

	tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id);

	spin_lock_bh(&bdma_chan->lock);
	if (tsi721_dma_is_idle(bdma_chan) && bdma_chan->active) {
		tsi721_advance_work(bdma_chan, NULL);
	}
	spin_unlock_bh(&bdma_chan->lock);
}
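
/*
 * tsi721_prep_rio_sg - RapidIO-specific device_prep_slave_sg hook. The
 * opaque tinfo argument carries a struct rio_dma_ext with the target
 * destID, RapidIO address and write type. A hedged usage note, based on
 * the generic RapidIO core rather than this file: clients are normally
 * expected to reach this through the rio_dma_prep_xfer() helper instead
 * of calling the dmaengine API directly.
 */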
static
struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan,
			struct scatterlist *sgl, unsigned int sg_len,
			enum dma_transfer_direction dir, unsigned long flags,
			void *tinfo)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	struct tsi721_tx_desc *desc;
	struct rio_dma_ext *rext = tinfo;
	enum dma_rtype rtype;
	struct dma_async_tx_descriptor *txd = NULL;

	if (!sgl || !sg_len) {
		tsi_err(&dchan->dev->device, "DMAC%d No SG list",
			bdma_chan->id);
		return ERR_PTR(-EINVAL);
	}

	tsi_debug(DMA, &dchan->dev->device, "DMAC%d %s", bdma_chan->id,
		  (dir == DMA_DEV_TO_MEM) ? "READ" : "WRITE");

	if (dir == DMA_DEV_TO_MEM)
		rtype = NREAD;
	else if (dir == DMA_MEM_TO_DEV) {
		switch (rext->wr_type) {
		case RDW_ALL_NWRITE:
			rtype = ALL_NWRITE;
			break;
		case RDW_ALL_NWRITE_R:
			rtype = ALL_NWRITE_R;
			break;
		case RDW_LAST_NWRITE_R:
		default:
			rtype = LAST_NWRITE_R;
			break;
		}
	} else {
		tsi_err(&dchan->dev->device,
			"DMAC%d Unsupported DMA direction option",
			bdma_chan->id);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_bh(&bdma_chan->lock);

	if (!list_empty(&bdma_chan->free_list)) {
		desc = list_first_entry(&bdma_chan->free_list,
					struct tsi721_tx_desc, desc_node);
		list_del_init(&desc->desc_node);
		desc->destid = rext->destid;
		desc->rio_addr = rext->rio_addr;
		desc->rio_addr_u = 0;
		desc->rtype = rtype;
		desc->sg_len = sg_len;
		desc->sg = sgl;
		txd = &desc->txd;
		txd->flags = flags;
	}

	spin_unlock_bh(&bdma_chan->lock);

	if (!txd) {
		tsi_debug(DMA, &dchan->dev->device,
			  "DMAC%d free TXD is not available", bdma_chan->id);
		return ERR_PTR(-EBUSY);
	}

	return txd;
}
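
/*
 * tsi721_terminate_all - deactivate the channel, busy-wait until the
 * hardware goes idle, then fail the active and all queued transactions
 * through tsi721_dma_tx_err().
 */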
static int tsi721_terminate_all(struct dma_chan *dchan)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	struct tsi721_tx_desc *desc, *_d;
	LIST_HEAD(list);

	tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id);

	spin_lock_bh(&bdma_chan->lock);

	bdma_chan->active = false;

	while (!tsi721_dma_is_idle(bdma_chan)) {
		udelay(5);
#if (0)
		/* make sure to stop the transfer */
		iowrite32(TSI721_DMAC_CTL_SUSP,
			  bdma_chan->regs + TSI721_DMAC_CTL);

		/* Wait until DMA channel stops */
		do {
			dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT);
		} while ((dmac_int & TSI721_DMAC_INT_SUSP) == 0);
#endif
	}

	if (bdma_chan->active_tx)
		list_add(&bdma_chan->active_tx->desc_node, &list);
	list_splice_init(&bdma_chan->queue, &list);

	list_for_each_entry_safe(desc, _d, &list, desc_node)
		tsi721_dma_tx_err(bdma_chan, desc);

	spin_unlock_bh(&bdma_chan->lock);

	return 0;
}

static void tsi721_dma_stop(struct tsi721_bdma_chan *bdma_chan)
{
	if (!bdma_chan->active)
		return;

	spin_lock_bh(&bdma_chan->lock);
	if (!tsi721_dma_is_idle(bdma_chan)) {
		int timeout = 100000;

		/* stop the transfer in progress */
		iowrite32(TSI721_DMAC_CTL_SUSP,
			  bdma_chan->regs + TSI721_DMAC_CTL);

		/* Wait until DMA channel stops */
		while (!tsi721_dma_is_idle(bdma_chan) && --timeout)
			udelay(1);
	}

	spin_unlock_bh(&bdma_chan->lock);
}

void tsi721_dma_stop_all(struct tsi721_device *priv)
{
	int i;

	for (i = 0; i < TSI721_DMA_MAXCH; i++) {
		if ((i != TSI721_DMACH_MAINT) && (dma_sel & (1 << i)))
			tsi721_dma_stop(&priv->bdma[i]);
	}
}
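
/*
 * tsi721_register_dma - register all BDMA channels selected by dma_sel
 * (except the one reserved for maintenance transactions) as a DMA_SLAVE
 * capable dmaengine device attached to the RapidIO mport.
 */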
int tsi721_register_dma(struct tsi721_device *priv)
{
	int i;
	int nr_channels = 0;
	int err;
	struct rio_mport *mport = &priv->mport;

	INIT_LIST_HEAD(&mport->dma.channels);

	for (i = 0; i < TSI721_DMA_MAXCH; i++) {
		struct tsi721_bdma_chan *bdma_chan = &priv->bdma[i];

		if ((i == TSI721_DMACH_MAINT) || (dma_sel & (1 << i)) == 0)
			continue;

		bdma_chan->regs = priv->regs + TSI721_DMAC_BASE(i);

		bdma_chan->dchan.device = &mport->dma;
		bdma_chan->dchan.cookie = 1;
		bdma_chan->dchan.chan_id = i;
		bdma_chan->id = i;
		bdma_chan->active = false;

		spin_lock_init(&bdma_chan->lock);

		bdma_chan->active_tx = NULL;
		INIT_LIST_HEAD(&bdma_chan->queue);
		INIT_LIST_HEAD(&bdma_chan->free_list);

		tasklet_init(&bdma_chan->tasklet, tsi721_dma_tasklet,
			     (unsigned long)bdma_chan);
		list_add_tail(&bdma_chan->dchan.device_node,
			      &mport->dma.channels);
		nr_channels++;
	}

	mport->dma.chancnt = nr_channels;
	dma_cap_zero(mport->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, mport->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, mport->dma.cap_mask);

	mport->dma.dev = &priv->pdev->dev;
	mport->dma.device_alloc_chan_resources = tsi721_alloc_chan_resources;
	mport->dma.device_free_chan_resources = tsi721_free_chan_resources;
	mport->dma.device_tx_status = tsi721_tx_status;
	mport->dma.device_issue_pending = tsi721_issue_pending;
	mport->dma.device_prep_slave_sg = tsi721_prep_rio_sg;
	mport->dma.device_terminate_all = tsi721_terminate_all;

	err = dma_async_device_register(&mport->dma);
	if (err)
		tsi_err(&priv->pdev->dev, "Failed to register DMA device");

	return err;
}

void tsi721_unregister_dma(struct tsi721_device *priv)
{
	struct rio_mport *mport = &priv->mport;
	struct dma_chan *chan, *_c;
	struct tsi721_bdma_chan *bdma_chan;

	tsi721_dma_stop_all(priv);
	dma_async_device_unregister(&mport->dma);

	list_for_each_entry_safe(chan, _c, &mport->dma.channels,
				 device_node) {
		bdma_chan = to_tsi721_chan(chan);
		if (bdma_chan->active) {
			tsi721_bdma_interrupt_enable(bdma_chan, 0);
			bdma_chan->active = false;
			tsi721_sync_dma_irq(bdma_chan);
			tasklet_kill(&bdma_chan->tasklet);
			INIT_LIST_HEAD(&bdma_chan->free_list);
			kfree(bdma_chan->tx_desc);
			tsi721_bdma_ch_free(bdma_chan);
		}
		list_del(&chan->device_node);
	}
}