sa11x0-dma.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * SA11x0 DMAengine support
 *
 * Copyright (C) 2012 Russell King
 *   Derived in part from arch/arm/mach-sa1100/dma.c,
 *   Copyright (C) 2000, 2001 by Nicolas Pitre
 */
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "virt-dma.h"

#define NR_PHY_CHAN	6
#define DMA_ALIGN	3
#define DMA_MAX_SIZE	0x1fff
#define DMA_CHUNK_SIZE	0x1000

#define DMA_DDAR	0x00
#define DMA_DCSR_S	0x04
#define DMA_DCSR_C	0x08
#define DMA_DCSR_R	0x0c
#define DMA_DBSA	0x10
#define DMA_DBTA	0x14
#define DMA_DBSB	0x18
#define DMA_DBTB	0x1c
#define DMA_SIZE	0x20

#define DCSR_RUN	(1 << 0)
#define DCSR_IE		(1 << 1)
#define DCSR_ERROR	(1 << 2)
#define DCSR_DONEA	(1 << 3)
#define DCSR_STRTA	(1 << 4)
#define DCSR_DONEB	(1 << 5)
#define DCSR_STRTB	(1 << 6)
#define DCSR_BIU	(1 << 7)

#define DDAR_RW		(1 << 0)	/* 0 = W, 1 = R */
#define DDAR_E		(1 << 1)	/* 0 = LE, 1 = BE */
#define DDAR_BS		(1 << 2)	/* 0 = BS4, 1 = BS8 */
#define DDAR_DW		(1 << 3)	/* 0 = 8b, 1 = 16b */
#define DDAR_Ser0UDCTr	(0x0 << 4)
#define DDAR_Ser0UDCRc	(0x1 << 4)
#define DDAR_Ser1SDLCTr	(0x2 << 4)
#define DDAR_Ser1SDLCRc	(0x3 << 4)
#define DDAR_Ser1UARTTr	(0x4 << 4)
#define DDAR_Ser1UARTRc	(0x5 << 4)
#define DDAR_Ser2ICPTr	(0x6 << 4)
#define DDAR_Ser2ICPRc	(0x7 << 4)
#define DDAR_Ser3UARTTr	(0x8 << 4)
#define DDAR_Ser3UARTRc	(0x9 << 4)
#define DDAR_Ser4MCP0Tr	(0xa << 4)
#define DDAR_Ser4MCP0Rc	(0xb << 4)
#define DDAR_Ser4MCP1Tr	(0xc << 4)
#define DDAR_Ser4MCP1Rc	(0xd << 4)
#define DDAR_Ser4SSPTr	(0xe << 4)
#define DDAR_Ser4SSPRc	(0xf << 4)
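
/*
 * Layout of the DDAR value built up by this driver, as used by the code
 * below: bits 0-3 select the transfer direction (DDAR_RW), endianness
 * (DDAR_E), burst size (DDAR_BS) and transfer width (DDAR_DW); bits 4-7
 * select one of the sixteen serial peripheral request lines listed above.
 * sa11x0_dma_device_config() additionally folds the peripheral address
 * supplied by the client into the upper bits of this value.
 */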

struct sa11x0_dma_sg {
	u32			addr;
	u32			len;
};

struct sa11x0_dma_desc {
	struct virt_dma_desc	vd;

	u32			ddar;
	size_t			size;
	unsigned		period;
	bool			cyclic;

	unsigned		sglen;
	struct sa11x0_dma_sg	sg[];
};

struct sa11x0_dma_phy;

struct sa11x0_dma_chan {
	struct virt_dma_chan	vc;

	/* protected by c->vc.lock */
	struct sa11x0_dma_phy	*phy;
	enum dma_status		status;

	/* protected by d->lock */
	struct list_head	node;

	u32			ddar;
	const char		*name;
};

struct sa11x0_dma_phy {
	void __iomem		*base;
	struct sa11x0_dma_dev	*dev;
	unsigned		num;

	struct sa11x0_dma_chan	*vchan;

	/* Protected by c->vc.lock */
	unsigned		sg_load;
	struct sa11x0_dma_desc	*txd_load;
	unsigned		sg_done;
	struct sa11x0_dma_desc	*txd_done;
	u32			dbs[2];
	u32			dbt[2];
	u32			dcsr;
};

struct sa11x0_dma_dev {
	struct dma_device	slave;
	void __iomem		*base;
	spinlock_t		lock;
	struct tasklet_struct	task;
	struct list_head	chan_pending;
	struct sa11x0_dma_phy	phy[NR_PHY_CHAN];
};
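
/*
 * Object model used below: one sa11x0_dma_dev owns NR_PHY_CHAN physical
 * channels (sa11x0_dma_phy) and a set of virtual channels (sa11x0_dma_chan,
 * one per DDAR peripheral).  Virtual channels queue descriptors through the
 * virt-dma helpers and are bound to a free physical channel by the tasklet
 * whenever they have work pending.
 */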

static struct sa11x0_dma_chan *to_sa11x0_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct sa11x0_dma_chan, vc.chan);
}

static struct sa11x0_dma_dev *to_sa11x0_dma(struct dma_device *dmadev)
{
	return container_of(dmadev, struct sa11x0_dma_dev, slave);
}

static struct sa11x0_dma_desc *sa11x0_dma_next_desc(struct sa11x0_dma_chan *c)
{
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);

	return vd ? container_of(vd, struct sa11x0_dma_desc, vd) : NULL;
}

static void sa11x0_dma_free_desc(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct sa11x0_dma_desc, vd));
}

static void sa11x0_dma_start_desc(struct sa11x0_dma_phy *p, struct sa11x0_dma_desc *txd)
{
	list_del(&txd->vd.node);
	p->txd_load = txd;
	p->sg_load = 0;

	dev_vdbg(p->dev->slave.dev, "pchan %u: txd %p[%x]: starting: DDAR:%x\n",
		p->num, &txd->vd, txd->vd.tx.cookie, txd->ddar);
}

static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p,
	struct sa11x0_dma_chan *c)
{
	struct sa11x0_dma_desc *txd = p->txd_load;
	struct sa11x0_dma_sg *sg;
	void __iomem *base = p->base;
	unsigned dbsx, dbtx;
	u32 dcsr;

	if (!txd)
		return;

	dcsr = readl_relaxed(base + DMA_DCSR_R);

	/* Don't try to load the next transfer if both buffers are started */
	if ((dcsr & (DCSR_STRTA | DCSR_STRTB)) == (DCSR_STRTA | DCSR_STRTB))
		return;

	if (p->sg_load == txd->sglen) {
		if (!txd->cyclic) {
			struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c);

			/*
			 * We have reached the end of the current descriptor.
			 * Peek at the next descriptor, and if compatible with
			 * the current, start processing it.
			 */
			if (txn && txn->ddar == txd->ddar) {
				txd = txn;
				sa11x0_dma_start_desc(p, txn);
			} else {
				p->txd_load = NULL;
				return;
			}
		} else {
			/* Cyclic: reset back to beginning */
			p->sg_load = 0;
		}
	}

	sg = &txd->sg[p->sg_load++];

	/* Select buffer to load according to channel status */
	if (((dcsr & (DCSR_BIU | DCSR_STRTB)) == (DCSR_BIU | DCSR_STRTB)) ||
	    ((dcsr & (DCSR_BIU | DCSR_STRTA)) == 0)) {
		dbsx = DMA_DBSA;
		dbtx = DMA_DBTA;
		dcsr = DCSR_STRTA | DCSR_IE | DCSR_RUN;
	} else {
		dbsx = DMA_DBSB;
		dbtx = DMA_DBTB;
		dcsr = DCSR_STRTB | DCSR_IE | DCSR_RUN;
	}

	writel_relaxed(sg->addr, base + dbsx);
	writel_relaxed(sg->len, base + dbtx);
	writel(dcsr, base + DMA_DCSR_S);

	dev_dbg(p->dev->slave.dev, "pchan %u: load: DCSR:%02x DBS%c:%08x DBT%c:%08x\n",
		p->num, dcsr,
		'A' + (dbsx == DMA_DBSB), sg->addr,
		'A' + (dbtx == DMA_DBTB), sg->len);
}
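
/*
 * Each physical channel is double-buffered: buffer A (DBSA/DBTA) and
 * buffer B (DBSB/DBTB) can each hold a queued segment, and DCSR_BIU
 * together with the STRT bits indicates which one the hardware is
 * currently working on.  sa11x0_dma_start_sg() above refills whichever
 * buffer is idle; sa11x0_dma_complete() below retires finished segments
 * and keeps the pipeline topped up.
 */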

static void noinline sa11x0_dma_complete(struct sa11x0_dma_phy *p,
	struct sa11x0_dma_chan *c)
{
	struct sa11x0_dma_desc *txd = p->txd_done;

	if (++p->sg_done == txd->sglen) {
		if (!txd->cyclic) {
			vchan_cookie_complete(&txd->vd);

			p->sg_done = 0;
			p->txd_done = p->txd_load;

			if (!p->txd_done)
				tasklet_schedule(&p->dev->task);
		} else {
			if ((p->sg_done % txd->period) == 0)
				vchan_cyclic_callback(&txd->vd);

			/* Cyclic: reset back to beginning */
			p->sg_done = 0;
		}
	}

	sa11x0_dma_start_sg(p, c);
}

static irqreturn_t sa11x0_dma_irq(int irq, void *dev_id)
{
	struct sa11x0_dma_phy *p = dev_id;
	struct sa11x0_dma_dev *d = p->dev;
	struct sa11x0_dma_chan *c;
	u32 dcsr;

	dcsr = readl_relaxed(p->base + DMA_DCSR_R);
	if (!(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB)))
		return IRQ_NONE;

	/* Clear reported status bits */
	writel_relaxed(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB),
		p->base + DMA_DCSR_C);

	dev_dbg(d->slave.dev, "pchan %u: irq: DCSR:%02x\n", p->num, dcsr);

	if (dcsr & DCSR_ERROR) {
		dev_err(d->slave.dev, "pchan %u: error. DCSR:%02x DDAR:%08x DBSA:%08x DBTA:%08x DBSB:%08x DBTB:%08x\n",
			p->num, dcsr,
			readl_relaxed(p->base + DMA_DDAR),
			readl_relaxed(p->base + DMA_DBSA),
			readl_relaxed(p->base + DMA_DBTA),
			readl_relaxed(p->base + DMA_DBSB),
			readl_relaxed(p->base + DMA_DBTB));
	}

	c = p->vchan;
	if (c) {
		unsigned long flags;

		spin_lock_irqsave(&c->vc.lock, flags);
		/*
		 * Now that we're holding the lock, check that the vchan
		 * really is associated with this pchan before touching the
		 * hardware.  This should always succeed, because we won't
		 * change p->vchan or c->phy while the channel is actively
		 * transferring.
		 */
		if (c->phy == p) {
			if (dcsr & DCSR_DONEA)
				sa11x0_dma_complete(p, c);
			if (dcsr & DCSR_DONEB)
				sa11x0_dma_complete(p, c);
		}
		spin_unlock_irqrestore(&c->vc.lock, flags);
	}

	return IRQ_HANDLED;
}

static void sa11x0_dma_start_txd(struct sa11x0_dma_chan *c)
{
	struct sa11x0_dma_desc *txd = sa11x0_dma_next_desc(c);

	/* If the issued list is empty, we have no further txds to process */
	if (txd) {
		struct sa11x0_dma_phy *p = c->phy;

		sa11x0_dma_start_desc(p, txd);
		p->txd_done = txd;
		p->sg_done = 0;

		/* The channel should not have any transfers started */
		WARN_ON(readl_relaxed(p->base + DMA_DCSR_R) &
				      (DCSR_STRTA | DCSR_STRTB));

		/* Clear the run and start bits before changing DDAR */
		writel_relaxed(DCSR_RUN | DCSR_STRTA | DCSR_STRTB,
			       p->base + DMA_DCSR_C);
		writel_relaxed(txd->ddar, p->base + DMA_DDAR);

		/* Try to start both buffers */
		sa11x0_dma_start_sg(p, c);
		sa11x0_dma_start_sg(p, c);
	}
}

static void sa11x0_dma_tasklet(struct tasklet_struct *t)
{
	struct sa11x0_dma_dev *d = from_tasklet(d, t, task);
	struct sa11x0_dma_phy *p;
	struct sa11x0_dma_chan *c;
	unsigned pch, pch_alloc = 0;

	dev_dbg(d->slave.dev, "tasklet enter\n");

	list_for_each_entry(c, &d->slave.channels, vc.chan.device_node) {
		spin_lock_irq(&c->vc.lock);
		p = c->phy;
		if (p && !p->txd_done) {
			sa11x0_dma_start_txd(c);

			if (!p->txd_done) {
				/* No current txd associated with this channel */
				dev_dbg(d->slave.dev, "pchan %u: free\n", p->num);

				/* Mark this channel free */
				c->phy = NULL;
				p->vchan = NULL;
			}
		}
		spin_unlock_irq(&c->vc.lock);
	}

	spin_lock_irq(&d->lock);
	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		p = &d->phy[pch];

		if (p->vchan == NULL && !list_empty(&d->chan_pending)) {
			c = list_first_entry(&d->chan_pending,
				struct sa11x0_dma_chan, node);
			list_del_init(&c->node);

			pch_alloc |= 1 << pch;

			/* Mark this channel allocated */
			p->vchan = c;

			dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc);
		}
	}
	spin_unlock_irq(&d->lock);

	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		if (pch_alloc & (1 << pch)) {
			p = &d->phy[pch];
			c = p->vchan;

			spin_lock_irq(&c->vc.lock);
			c->phy = p;

			sa11x0_dma_start_txd(c);
			spin_unlock_irq(&c->vc.lock);
		}
	}

	dev_dbg(d->slave.dev, "tasklet exit\n");
}
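
/*
 * The tasklet above is the only place where the vchan/pchan binding
 * changes: it first releases physical channels whose virtual channel has
 * run out of work, then hands any free physical channels to the virtual
 * channels waiting on d->chan_pending, and finally starts the first
 * descriptor on each newly bound pair.
 */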

static void sa11x0_dma_free_chan_resources(struct dma_chan *chan)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	unsigned long flags;

	spin_lock_irqsave(&d->lock, flags);
	list_del_init(&c->node);
	spin_unlock_irqrestore(&d->lock, flags);

	vchan_free_chan_resources(&c->vc);
}

static dma_addr_t sa11x0_dma_pos(struct sa11x0_dma_phy *p)
{
	unsigned reg;
	u32 dcsr;

	dcsr = readl_relaxed(p->base + DMA_DCSR_R);

	if ((dcsr & (DCSR_BIU | DCSR_STRTA)) == DCSR_STRTA ||
	    (dcsr & (DCSR_BIU | DCSR_STRTB)) == DCSR_BIU)
		reg = DMA_DBSA;
	else
		reg = DMA_DBSB;

	return readl_relaxed(p->base + reg);
}

static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *state)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	struct sa11x0_dma_phy *p;
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;

	ret = dma_cookie_status(&c->vc.chan, cookie, state);
	if (ret == DMA_COMPLETE)
		return ret;

	if (!state)
		return c->status;

	spin_lock_irqsave(&c->vc.lock, flags);
	p = c->phy;

	/*
	 * If the cookie is on our issue queue, then the residue is
	 * its total size.
	 */
	vd = vchan_find_desc(&c->vc, cookie);
	if (vd) {
		state->residue = container_of(vd, struct sa11x0_dma_desc, vd)->size;
	} else if (!p) {
		state->residue = 0;
	} else {
		struct sa11x0_dma_desc *txd;
		size_t bytes = 0;

		if (p->txd_done && p->txd_done->vd.tx.cookie == cookie)
			txd = p->txd_done;
		else if (p->txd_load && p->txd_load->vd.tx.cookie == cookie)
			txd = p->txd_load;
		else
			txd = NULL;

		ret = c->status;
		if (txd) {
			dma_addr_t addr = sa11x0_dma_pos(p);
			unsigned i;

			dev_vdbg(d->slave.dev, "tx_status: addr:%pad\n", &addr);

			for (i = 0; i < txd->sglen; i++) {
				dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x\n",
					i, txd->sg[i].addr, txd->sg[i].len);
				if (addr >= txd->sg[i].addr &&
				    addr < txd->sg[i].addr + txd->sg[i].len) {
					unsigned len;

					len = txd->sg[i].len -
						(addr - txd->sg[i].addr);
					dev_vdbg(d->slave.dev, "tx_status: [%u] +%x\n",
						i, len);
					bytes += len;
					i++;
					break;
				}
			}
			for (; i < txd->sglen; i++) {
				dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x ++\n",
					i, txd->sg[i].addr, txd->sg[i].len);
				bytes += txd->sg[i].len;
			}
		}
		state->residue = bytes;
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);

	dev_vdbg(d->slave.dev, "tx_status: bytes 0x%x\n", state->residue);

	return ret;
}

/*
 * Move pending txds to the issued list, and re-init pending list.
 * If not already pending, add this channel to the list of pending
 * channels and trigger the tasklet to run.
 */
static void sa11x0_dma_issue_pending(struct dma_chan *chan)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (vchan_issue_pending(&c->vc)) {
		if (!c->phy) {
			spin_lock(&d->lock);
			if (list_empty(&c->node)) {
				list_add_tail(&c->node, &d->chan_pending);
				tasklet_schedule(&d->task);
				dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
			}
			spin_unlock(&d->lock);
		}
	} else
		dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
	spin_unlock_irqrestore(&c->vc.lock, flags);
}

static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sg, unsigned int sglen,
	enum dma_transfer_direction dir, unsigned long flags, void *context)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_desc *txd;
	struct scatterlist *sgent;
	unsigned i, j = sglen;
	size_t size = 0;

	/* SA11x0 channels can only operate in their native direction */
	if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) {
		dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
			&c->vc, c->ddar, dir);
		return NULL;
	}

	/* Do not allow zero-sized txds */
	if (sglen == 0)
		return NULL;

	for_each_sg(sg, sgent, sglen, i) {
		dma_addr_t addr = sg_dma_address(sgent);
		unsigned int len = sg_dma_len(sgent);

		if (len > DMA_MAX_SIZE)
			j += DIV_ROUND_UP(len, DMA_MAX_SIZE & ~DMA_ALIGN) - 1;
		if (addr & DMA_ALIGN) {
			dev_dbg(chan->device->dev, "vchan %p: bad buffer alignment: %pad\n",
				&c->vc, &addr);
			return NULL;
		}
	}

	txd = kzalloc(struct_size(txd, sg, j), GFP_ATOMIC);
	if (!txd) {
		dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc);
		return NULL;
	}

	j = 0;
	for_each_sg(sg, sgent, sglen, i) {
		dma_addr_t addr = sg_dma_address(sgent);
		unsigned len = sg_dma_len(sgent);

		size += len;

		do {
			unsigned tlen = len;

			/*
			 * Check whether the transfer will fit.  If not, try
			 * to split the transfer up such that we end up with
			 * equal chunks - but make sure that we preserve the
			 * alignment.  This avoids small segments.
			 */
			if (tlen > DMA_MAX_SIZE) {
				unsigned mult = DIV_ROUND_UP(tlen,
					DMA_MAX_SIZE & ~DMA_ALIGN);

				tlen = (tlen / mult) & ~DMA_ALIGN;
			}

			txd->sg[j].addr = addr;
			txd->sg[j].len = tlen;

			addr += tlen;
			len -= tlen;
			j++;
		} while (len);
	}

	txd->ddar = c->ddar;
	txd->size = size;
	txd->sglen = j;

	dev_dbg(chan->device->dev, "vchan %p: txd %p: size %zu nr %u\n",
		&c->vc, &txd->vd, txd->size, txd->sglen);

	return vchan_tx_prep(&c->vc, &txd->vd, flags);
}
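
/*
 * Worked example of the splitting above (illustrative numbers, not taken
 * from the original source): a 10000-byte segment exceeds DMA_MAX_SIZE
 * (0x1fff, i.e. 8191 bytes), so mult = DIV_ROUND_UP(10000, 8188) = 2 and
 * the segment is emitted as two word-aligned chunks of 5000 bytes each,
 * rather than one maximal chunk followed by a small remainder.
 */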

static struct dma_async_tx_descriptor *sa11x0_dma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period,
	enum dma_transfer_direction dir, unsigned long flags)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_desc *txd;
	unsigned i, j, k, sglen, sgperiod;

	/* SA11x0 channels can only operate in their native direction */
	if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) {
		dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
			&c->vc, c->ddar, dir);
		return NULL;
	}

	sgperiod = DIV_ROUND_UP(period, DMA_MAX_SIZE & ~DMA_ALIGN);
	sglen = size * sgperiod / period;

	/* Do not allow zero-sized txds */
	if (sglen == 0)
		return NULL;

	txd = kzalloc(struct_size(txd, sg, sglen), GFP_ATOMIC);
	if (!txd) {
		dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc);
		return NULL;
	}

	for (i = k = 0; i < size / period; i++) {
		size_t tlen, len = period;

		for (j = 0; j < sgperiod; j++, k++) {
			tlen = len;

			if (tlen > DMA_MAX_SIZE) {
				unsigned mult = DIV_ROUND_UP(tlen, DMA_MAX_SIZE & ~DMA_ALIGN);

				tlen = (tlen / mult) & ~DMA_ALIGN;
			}

			txd->sg[k].addr = addr;
			txd->sg[k].len = tlen;
			addr += tlen;
			len -= tlen;
		}

		WARN_ON(len != 0);
	}

	WARN_ON(k != sglen);

	txd->ddar = c->ddar;
	txd->size = size;
	txd->sglen = sglen;
	txd->cyclic = 1;
	txd->period = sgperiod;

	return vchan_tx_prep(&c->vc, &txd->vd, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}

static int sa11x0_dma_device_config(struct dma_chan *chan,
	struct dma_slave_config *cfg)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	u32 ddar = c->ddar & ((0xf << 4) | DDAR_RW);
	dma_addr_t addr;
	enum dma_slave_buswidth width;
	u32 maxburst;

	if (ddar & DDAR_RW) {
		addr = cfg->src_addr;
		width = cfg->src_addr_width;
		maxburst = cfg->src_maxburst;
	} else {
		addr = cfg->dst_addr;
		width = cfg->dst_addr_width;
		maxburst = cfg->dst_maxburst;
	}

	if ((width != DMA_SLAVE_BUSWIDTH_1_BYTE &&
	     width != DMA_SLAVE_BUSWIDTH_2_BYTES) ||
	    (maxburst != 4 && maxburst != 8))
		return -EINVAL;

	if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
		ddar |= DDAR_DW;
	if (maxburst == 8)
		ddar |= DDAR_BS;

	dev_dbg(c->vc.chan.device->dev, "vchan %p: dma_slave_config addr %pad width %u burst %u\n",
		&c->vc, &addr, width, maxburst);

	c->ddar = ddar | (addr & 0xf0000000) | (addr & 0x003ffffc) << 6;

	return 0;
}
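
/*
 * The final assignment above rebuilds c->ddar from scratch on every
 * dma_slave_config() call: only the peripheral-select and direction bits
 * are retained, the bus width and burst size are re-derived from the
 * config, and the peripheral address supplied by the client is folded
 * into the upper bits of the register value.  Only 1- or 2-byte widths
 * and bursts of 4 or 8 are accepted; anything else is rejected with
 * -EINVAL.
 */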

static int sa11x0_dma_device_pause(struct dma_chan *chan)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	struct sa11x0_dma_phy *p;
	unsigned long flags;

	dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
	spin_lock_irqsave(&c->vc.lock, flags);
	if (c->status == DMA_IN_PROGRESS) {
		c->status = DMA_PAUSED;

		p = c->phy;
		if (p) {
			writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
		} else {
			spin_lock(&d->lock);
			list_del_init(&c->node);
			spin_unlock(&d->lock);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);

	return 0;
}

static int sa11x0_dma_device_resume(struct dma_chan *chan)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	struct sa11x0_dma_phy *p;
	unsigned long flags;

	dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
	spin_lock_irqsave(&c->vc.lock, flags);
	if (c->status == DMA_PAUSED) {
		c->status = DMA_IN_PROGRESS;

		p = c->phy;
		if (p) {
			writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S);
		} else if (!list_empty(&c->vc.desc_issued)) {
			spin_lock(&d->lock);
			list_add_tail(&c->node, &d->chan_pending);
			spin_unlock(&d->lock);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);

	return 0;
}

static int sa11x0_dma_device_terminate_all(struct dma_chan *chan)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	struct sa11x0_dma_phy *p;
	LIST_HEAD(head);
	unsigned long flags;

	dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
	/* Clear the tx descriptor lists */
	spin_lock_irqsave(&c->vc.lock, flags);
	vchan_get_all_descriptors(&c->vc, &head);

	p = c->phy;
	if (p) {
		dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num);
		/* vchan is assigned to a pchan - stop the channel */
		writel(DCSR_RUN | DCSR_IE |
		       DCSR_STRTA | DCSR_DONEA |
		       DCSR_STRTB | DCSR_DONEB,
		       p->base + DMA_DCSR_C);

		if (p->txd_load) {
			if (p->txd_load != p->txd_done)
				list_add_tail(&p->txd_load->vd.node, &head);
			p->txd_load = NULL;
		}
		if (p->txd_done) {
			list_add_tail(&p->txd_done->vd.node, &head);
			p->txd_done = NULL;
		}
		c->phy = NULL;
		spin_lock(&d->lock);
		p->vchan = NULL;
		spin_unlock(&d->lock);
		tasklet_schedule(&d->task);
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}

struct sa11x0_dma_channel_desc {
	u32 ddar;
	const char *name;
};

#define CD(d1, d2) { .ddar = DDAR_##d1 | d2, .name = #d1 }
static const struct sa11x0_dma_channel_desc chan_desc[] = {
	CD(Ser0UDCTr, 0),
	CD(Ser0UDCRc, DDAR_RW),
	CD(Ser1SDLCTr, 0),
	CD(Ser1SDLCRc, DDAR_RW),
	CD(Ser1UARTTr, 0),
	CD(Ser1UARTRc, DDAR_RW),
	CD(Ser2ICPTr, 0),
	CD(Ser2ICPRc, DDAR_RW),
	CD(Ser3UARTTr, 0),
	CD(Ser3UARTRc, DDAR_RW),
	CD(Ser4MCP0Tr, 0),
	CD(Ser4MCP0Rc, DDAR_RW),
	CD(Ser4MCP1Tr, 0),
	CD(Ser4MCP1Rc, DDAR_RW),
	CD(Ser4SSPTr, 0),
	CD(Ser4SSPRc, DDAR_RW),
};

static const struct dma_slave_map sa11x0_dma_map[] = {
	{ "sa11x0-ir", "tx", "Ser2ICPTr" },
	{ "sa11x0-ir", "rx", "Ser2ICPRc" },
	{ "sa11x0-ssp", "tx", "Ser4SSPTr" },
	{ "sa11x0-ssp", "rx", "Ser4SSPRc" },
};

static bool sa11x0_dma_filter_fn(struct dma_chan *chan, void *param)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	const char *p = param;

	return !strcmp(c->name, p);
}
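
/*
 * How a client typically obtains one of these channels (illustrative
 * sketch, not part of the original file; "client_dev" and "fifo_phys_addr"
 * are hypothetical, standing in for one of the peripherals listed in
 * sa11x0_dma_map and its FIFO address):
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= fifo_phys_addr,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE,
 *		.dst_maxburst	= 4,
 *	};
 *	struct dma_chan *chan = dma_request_chan(client_dev, "tx");
 *
 *	if (!IS_ERR(chan))
 *		dmaengine_slave_config(chan, &cfg);
 *
 * dma_request_chan() resolves the request through the dma_slave_map table
 * above, which in turn uses sa11x0_dma_filter_fn() to match the named
 * virtual channel.
 */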

static int sa11x0_dma_init_dmadev(struct dma_device *dmadev,
	struct device *dev)
{
	unsigned i;

	INIT_LIST_HEAD(&dmadev->channels);
	dmadev->dev = dev;
	dmadev->device_free_chan_resources = sa11x0_dma_free_chan_resources;
	dmadev->device_config = sa11x0_dma_device_config;
	dmadev->device_pause = sa11x0_dma_device_pause;
	dmadev->device_resume = sa11x0_dma_device_resume;
	dmadev->device_terminate_all = sa11x0_dma_device_terminate_all;
	dmadev->device_tx_status = sa11x0_dma_tx_status;
	dmadev->device_issue_pending = sa11x0_dma_issue_pending;

	for (i = 0; i < ARRAY_SIZE(chan_desc); i++) {
		struct sa11x0_dma_chan *c;

		c = kzalloc(sizeof(*c), GFP_KERNEL);
		if (!c) {
			dev_err(dev, "no memory for channel %u\n", i);
			return -ENOMEM;
		}

		c->status = DMA_IN_PROGRESS;
		c->ddar = chan_desc[i].ddar;
		c->name = chan_desc[i].name;
		INIT_LIST_HEAD(&c->node);

		c->vc.desc_free = sa11x0_dma_free_desc;
		vchan_init(&c->vc, dmadev);
	}

	return dma_async_device_register(dmadev);
}

static int sa11x0_dma_request_irq(struct platform_device *pdev, int nr,
	void *data)
{
	int irq = platform_get_irq(pdev, nr);

	if (irq <= 0)
		return -ENXIO;

	return request_irq(irq, sa11x0_dma_irq, 0, dev_name(&pdev->dev), data);
}

static void sa11x0_dma_free_irq(struct platform_device *pdev, int nr,
	void *data)
{
	int irq = platform_get_irq(pdev, nr);

	if (irq > 0)
		free_irq(irq, data);
}

static void sa11x0_dma_free_channels(struct dma_device *dmadev)
{
	struct sa11x0_dma_chan *c, *cn;

	list_for_each_entry_safe(c, cn, &dmadev->channels, vc.chan.device_node) {
		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
		kfree(c);
	}
}

static int sa11x0_dma_probe(struct platform_device *pdev)
{
	struct sa11x0_dma_dev *d;
	struct resource *res;
	unsigned i;
	int ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENXIO;

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	spin_lock_init(&d->lock);
	INIT_LIST_HEAD(&d->chan_pending);

	d->slave.filter.fn = sa11x0_dma_filter_fn;
	d->slave.filter.mapcnt = ARRAY_SIZE(sa11x0_dma_map);
	d->slave.filter.map = sa11x0_dma_map;

	d->base = ioremap(res->start, resource_size(res));
	if (!d->base) {
		ret = -ENOMEM;
		goto err_ioremap;
	}

	tasklet_setup(&d->task, sa11x0_dma_tasklet);

	for (i = 0; i < NR_PHY_CHAN; i++) {
		struct sa11x0_dma_phy *p = &d->phy[i];

		p->dev = d;
		p->num = i;
		p->base = d->base + i * DMA_SIZE;
		writel_relaxed(DCSR_RUN | DCSR_IE | DCSR_ERROR |
			DCSR_DONEA | DCSR_STRTA | DCSR_DONEB | DCSR_STRTB,
			p->base + DMA_DCSR_C);
		writel_relaxed(0, p->base + DMA_DDAR);

		ret = sa11x0_dma_request_irq(pdev, i, p);
		if (ret) {
			while (i) {
				i--;
				sa11x0_dma_free_irq(pdev, i, &d->phy[i]);
			}
			goto err_irq;
		}
	}

	dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, d->slave.cap_mask);
	d->slave.device_prep_slave_sg = sa11x0_dma_prep_slave_sg;
	d->slave.device_prep_dma_cyclic = sa11x0_dma_prep_dma_cyclic;
	d->slave.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	d->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	d->slave.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
				   BIT(DMA_SLAVE_BUSWIDTH_2_BYTES);
	d->slave.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
				   BIT(DMA_SLAVE_BUSWIDTH_2_BYTES);
	ret = sa11x0_dma_init_dmadev(&d->slave, &pdev->dev);
	if (ret) {
		dev_warn(d->slave.dev, "failed to register slave async device: %d\n",
			ret);
		goto err_slave_reg;
	}

	platform_set_drvdata(pdev, d);
	return 0;

 err_slave_reg:
	sa11x0_dma_free_channels(&d->slave);
	for (i = 0; i < NR_PHY_CHAN; i++)
		sa11x0_dma_free_irq(pdev, i, &d->phy[i]);
 err_irq:
	tasklet_kill(&d->task);
	iounmap(d->base);
 err_ioremap:
	kfree(d);
 err_alloc:
	return ret;
}

static int sa11x0_dma_remove(struct platform_device *pdev)
{
	struct sa11x0_dma_dev *d = platform_get_drvdata(pdev);
	unsigned pch;

	dma_async_device_unregister(&d->slave);

	sa11x0_dma_free_channels(&d->slave);
	for (pch = 0; pch < NR_PHY_CHAN; pch++)
		sa11x0_dma_free_irq(pdev, pch, &d->phy[pch]);
	tasklet_kill(&d->task);
	iounmap(d->base);
	kfree(d);

	return 0;
}

static int sa11x0_dma_suspend(struct device *dev)
{
	struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
	unsigned pch;

	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		struct sa11x0_dma_phy *p = &d->phy[pch];
		u32 dcsr, saved_dcsr;

		dcsr = saved_dcsr = readl_relaxed(p->base + DMA_DCSR_R);
		if (dcsr & DCSR_RUN) {
			writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
			dcsr = readl_relaxed(p->base + DMA_DCSR_R);
		}

		saved_dcsr &= DCSR_RUN | DCSR_IE;
		if (dcsr & DCSR_BIU) {
			p->dbs[0] = readl_relaxed(p->base + DMA_DBSB);
			p->dbt[0] = readl_relaxed(p->base + DMA_DBTB);
			p->dbs[1] = readl_relaxed(p->base + DMA_DBSA);
			p->dbt[1] = readl_relaxed(p->base + DMA_DBTA);
			saved_dcsr |= (dcsr & DCSR_STRTA ? DCSR_STRTB : 0) |
				      (dcsr & DCSR_STRTB ? DCSR_STRTA : 0);
		} else {
			p->dbs[0] = readl_relaxed(p->base + DMA_DBSA);
			p->dbt[0] = readl_relaxed(p->base + DMA_DBTA);
			p->dbs[1] = readl_relaxed(p->base + DMA_DBSB);
			p->dbt[1] = readl_relaxed(p->base + DMA_DBTB);
			saved_dcsr |= dcsr & (DCSR_STRTA | DCSR_STRTB);
		}
		p->dcsr = saved_dcsr;

		writel(DCSR_STRTA | DCSR_STRTB, p->base + DMA_DCSR_C);
	}

	return 0;
}
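
/*
 * Note on the save path above: when DCSR_BIU indicates that buffer B is
 * the one currently in use, the buffer registers and their STRT bits are
 * saved swapped, so the state is always recorded as if buffer A were
 * current.  sa11x0_dma_resume() can then restore dbs/dbt/dcsr verbatim
 * without needing to know which buffer the hardware had reached.
 */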

static int sa11x0_dma_resume(struct device *dev)
{
	struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
	unsigned pch;

	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		struct sa11x0_dma_phy *p = &d->phy[pch];
		struct sa11x0_dma_desc *txd = NULL;
		u32 dcsr = readl_relaxed(p->base + DMA_DCSR_R);

		WARN_ON(dcsr & (DCSR_BIU | DCSR_STRTA | DCSR_STRTB | DCSR_RUN));

		if (p->txd_done)
			txd = p->txd_done;
		else if (p->txd_load)
			txd = p->txd_load;

		if (!txd)
			continue;

		writel_relaxed(txd->ddar, p->base + DMA_DDAR);

		writel_relaxed(p->dbs[0], p->base + DMA_DBSA);
		writel_relaxed(p->dbt[0], p->base + DMA_DBTA);
		writel_relaxed(p->dbs[1], p->base + DMA_DBSB);
		writel_relaxed(p->dbt[1], p->base + DMA_DBTB);
		writel_relaxed(p->dcsr, p->base + DMA_DCSR_S);
	}

	return 0;
}

static const struct dev_pm_ops sa11x0_dma_pm_ops = {
	.suspend_noirq = sa11x0_dma_suspend,
	.resume_noirq = sa11x0_dma_resume,
	.freeze_noirq = sa11x0_dma_suspend,
	.thaw_noirq = sa11x0_dma_resume,
	.poweroff_noirq = sa11x0_dma_suspend,
	.restore_noirq = sa11x0_dma_resume,
};

static struct platform_driver sa11x0_dma_driver = {
	.driver = {
		.name	= "sa11x0-dma",
		.pm	= &sa11x0_dma_pm_ops,
	},
	.probe		= sa11x0_dma_probe,
	.remove		= sa11x0_dma_remove,
};

static int __init sa11x0_dma_init(void)
{
	return platform_driver_register(&sa11x0_dma_driver);
}
subsys_initcall(sa11x0_dma_init);

static void __exit sa11x0_dma_exit(void)
{
	platform_driver_unregister(&sa11x0_dma_driver);
}
module_exit(sa11x0_dma_exit);

MODULE_AUTHOR("Russell King");
MODULE_DESCRIPTION("SA-11x0 DMA driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:sa11x0-dma");