// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/arm/plat-omap/dma.c
 *
 * Copyright (C) 2003 - 2008 Nokia Corporation
 * Author: Juha Yrjölä <juha.yrjola@nokia.com>
 * DMA channel linking for 1610 by Samuel Ortiz <samuel.ortiz@nokia.com>
 * Graphics DMA and LCD DMA graphics transformations
 * by Imre Deak <imre.deak@nokia.com>
 * OMAP2/3 support Copyright (C) 2004-2007 Texas Instruments, Inc.
 * Merged to support both OMAP1 and OMAP2 by Tony Lindgren <tony@atomide.com>
 * Some functions based on earlier dma-omap.c Copyright (C) 2001 RidgeRun, Inc.
 *
 * Copyright (C) 2009 Texas Instruments
 * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * Support functions for the OMAP internal DMA channels.
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - https://www.ti.com/
 * Converted DMA library into DMA platform driver.
 *	- G, Manjunath Kondaiah <manjugk@ti.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <linux/omap-dma.h>

#ifdef CONFIG_ARCH_OMAP1
#include <mach/soc.h>
#endif

/*
 * MAX_LOGICAL_DMA_CH_COUNT: the maximum number of logical DMA
 * channels that an instance of the SDMA IP block can support. Used
 * to size arrays. (The actual maximum on a particular SoC may be less
 * than this -- for example, OMAP1 SDMA instances only support 17 logical
 * DMA channels.)
 */
#define MAX_LOGICAL_DMA_CH_COUNT	32

#undef DEBUG

#ifndef CONFIG_ARCH_OMAP1
enum { DMA_CH_ALLOC_DONE, DMA_CH_PARAMS_SET_DONE, DMA_CH_STARTED,
	DMA_CH_QUEUED, DMA_CH_NOTSTARTED, DMA_CH_PAUSED, DMA_CH_LINK_ENABLED
};

enum { DMA_CHAIN_STARTED, DMA_CHAIN_NOTSTARTED };
#endif

#define OMAP_DMA_ACTIVE			0x01
#define OMAP2_DMA_CSR_CLEAR_MASK	0xffffffff

#define OMAP_FUNC_MUX_ARM_BASE		(0xfffe1000 + 0xec)

static struct omap_system_dma_plat_info *p;
static struct omap_dma_dev_attr *d;
static void omap_clear_dma(int lch);
static int enable_1510_mode;
static u32 errata;

struct dma_link_info {
	int *linked_dmach_q;
	int no_of_lchs_linked;

	int q_count;
	int q_tail;
	int q_head;

	int chain_state;
	int chain_mode;
};

static int dma_lch_count;
static int dma_chan_count;
static int omap_dma_reserve_channels;

static spinlock_t dma_chan_lock;
static struct omap_dma_lch *dma_chan;

static inline void disable_lnk(int lch);
static void omap_disable_channel_irq(int lch);
static inline void omap_enable_channel_irq(int lch);

#ifdef CONFIG_ARCH_OMAP15XX
/* Returns 1 if the DMA module is in OMAP1510-compatible mode, 0 otherwise */
static int omap_dma_in_1510_mode(void)
{
	return enable_1510_mode;
}
#else
#define omap_dma_in_1510_mode()		0
#endif

#ifdef CONFIG_ARCH_OMAP1
static inline void set_gdma_dev(int req, int dev)
{
	u32 reg = OMAP_FUNC_MUX_ARM_BASE + ((req - 1) / 5) * 4;
	int shift = ((req - 1) % 5) * 6;
	u32 l;

	l = omap_readl(reg);
	l &= ~(0x3f << shift);
	l |= (dev - 1) << shift;
	omap_writel(l, reg);
}
#else
#define set_gdma_dev(req, dev)	do {} while (0)
#define omap_readl(reg)		0
#define omap_writel(val, reg)	do {} while (0)
#endif

#ifdef CONFIG_ARCH_OMAP1
void omap_set_dma_priority(int lch, int dst_port, int priority)
{
	unsigned long reg;
	u32 l;

	if (dma_omap1()) {
		switch (dst_port) {
		case OMAP_DMA_PORT_OCP_T1:	/* FFFECC00 */
			reg = OMAP_TC_OCPT1_PRIOR;
			break;
		case OMAP_DMA_PORT_OCP_T2:	/* FFFECCD0 */
			reg = OMAP_TC_OCPT2_PRIOR;
			break;
		case OMAP_DMA_PORT_EMIFF:	/* FFFECC08 */
			reg = OMAP_TC_EMIFF_PRIOR;
			break;
		case OMAP_DMA_PORT_EMIFS:	/* FFFECC04 */
			reg = OMAP_TC_EMIFS_PRIOR;
			break;
		default:
			BUG();
			return;
		}
		l = omap_readl(reg);
		l &= ~(0xf << 8);
		l |= (priority & 0xf) << 8;
		omap_writel(l, reg);
	}
}
#endif

#ifdef CONFIG_ARCH_OMAP2PLUS
void omap_set_dma_priority(int lch, int dst_port, int priority)
{
	u32 ccr;

	ccr = p->dma_read(CCR, lch);
	if (priority)
		ccr |= (1 << 6);
	else
		ccr &= ~(1 << 6);
	p->dma_write(ccr, CCR, lch);
}
#endif
EXPORT_SYMBOL(omap_set_dma_priority);

void omap_set_dma_transfer_params(int lch, int data_type, int elem_count,
				  int frame_count, int sync_mode,
				  int dma_trigger, int src_or_dst_synch)
{
	u32 l;

	l = p->dma_read(CSDP, lch);
	l &= ~0x03;
	l |= data_type;
	p->dma_write(l, CSDP, lch);

	if (dma_omap1()) {
		u16 ccr;

		ccr = p->dma_read(CCR, lch);
		ccr &= ~(1 << 5);
		if (sync_mode == OMAP_DMA_SYNC_FRAME)
			ccr |= 1 << 5;
		p->dma_write(ccr, CCR, lch);

		ccr = p->dma_read(CCR2, lch);
		ccr &= ~(1 << 2);
		if (sync_mode == OMAP_DMA_SYNC_BLOCK)
			ccr |= 1 << 2;
		p->dma_write(ccr, CCR2, lch);
	}

	if (dma_omap2plus() && dma_trigger) {
		u32 val;

		val = p->dma_read(CCR, lch);

		/* DMA_SYNCHRO_CONTROL_UPPER depends on the channel number */
		val &= ~((1 << 23) | (3 << 19) | 0x1f);
		val |= (dma_trigger & ~0x1f) << 14;
		val |= dma_trigger & 0x1f;

		if (sync_mode & OMAP_DMA_SYNC_FRAME)
			val |= 1 << 5;
		else
			val &= ~(1 << 5);

		if (sync_mode & OMAP_DMA_SYNC_BLOCK)
			val |= 1 << 18;
		else
			val &= ~(1 << 18);

		if (src_or_dst_synch == OMAP_DMA_DST_SYNC_PREFETCH) {
			val &= ~(1 << 24);	/* dest synch */
			val |= (1 << 23);	/* Prefetch */
		} else if (src_or_dst_synch) {
			val |= 1 << 24;		/* source synch */
		} else {
			val &= ~(1 << 24);	/* dest synch */
		}
		p->dma_write(val, CCR, lch);
	}

	p->dma_write(elem_count, CEN, lch);
	p->dma_write(frame_count, CFN, lch);
}
EXPORT_SYMBOL(omap_set_dma_transfer_params);

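/*
 * Usage sketch (illustrative only, not part of this driver; the channel
 * number, counts and trigger below are made-up values): a legacy client
 * moving 512 32-bit elements as 4 frames of 128 elements, synchronized on
 * the destination request line, could program the channel like this:
 *
 *	omap_set_dma_transfer_params(lch, OMAP_DMA_DATA_TYPE_S32,
 *				     128, 4, OMAP_DMA_SYNC_FRAME,
 *				     trigger, OMAP_DMA_DST_SYNC);
 */
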
void omap_set_dma_channel_mode(int lch, enum omap_dma_channel_mode mode)
{
	if (dma_omap1() && !dma_omap15xx()) {
		u32 l;

		l = p->dma_read(LCH_CTRL, lch);
		l &= ~0x7;
		l |= mode;
		p->dma_write(l, LCH_CTRL, lch);
	}
}
EXPORT_SYMBOL(omap_set_dma_channel_mode);

/* Note that src_port is only for OMAP1 */
void omap_set_dma_src_params(int lch, int src_port, int src_amode,
			     unsigned long src_start,
			     int src_ei, int src_fi)
{
	u32 l;

	if (dma_omap1()) {
		u16 w;

		w = p->dma_read(CSDP, lch);
		w &= ~(0x1f << 2);
		w |= src_port << 2;
		p->dma_write(w, CSDP, lch);
	}

	l = p->dma_read(CCR, lch);
	l &= ~(0x03 << 12);
	l |= src_amode << 12;
	p->dma_write(l, CCR, lch);

	p->dma_write(src_start, CSSA, lch);

	p->dma_write(src_ei, CSEI, lch);
	p->dma_write(src_fi, CSFI, lch);
}
EXPORT_SYMBOL(omap_set_dma_src_params);

void omap_set_dma_src_data_pack(int lch, int enable)
{
	u32 l;

	l = p->dma_read(CSDP, lch);
	l &= ~(1 << 6);
	if (enable)
		l |= (1 << 6);
	p->dma_write(l, CSDP, lch);
}
EXPORT_SYMBOL(omap_set_dma_src_data_pack);

void omap_set_dma_src_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
{
	unsigned int burst = 0;
	u32 l;

	l = p->dma_read(CSDP, lch);
	l &= ~(0x03 << 7);

	switch (burst_mode) {
	case OMAP_DMA_DATA_BURST_DIS:
		break;
	case OMAP_DMA_DATA_BURST_4:
		if (dma_omap2plus())
			burst = 0x1;
		else
			burst = 0x2;
		break;
	case OMAP_DMA_DATA_BURST_8:
		if (dma_omap2plus()) {
			burst = 0x2;
			break;
		}
		/*
		 * not supported by current hardware on OMAP1
		 * w |= (0x03 << 7);
		 */
		fallthrough;
	case OMAP_DMA_DATA_BURST_16:
		if (dma_omap2plus()) {
			burst = 0x3;
			break;
		}
		/* OMAP1 doesn't support burst 16 */
		fallthrough;
	default:
		BUG();
	}

	l |= (burst << 7);
	p->dma_write(l, CSDP, lch);
}
EXPORT_SYMBOL(omap_set_dma_src_burst_mode);

/* Note that dest_port is only for OMAP1 */
void omap_set_dma_dest_params(int lch, int dest_port, int dest_amode,
			      unsigned long dest_start,
			      int dst_ei, int dst_fi)
{
	u32 l;

	if (dma_omap1()) {
		l = p->dma_read(CSDP, lch);
		l &= ~(0x1f << 9);
		l |= dest_port << 9;
		p->dma_write(l, CSDP, lch);
	}

	l = p->dma_read(CCR, lch);
	l &= ~(0x03 << 14);
	l |= dest_amode << 14;
	p->dma_write(l, CCR, lch);

	p->dma_write(dest_start, CDSA, lch);

	p->dma_write(dst_ei, CDEI, lch);
	p->dma_write(dst_fi, CDFI, lch);
}
EXPORT_SYMBOL(omap_set_dma_dest_params);

void omap_set_dma_dest_data_pack(int lch, int enable)
{
	u32 l;

	l = p->dma_read(CSDP, lch);
	l &= ~(1 << 13);
	if (enable)
		l |= 1 << 13;
	p->dma_write(l, CSDP, lch);
}
EXPORT_SYMBOL(omap_set_dma_dest_data_pack);

void omap_set_dma_dest_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
{
	unsigned int burst = 0;
	u32 l;

	l = p->dma_read(CSDP, lch);
	l &= ~(0x03 << 14);

	switch (burst_mode) {
	case OMAP_DMA_DATA_BURST_DIS:
		break;
	case OMAP_DMA_DATA_BURST_4:
		if (dma_omap2plus())
			burst = 0x1;
		else
			burst = 0x2;
		break;
	case OMAP_DMA_DATA_BURST_8:
		if (dma_omap2plus())
			burst = 0x2;
		else
			burst = 0x3;
		break;
	case OMAP_DMA_DATA_BURST_16:
		if (dma_omap2plus()) {
			burst = 0x3;
			break;
		}
		/* OMAP1 doesn't support burst 16 */
		fallthrough;
	default:
		printk(KERN_ERR "Invalid DMA burst mode\n");
		BUG();
		return;
	}
	l |= (burst << 14);
	p->dma_write(l, CSDP, lch);
}
EXPORT_SYMBOL(omap_set_dma_dest_burst_mode);

static inline void omap_enable_channel_irq(int lch)
{
	/* Clear CSR */
	if (dma_omap1())
		p->dma_read(CSR, lch);
	else
		p->dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, lch);

	/* Enable some nice interrupts. */
	p->dma_write(dma_chan[lch].enabled_irqs, CICR, lch);
}

static inline void omap_disable_channel_irq(int lch)
{
	/* disable channel interrupts */
	p->dma_write(0, CICR, lch);
	/* Clear CSR */
	if (dma_omap1())
		p->dma_read(CSR, lch);
	else
		p->dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, lch);
}

void omap_disable_dma_irq(int lch, u16 bits)
{
	dma_chan[lch].enabled_irqs &= ~bits;
}
EXPORT_SYMBOL(omap_disable_dma_irq);

static inline void enable_lnk(int lch)
{
	u32 l;

	l = p->dma_read(CLNK_CTRL, lch);

	if (dma_omap1())
		l &= ~(1 << 14);

	/* Set the ENABLE_LNK bits */
	if (dma_chan[lch].next_lch != -1)
		l = dma_chan[lch].next_lch | (1 << 15);

	p->dma_write(l, CLNK_CTRL, lch);
}

static inline void disable_lnk(int lch)
{
	u32 l;

	l = p->dma_read(CLNK_CTRL, lch);

	/* Disable interrupts */
	omap_disable_channel_irq(lch);

	if (dma_omap1()) {
		/* Set the STOP_LNK bit */
		l |= 1 << 14;
	}

	if (dma_omap2plus()) {
		/* Clear the ENABLE_LNK bit */
		l &= ~(1 << 15);
	}

	p->dma_write(l, CLNK_CTRL, lch);
	dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
}

int omap_request_dma(int dev_id, const char *dev_name,
		     void (*callback)(int lch, u16 ch_status, void *data),
		     void *data, int *dma_ch_out)
{
	int ch, free_ch = -1;
	unsigned long flags;
	struct omap_dma_lch *chan;

	WARN(strcmp(dev_name, "DMA engine"), "Using deprecated platform DMA API - please update to DMA engine");

	spin_lock_irqsave(&dma_chan_lock, flags);
	for (ch = 0; ch < dma_chan_count; ch++) {
		if (free_ch == -1 && dma_chan[ch].dev_id == -1) {
			free_ch = ch;
			/* Exit after first free channel found */
			break;
		}
	}
	if (free_ch == -1) {
		spin_unlock_irqrestore(&dma_chan_lock, flags);
		return -EBUSY;
	}
	chan = dma_chan + free_ch;
	chan->dev_id = dev_id;

	if (p->clear_lch_regs)
		p->clear_lch_regs(free_ch);

	spin_unlock_irqrestore(&dma_chan_lock, flags);

	chan->dev_name = dev_name;
	chan->callback = callback;
	chan->data = data;
	chan->flags = 0;

	chan->enabled_irqs = OMAP_DMA_DROP_IRQ | OMAP_DMA_BLOCK_IRQ;

	if (dma_omap1())
		chan->enabled_irqs |= OMAP1_DMA_TOUT_IRQ;

	if (dma_omap16xx()) {
		/* If the sync device is set, configure it dynamically. */
		if (dev_id != 0) {
			set_gdma_dev(free_ch + 1, dev_id);
			dev_id = free_ch + 1;
		}
		/*
		 * Disable the 1510 compatibility mode and set the sync device
		 * id.
		 */
		p->dma_write(dev_id | (1 << 10), CCR, free_ch);
	} else if (dma_omap1()) {
		p->dma_write(dev_id, CCR, free_ch);
	}

	*dma_ch_out = free_ch;

	return 0;
}
EXPORT_SYMBOL(omap_request_dma);

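/*
 * Usage sketch (illustrative only): a legacy client would request a channel
 * with a completion callback and release it when done. MY_DMA_REQ and
 * my_dma_callback() are hypothetical names; any dev_name other than
 * "DMA engine" triggers the deprecation WARN above.
 *
 *	static void my_dma_callback(int lch, u16 ch_status, void *data)
 *	{
 *		if (ch_status & OMAP_DMA_BLOCK_IRQ)
 *			complete(data);
 *	}
 *
 *	ret = omap_request_dma(MY_DMA_REQ, "my device",
 *			       my_dma_callback, &done, &lch);
 *	if (ret)
 *		return ret;
 *	...
 *	omap_free_dma(lch);
 */
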
void omap_free_dma(int lch)
{
	unsigned long flags;

	if (dma_chan[lch].dev_id == -1) {
		pr_err("omap_dma: trying to free unallocated DMA channel %d\n",
		       lch);
		return;
	}

	/* Disable all DMA interrupts for the channel. */
	omap_disable_channel_irq(lch);

	/* Make sure the DMA transfer is stopped. */
	p->dma_write(0, CCR, lch);

	spin_lock_irqsave(&dma_chan_lock, flags);
	dma_chan[lch].dev_id = -1;
	dma_chan[lch].next_lch = -1;
	dma_chan[lch].callback = NULL;
	spin_unlock_irqrestore(&dma_chan_lock, flags);
}
EXPORT_SYMBOL(omap_free_dma);

/*
 * Clears any DMA state so the DMA engine is ready to restart with new buffers
 * through omap_start_dma(). Any buffers in flight are discarded.
 */
static void omap_clear_dma(int lch)
{
	unsigned long flags;

	local_irq_save(flags);
	p->clear_dma(lch);
	local_irq_restore(flags);
}

void omap_start_dma(int lch)
{
	u32 l;

	/*
	 * The CPC/CDAC register needs to be initialized to zero
	 * before starting dma transfer.
	 */
	if (dma_omap15xx())
		p->dma_write(0, CPC, lch);
	else
		p->dma_write(0, CDAC, lch);

	if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
		int next_lch, cur_lch;
		char dma_chan_link_map[MAX_LOGICAL_DMA_CH_COUNT];

		/* Set the link register of the first channel */
		enable_lnk(lch);

		memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map));
		dma_chan_link_map[lch] = 1;

		cur_lch = dma_chan[lch].next_lch;
		do {
			next_lch = dma_chan[cur_lch].next_lch;

			/* The loop case: we've been here already */
			if (dma_chan_link_map[cur_lch])
				break;
			/* Mark the current channel */
			dma_chan_link_map[cur_lch] = 1;

			enable_lnk(cur_lch);
			omap_enable_channel_irq(cur_lch);

			cur_lch = next_lch;
		} while (next_lch != -1);
	} else if (IS_DMA_ERRATA(DMA_ERRATA_PARALLEL_CHANNELS))
		p->dma_write(lch, CLNK_CTRL, lch);

	omap_enable_channel_irq(lch);

	l = p->dma_read(CCR, lch);

	if (IS_DMA_ERRATA(DMA_ERRATA_IFRAME_BUFFERING))
		l |= OMAP_DMA_CCR_BUFFERING_DISABLE;
	l |= OMAP_DMA_CCR_EN;

	/*
	 * As dma_write() uses IO accessors which are weakly ordered, there
	 * is no guarantee that data in coherent DMA memory will be visible
	 * to the DMA device. Add a memory barrier here to ensure that any
	 * such data is visible prior to enabling DMA.
	 */
	mb();
	p->dma_write(l, CCR, lch);

	dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
}
EXPORT_SYMBOL(omap_start_dma);

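/*
 * Usage sketch (illustrative only; lch, src_phys and dst_phys are
 * placeholders): a simple post-incrementing transfer combines the helpers
 * above before the channel is started, and is torn down with
 * omap_stop_dma():
 *
 *	omap_set_dma_src_params(lch, 0, OMAP_DMA_AMODE_POST_INC,
 *				src_phys, 0, 0);
 *	omap_set_dma_dest_params(lch, 0, OMAP_DMA_AMODE_POST_INC,
 *				 dst_phys, 0, 0);
 *	omap_start_dma(lch);
 *	...
 *	omap_stop_dma(lch);
 */
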
void omap_stop_dma(int lch)
{
	u32 l;

	/* Disable all interrupts on the channel */
	omap_disable_channel_irq(lch);

	l = p->dma_read(CCR, lch);
	if (IS_DMA_ERRATA(DMA_ERRATA_i541) &&
			(l & OMAP_DMA_CCR_SEL_SRC_DST_SYNC)) {
		int i = 0;
		u32 sys_cf;

		/* Configure No-Standby */
		l = p->dma_read(OCP_SYSCONFIG, lch);
		sys_cf = l;
		l &= ~DMA_SYSCONFIG_MIDLEMODE_MASK;
		l |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE);
		p->dma_write(l, OCP_SYSCONFIG, 0);

		l = p->dma_read(CCR, lch);
		l &= ~OMAP_DMA_CCR_EN;
		p->dma_write(l, CCR, lch);

		/* Wait for the sDMA FIFO to drain */
		l = p->dma_read(CCR, lch);
		while (i < 100 && (l & (OMAP_DMA_CCR_RD_ACTIVE |
					OMAP_DMA_CCR_WR_ACTIVE))) {
			udelay(5);
			i++;
			l = p->dma_read(CCR, lch);
		}
		if (i >= 100)
			pr_err("DMA drain did not complete on lch %d\n", lch);
		/* Restore OCP_SYSCONFIG */
		p->dma_write(sys_cf, OCP_SYSCONFIG, lch);
	} else {
		l &= ~OMAP_DMA_CCR_EN;
		p->dma_write(l, CCR, lch);
	}

	/*
	 * Ensure that data transferred by DMA is visible to any access
	 * after DMA has been disabled. This is important for coherent
	 * DMA regions.
	 */
	mb();

	if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
		int next_lch, cur_lch = lch;
		char dma_chan_link_map[MAX_LOGICAL_DMA_CH_COUNT];

		memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map));
		do {
			/* The loop case: we've been here already */
			if (dma_chan_link_map[cur_lch])
				break;
			/* Mark the current channel */
			dma_chan_link_map[cur_lch] = 1;

			disable_lnk(cur_lch);

			next_lch = dma_chan[cur_lch].next_lch;
			cur_lch = next_lch;
		} while (next_lch != -1);
	}

	dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
}
EXPORT_SYMBOL(omap_stop_dma);

/*
 * Allows changing the DMA callback function or data. This may be needed if
 * the driver shares a single DMA channel for multiple dma triggers.
 */

/*
 * Returns the current physical source address for the given DMA channel.
 * If the channel is running, the caller must disable interrupts prior to
 * calling this function and process the returned value before re-enabling
 * interrupts, to prevent races with the interrupt handler. Note that in
 * continuous mode there is a chance for CSSA_L register overflow between
 * the two reads, resulting in an incorrect return value.
 */
dma_addr_t omap_get_dma_src_pos(int lch)
{
	dma_addr_t offset = 0;

	if (dma_omap15xx())
		offset = p->dma_read(CPC, lch);
	else
		offset = p->dma_read(CSAC, lch);

	if (IS_DMA_ERRATA(DMA_ERRATA_3_3) && offset == 0)
		offset = p->dma_read(CSAC, lch);

	if (!dma_omap15xx()) {
		/*
		 * CDAC == 0 indicates that the DMA transfer on the channel has
		 * not been started (no data has been transferred so far).
		 * Return the programmed source start address in this case.
		 */
		if (likely(p->dma_read(CDAC, lch)))
			offset = p->dma_read(CSAC, lch);
		else
			offset = p->dma_read(CSSA, lch);
	}

	if (dma_omap1())
		offset |= (p->dma_read(CSSA, lch) & 0xFFFF0000);

	return offset;
}
EXPORT_SYMBOL(omap_get_dma_src_pos);

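/*
 * Sketch of the locking rule described above (illustrative only; src_phys
 * is a placeholder for the programmed source start address): when the
 * channel may still be running, sample the position with interrupts off
 * and consume the value before re-enabling them:
 *
 *	local_irq_save(flags);
 *	pos = omap_get_dma_src_pos(lch);
 *	bytes_done = pos - src_phys;
 *	local_irq_restore(flags);
 */
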
/*
 * Returns the current physical destination address for the given DMA channel.
 * If the channel is running, the caller must disable interrupts prior to
 * calling this function and process the returned value before re-enabling
 * interrupts, to prevent races with the interrupt handler. Note that in
 * continuous mode there is a chance for CDSA_L register overflow between
 * the two reads, resulting in an incorrect return value.
 */
dma_addr_t omap_get_dma_dst_pos(int lch)
{
	dma_addr_t offset = 0;

	if (dma_omap15xx())
		offset = p->dma_read(CPC, lch);
	else
		offset = p->dma_read(CDAC, lch);

	/*
	 * omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
	 * read before the DMA controller finished disabling the channel.
	 */
	if (!dma_omap15xx() && offset == 0) {
		offset = p->dma_read(CDAC, lch);
		/*
		 * CDAC == 0 indicates that the DMA transfer on the channel has
		 * not been started (no data has been transferred so far).
		 * Return the programmed destination start address in this case.
		 */
		if (unlikely(!offset))
			offset = p->dma_read(CDSA, lch);
	}

	if (dma_omap1())
		offset |= (p->dma_read(CDSA, lch) & 0xFFFF0000);

	return offset;
}
EXPORT_SYMBOL(omap_get_dma_dst_pos);

int omap_get_dma_active_status(int lch)
{
	return (p->dma_read(CCR, lch) & OMAP_DMA_CCR_EN) != 0;
}
EXPORT_SYMBOL(omap_get_dma_active_status);

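/*
 * Usage sketch (illustrative only): a caller that does not use a completion
 * callback can poll the channel enable bit with a bounded busy-wait:
 *
 *	while (omap_get_dma_active_status(lch) && --timeout)
 *		udelay(10);
 */
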
int omap_dma_running(void)
{
	int lch;

	if (dma_omap1())
		if (omap_lcd_dma_running())
			return 1;

	for (lch = 0; lch < dma_chan_count; lch++)
		if (p->dma_read(CCR, lch) & OMAP_DMA_CCR_EN)
			return 1;

	return 0;
}

/*----------------------------------------------------------------------------*/

#ifdef CONFIG_ARCH_OMAP1

static int omap1_dma_handle_ch(int ch)
{
	u32 csr;

	if (enable_1510_mode && ch >= 6) {
		csr = dma_chan[ch].saved_csr;
		dma_chan[ch].saved_csr = 0;
	} else
		csr = p->dma_read(CSR, ch);
	if (enable_1510_mode && ch <= 2 && (csr >> 7) != 0) {
		dma_chan[ch + 6].saved_csr = csr >> 7;
		csr &= 0x7f;
	}
	if ((csr & 0x3f) == 0)
		return 0;
	if (unlikely(dma_chan[ch].dev_id == -1)) {
		pr_warn("Spurious interrupt from DMA channel %d (CSR %04x)\n",
			ch, csr);
		return 0;
	}
	if (unlikely(csr & OMAP1_DMA_TOUT_IRQ))
		pr_warn("DMA timeout with device %d\n", dma_chan[ch].dev_id);
	if (unlikely(csr & OMAP_DMA_DROP_IRQ))
		pr_warn("DMA synchronization event drop occurred with device %d\n",
			dma_chan[ch].dev_id);
	if (likely(csr & OMAP_DMA_BLOCK_IRQ))
		dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE;
	if (likely(dma_chan[ch].callback != NULL))
		dma_chan[ch].callback(ch, csr, dma_chan[ch].data);
	return 1;
}

static irqreturn_t omap1_dma_irq_handler(int irq, void *dev_id)
{
	int ch = ((int) dev_id) - 1;
	int handled = 0;

	for (;;) {
		int handled_now = 0;

		handled_now += omap1_dma_handle_ch(ch);
		if (enable_1510_mode && dma_chan[ch + 6].saved_csr)
			handled_now += omap1_dma_handle_ch(ch + 6);
		if (!handled_now)
			break;
		handled += handled_now;
	}

	return handled ? IRQ_HANDLED : IRQ_NONE;
}

#else
#define omap1_dma_irq_handler	NULL
#endif

struct omap_system_dma_plat_info *omap_get_plat_info(void)
{
	return p;
}
EXPORT_SYMBOL_GPL(omap_get_plat_info);

static int omap_system_dma_probe(struct platform_device *pdev)
{
	int ch, ret = 0;
	int dma_irq;
	char irq_name[4];

	p = pdev->dev.platform_data;
	if (!p) {
		dev_err(&pdev->dev,
			"%s: System DMA initialized without platform data\n",
			__func__);
		return -EINVAL;
	}

	d = p->dma_attr;
	errata = p->errata;

	if ((d->dev_caps & RESERVE_CHANNEL) && omap_dma_reserve_channels
			&& (omap_dma_reserve_channels < d->lch_count))
		d->lch_count = omap_dma_reserve_channels;

	dma_lch_count = d->lch_count;
	dma_chan_count = dma_lch_count;
	enable_1510_mode = d->dev_caps & ENABLE_1510_MODE;

	dma_chan = devm_kcalloc(&pdev->dev, dma_lch_count,
				sizeof(*dma_chan), GFP_KERNEL);
	if (!dma_chan)
		return -ENOMEM;

	spin_lock_init(&dma_chan_lock);
	for (ch = 0; ch < dma_chan_count; ch++) {
		omap_clear_dma(ch);

		dma_chan[ch].dev_id = -1;
		dma_chan[ch].next_lch = -1;

		if (ch >= 6 && enable_1510_mode)
			continue;

		if (dma_omap1()) {
			/*
			 * request_irq() doesn't like dev_id (ie. ch) being
			 * zero, so we have to kludge around this.
			 */
			sprintf(&irq_name[0], "%d", ch);
			dma_irq = platform_get_irq_byname(pdev, irq_name);

			if (dma_irq < 0) {
				ret = dma_irq;
				goto exit_dma_irq_fail;
			}

			/* INT_DMA_LCD is handled in lcd_dma.c */
			if (dma_irq == INT_DMA_LCD)
				continue;

			ret = request_irq(dma_irq,
					omap1_dma_irq_handler, 0, "DMA",
					(void *) (ch + 1));
			if (ret != 0)
				goto exit_dma_irq_fail;
		}
	}

	/* reserve dma channels 0 and 1 in high security devices on 34xx */
	if (d->dev_caps & HS_CHANNELS_RESERVED) {
		pr_info("Reserving DMA channels 0 and 1 for HS ROM code\n");
		dma_chan[0].dev_id = 0;
		dma_chan[1].dev_id = 1;
	}
	p->show_dma_caps();
	return 0;

exit_dma_irq_fail:
	return ret;
}

static int omap_system_dma_remove(struct platform_device *pdev)
{
	int dma_irq, irq_rel = 0;

	if (dma_omap2plus())
		return 0;

	for ( ; irq_rel < dma_chan_count; irq_rel++) {
		dma_irq = platform_get_irq(pdev, irq_rel);
		free_irq(dma_irq, (void *)(irq_rel + 1));
	}

	return 0;
}

static struct platform_driver omap_system_dma_driver = {
	.probe		= omap_system_dma_probe,
	.remove		= omap_system_dma_remove,
	.driver		= {
		.name	= "omap_dma_system"
	},
};

static int __init omap_system_dma_init(void)
{
	return platform_driver_register(&omap_system_dma_driver);
}
arch_initcall(omap_system_dma_init);

static void __exit omap_system_dma_exit(void)
{
	platform_driver_unregister(&omap_system_dma_driver);
}

MODULE_DESCRIPTION("OMAP SYSTEM DMA DRIVER");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Texas Instruments Inc");

/*
 * Reserve the omap SDMA channels using cmdline bootarg
 * "omap_dma_reserve_ch=". The valid range is 1 to 32.
 */
static int __init omap_dma_cmdline_reserve_ch(char *str)
{
	if (get_option(&str, &omap_dma_reserve_channels) != 1)
		omap_dma_reserve_channels = 0;

	return 1;
}

__setup("omap_dma_reserve_ch=", omap_dma_cmdline_reserve_ch);
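
/*
 * Example (illustrative only; the value 12 is made up): booting with
 *
 *	omap_dma_reserve_ch=12
 *
 * sets omap_dma_reserve_channels, which the probe function above uses to
 * clamp d->lch_count on devices that advertise the RESERVE_CHANNEL
 * capability.
 */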