sun4i-dma.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2014 Emilio López
 * Emilio López <emilio@elopez.com.ar>
 */

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "virt-dma.h"

/** Common macros to normal and dedicated DMA registers **/

#define SUN4I_DMA_CFG_LOADING			BIT(31)
#define SUN4I_DMA_CFG_DST_DATA_WIDTH(width)	((width) << 25)
#define SUN4I_DMA_CFG_DST_BURST_LENGTH(len)	((len) << 23)
#define SUN4I_DMA_CFG_DST_ADDR_MODE(mode)	((mode) << 21)
#define SUN4I_DMA_CFG_DST_DRQ_TYPE(type)	((type) << 16)
#define SUN4I_DMA_CFG_SRC_DATA_WIDTH(width)	((width) << 9)
#define SUN4I_DMA_CFG_SRC_BURST_LENGTH(len)	((len) << 7)
#define SUN4I_DMA_CFG_SRC_ADDR_MODE(mode)	((mode) << 5)
#define SUN4I_DMA_CFG_SRC_DRQ_TYPE(type)	(type)

/** Normal DMA register values **/

/* Normal DMA source/destination data request type values */
#define SUN4I_NDMA_DRQ_TYPE_SDRAM		0x16
#define SUN4I_NDMA_DRQ_TYPE_LIMIT		(0x1F + 1)

/** Normal DMA register layout **/
/* Normal DMA source/destination address mode values */
#define SUN4I_NDMA_ADDR_MODE_LINEAR		0
#define SUN4I_NDMA_ADDR_MODE_IO			1

/* Normal DMA configuration register layout */
#define SUN4I_NDMA_CFG_CONT_MODE		BIT(30)
#define SUN4I_NDMA_CFG_WAIT_STATE(n)		((n) << 27)
#define SUN4I_NDMA_CFG_DST_NON_SECURE		BIT(22)
#define SUN4I_NDMA_CFG_BYTE_COUNT_MODE_REMAIN	BIT(15)
#define SUN4I_NDMA_CFG_SRC_NON_SECURE		BIT(6)

/** Dedicated DMA register values **/

/* Dedicated DMA source/destination address mode values */
#define SUN4I_DDMA_ADDR_MODE_LINEAR		0
#define SUN4I_DDMA_ADDR_MODE_IO			1
#define SUN4I_DDMA_ADDR_MODE_HORIZONTAL_PAGE	2
#define SUN4I_DDMA_ADDR_MODE_VERTICAL_PAGE	3

/* Dedicated DMA source/destination data request type values */
#define SUN4I_DDMA_DRQ_TYPE_SDRAM		0x1
#define SUN4I_DDMA_DRQ_TYPE_LIMIT		(0x1F + 1)

/** Dedicated DMA register layout **/

/* Dedicated DMA configuration register layout */
#define SUN4I_DDMA_CFG_BUSY			BIT(30)
#define SUN4I_DDMA_CFG_CONT_MODE		BIT(29)
#define SUN4I_DDMA_CFG_DST_NON_SECURE		BIT(28)
#define SUN4I_DDMA_CFG_BYTE_COUNT_MODE_REMAIN	BIT(15)
#define SUN4I_DDMA_CFG_SRC_NON_SECURE		BIT(12)

/* Dedicated DMA parameter register layout */
#define SUN4I_DDMA_PARA_DST_DATA_BLK_SIZE(n)	(((n) - 1) << 24)
#define SUN4I_DDMA_PARA_DST_WAIT_CYCLES(n)	(((n) - 1) << 16)
#define SUN4I_DDMA_PARA_SRC_DATA_BLK_SIZE(n)	(((n) - 1) << 8)
#define SUN4I_DDMA_PARA_SRC_WAIT_CYCLES(n)	(((n) - 1) << 0)

/** DMA register offsets **/

/* General register offsets */
#define SUN4I_DMA_IRQ_ENABLE_REG		0x0
#define SUN4I_DMA_IRQ_PENDING_STATUS_REG	0x4

/* Normal DMA register offsets */
#define SUN4I_NDMA_CHANNEL_REG_BASE(n)		(0x100 + (n) * 0x20)
#define SUN4I_NDMA_CFG_REG			0x0
#define SUN4I_NDMA_SRC_ADDR_REG			0x4
#define SUN4I_NDMA_DST_ADDR_REG			0x8
#define SUN4I_NDMA_BYTE_COUNT_REG		0xC

/* Dedicated DMA register offsets */
#define SUN4I_DDMA_CHANNEL_REG_BASE(n)		(0x300 + (n) * 0x20)
#define SUN4I_DDMA_CFG_REG			0x0
#define SUN4I_DDMA_SRC_ADDR_REG			0x4
#define SUN4I_DDMA_DST_ADDR_REG			0x8
#define SUN4I_DDMA_BYTE_COUNT_REG		0xC
#define SUN4I_DDMA_PARA_REG			0x18

/** DMA Driver **/

/*
 * Normal DMA has 8 channels, and Dedicated DMA has another 8, so
 * that's 16 channels. As for endpoints, there's 29 and 21
 * respectively. Given that the Normal DMA endpoints (other than
 * SDRAM) can be used as tx/rx, we need 78 vchans in total
 */
#define SUN4I_NDMA_NR_MAX_CHANNELS	8
#define SUN4I_DDMA_NR_MAX_CHANNELS	8
#define SUN4I_DMA_NR_MAX_CHANNELS					\
	(SUN4I_NDMA_NR_MAX_CHANNELS + SUN4I_DDMA_NR_MAX_CHANNELS)
#define SUN4I_NDMA_NR_MAX_VCHANS	(29 * 2 - 1)
#define SUN4I_DDMA_NR_MAX_VCHANS	21
#define SUN4I_DMA_NR_MAX_VCHANS						\
	(SUN4I_NDMA_NR_MAX_VCHANS + SUN4I_DDMA_NR_MAX_VCHANS)
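
/*
 * To spell out the arithmetic from the comment above: the 28 non-SDRAM
 * NDMA endpoints each need a tx and an rx vchan (56), and SDRAM needs
 * a single one, so 29 * 2 - 1 = 57 NDMA vchans; add the 21 DDMA
 * endpoints and we arrive at the 78 vchans mentioned above.
 */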
/*
 * This set of SUN4I_DDMA timing parameters was found experimentally
 * while working with the SPI driver, and seems to make it behave
 * correctly.
 */
#define SUN4I_DDMA_MAGIC_SPI_PARAMETERS					\
	(SUN4I_DDMA_PARA_DST_DATA_BLK_SIZE(1) |				\
	 SUN4I_DDMA_PARA_SRC_DATA_BLK_SIZE(1) |				\
	 SUN4I_DDMA_PARA_DST_WAIT_CYCLES(2) |				\
	 SUN4I_DDMA_PARA_SRC_WAIT_CYCLES(2))
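
/*
 * For reference, expanding against the SUN4I_DDMA_PARA_* macros above:
 * block size 1 contributes (1 - 1) << 24 and (1 - 1) << 8 (i.e. zero),
 * 2 wait cycles contribute (2 - 1) << 16 and (2 - 1) << 0, so the
 * register value written is 0x00010001.
 */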
struct sun4i_dma_pchan {
	/* Register base of channel */
	void __iomem			*base;
	/* vchan currently being serviced */
	struct sun4i_dma_vchan		*vchan;
	/* Is this a dedicated pchan? */
	int				is_dedicated;
};

struct sun4i_dma_vchan {
	struct virt_dma_chan		vc;
	struct dma_slave_config		cfg;
	struct sun4i_dma_pchan		*pchan;
	struct sun4i_dma_promise	*processing;
	struct sun4i_dma_contract	*contract;
	u8				endpoint;
	int				is_dedicated;
};

struct sun4i_dma_promise {
	u32				cfg;
	u32				para;
	dma_addr_t			src;
	dma_addr_t			dst;
	size_t				len;
	struct list_head		list;
};

/* A contract is a set of promises */
struct sun4i_dma_contract {
	struct virt_dma_desc		vd;
	struct list_head		demands;
	struct list_head		completed_demands;
	int				is_cyclic;
};

struct sun4i_dma_dev {
	DECLARE_BITMAP(pchans_used, SUN4I_DMA_NR_MAX_CHANNELS);
	struct dma_device		slave;
	struct sun4i_dma_pchan		*pchans;
	struct sun4i_dma_vchan		*vchans;
	void __iomem			*base;
	struct clk			*clk;
	int				irq;
	spinlock_t			lock;
};

static struct sun4i_dma_dev *to_sun4i_dma_dev(struct dma_device *dev)
{
	return container_of(dev, struct sun4i_dma_dev, slave);
}

static struct sun4i_dma_vchan *to_sun4i_dma_vchan(struct dma_chan *chan)
{
	return container_of(chan, struct sun4i_dma_vchan, vc.chan);
}

static struct sun4i_dma_contract *to_sun4i_dma_contract(struct virt_dma_desc *vd)
{
	return container_of(vd, struct sun4i_dma_contract, vd);
}

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static int convert_burst(u32 maxburst)
{
	if (maxburst > 8)
		return -EINVAL;

	/* 1 -> 0, 4 -> 1, 8 -> 2 */
	return (maxburst >> 2);
}

static int convert_buswidth(enum dma_slave_buswidth addr_width)
{
	if (addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES)
		return -EINVAL;

	/* 8 (1 byte) -> 0, 16 (2 bytes) -> 1, 32 (4 bytes) -> 2 */
	return (addr_width >> 1);
}

static void sun4i_dma_free_chan_resources(struct dma_chan *chan)
{
	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);

	vchan_free_chan_resources(&vchan->vc);
}

static struct sun4i_dma_pchan *find_and_use_pchan(struct sun4i_dma_dev *priv,
						  struct sun4i_dma_vchan *vchan)
{
	struct sun4i_dma_pchan *pchan = NULL, *pchans = priv->pchans;
	unsigned long flags;
	int i, max;

	/*
	 * pchans 0-SUN4I_NDMA_NR_MAX_CHANNELS are normal, and
	 * SUN4I_NDMA_NR_MAX_CHANNELS+ are dedicated ones
	 */
	if (vchan->is_dedicated) {
		i = SUN4I_NDMA_NR_MAX_CHANNELS;
		max = SUN4I_DMA_NR_MAX_CHANNELS;
	} else {
		i = 0;
		max = SUN4I_NDMA_NR_MAX_CHANNELS;
	}

	spin_lock_irqsave(&priv->lock, flags);
	for_each_clear_bit_from(i, priv->pchans_used, max) {
		pchan = &pchans[i];
		pchan->vchan = vchan;
		set_bit(i, priv->pchans_used);
		break;
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	return pchan;
}

static void release_pchan(struct sun4i_dma_dev *priv,
			  struct sun4i_dma_pchan *pchan)
{
	unsigned long flags;
	int nr = pchan - priv->pchans;

	spin_lock_irqsave(&priv->lock, flags);

	pchan->vchan = NULL;
	clear_bit(nr, priv->pchans_used);

	spin_unlock_irqrestore(&priv->lock, flags);
}

static void configure_pchan(struct sun4i_dma_pchan *pchan,
			    struct sun4i_dma_promise *d)
{
	/*
	 * Configure addresses and misc parameters depending on type
	 * SUN4I_DDMA has an extra field with timing parameters
	 */
	if (pchan->is_dedicated) {
		writel_relaxed(d->src, pchan->base + SUN4I_DDMA_SRC_ADDR_REG);
		writel_relaxed(d->dst, pchan->base + SUN4I_DDMA_DST_ADDR_REG);
		writel_relaxed(d->len, pchan->base + SUN4I_DDMA_BYTE_COUNT_REG);
		writel_relaxed(d->para, pchan->base + SUN4I_DDMA_PARA_REG);
		writel_relaxed(d->cfg, pchan->base + SUN4I_DDMA_CFG_REG);
	} else {
		writel_relaxed(d->src, pchan->base + SUN4I_NDMA_SRC_ADDR_REG);
		writel_relaxed(d->dst, pchan->base + SUN4I_NDMA_DST_ADDR_REG);
		writel_relaxed(d->len, pchan->base + SUN4I_NDMA_BYTE_COUNT_REG);
		writel_relaxed(d->cfg, pchan->base + SUN4I_NDMA_CFG_REG);
	}
}
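
/*
 * Each pchan owns a pair of bits in the IRQ enable/pending registers:
 * bit 2n signals "half done" and bit 2n + 1 signals "end of transfer"
 * for pchan n, as encoded by set_pchan_interrupt() below and decoded
 * again in the interrupt handler (bit >> 1 and bit & 1). For example,
 * pchan 3 uses bits 6 and 7 of SUN4I_DMA_IRQ_ENABLE_REG.
 */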
static void set_pchan_interrupt(struct sun4i_dma_dev *priv,
				struct sun4i_dma_pchan *pchan,
				int half, int end)
{
	u32 reg;
	int pchan_number = pchan - priv->pchans;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	reg = readl_relaxed(priv->base + SUN4I_DMA_IRQ_ENABLE_REG);

	if (half)
		reg |= BIT(pchan_number * 2);
	else
		reg &= ~BIT(pchan_number * 2);

	if (end)
		reg |= BIT(pchan_number * 2 + 1);
	else
		reg &= ~BIT(pchan_number * 2 + 1);

	writel_relaxed(reg, priv->base + SUN4I_DMA_IRQ_ENABLE_REG);

	spin_unlock_irqrestore(&priv->lock, flags);
}

/*
 * Execute pending operations on a vchan
 *
 * When given a vchan, this function will try to acquire a suitable
 * pchan and, if successful, will configure it to fulfill a promise
 * from the next pending contract.
 *
 * This function must be called with &vchan->vc.lock held.
 */
static int __execute_vchan_pending(struct sun4i_dma_dev *priv,
				   struct sun4i_dma_vchan *vchan)
{
	struct sun4i_dma_promise *promise = NULL;
	struct sun4i_dma_contract *contract = NULL;
	struct sun4i_dma_pchan *pchan;
	struct virt_dma_desc *vd;
	int ret;

	lockdep_assert_held(&vchan->vc.lock);

	/* We need a pchan to do anything, so secure one if available */
	pchan = find_and_use_pchan(priv, vchan);
	if (!pchan)
		return -EBUSY;

	/*
	 * Channel endpoints must not be repeated, so if this vchan
	 * has already submitted some work, we can't do anything else
	 */
	if (vchan->processing) {
		dev_dbg(chan2dev(&vchan->vc.chan),
			"processing something to this endpoint already\n");
		ret = -EBUSY;
		goto release_pchan;
	}

	do {
		/* Figure out which contract we're working with today */
		vd = vchan_next_desc(&vchan->vc);
		if (!vd) {
			dev_dbg(chan2dev(&vchan->vc.chan),
				"No pending contract found");
			ret = 0;
			goto release_pchan;
		}

		contract = to_sun4i_dma_contract(vd);
		if (list_empty(&contract->demands)) {
			/* The contract has been completed so mark it as such */
			list_del(&contract->vd.node);
			vchan_cookie_complete(&contract->vd);
			dev_dbg(chan2dev(&vchan->vc.chan),
				"Empty contract found and marked complete");
		}
	} while (list_empty(&contract->demands));

	/* Now find out what we need to do */
	promise = list_first_entry(&contract->demands,
				   struct sun4i_dma_promise, list);
	vchan->processing = promise;

	/* ... and make it reality */
	if (promise) {
		vchan->contract = contract;
		vchan->pchan = pchan;
		set_pchan_interrupt(priv, pchan, contract->is_cyclic, 1);
		configure_pchan(pchan, promise);
	}

	return 0;

release_pchan:
	release_pchan(priv, pchan);
	return ret;
}

static int sanitize_config(struct dma_slave_config *sconfig,
			   enum dma_transfer_direction direction)
{
	switch (direction) {
	case DMA_MEM_TO_DEV:
		if ((sconfig->dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) ||
		    !sconfig->dst_maxburst)
			return -EINVAL;

		if (sconfig->src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
			sconfig->src_addr_width = sconfig->dst_addr_width;

		if (!sconfig->src_maxburst)
			sconfig->src_maxburst = sconfig->dst_maxburst;

		break;

	case DMA_DEV_TO_MEM:
		if ((sconfig->src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) ||
		    !sconfig->src_maxburst)
			return -EINVAL;

		if (sconfig->dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
			sconfig->dst_addr_width = sconfig->src_addr_width;

		if (!sconfig->dst_maxburst)
			sconfig->dst_maxburst = sconfig->src_maxburst;

		break;
	default:
		return 0;
	}

	return 0;
}

/*
 * Generate a promise, to be used in a normal DMA contract.
 *
 * A NDMA promise contains all the information required to program the
 * normal part of the DMA Engine and get data copied. A non-executed
 * promise will live in the demands list on a contract. Once it has been
 * completed, it will be moved to the completed demands list for later freeing.
 * All linked promises will be freed when the corresponding contract is freed
 */
static struct sun4i_dma_promise *
generate_ndma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
		      size_t len, struct dma_slave_config *sconfig,
		      enum dma_transfer_direction direction)
{
	struct sun4i_dma_promise *promise;
	int ret;

	ret = sanitize_config(sconfig, direction);
	if (ret)
		return NULL;

	promise = kzalloc(sizeof(*promise), GFP_NOWAIT);
	if (!promise)
		return NULL;

	promise->src = src;
	promise->dst = dest;
	promise->len = len;
	promise->cfg = SUN4I_DMA_CFG_LOADING |
		SUN4I_NDMA_CFG_BYTE_COUNT_MODE_REMAIN;

	dev_dbg(chan2dev(chan),
		"src burst %d, dst burst %d, src buswidth %d, dst buswidth %d",
		sconfig->src_maxburst, sconfig->dst_maxburst,
		sconfig->src_addr_width, sconfig->dst_addr_width);

	/* Source burst */
	ret = convert_burst(sconfig->src_maxburst);
	if (ret < 0)
		goto fail;
	promise->cfg |= SUN4I_DMA_CFG_SRC_BURST_LENGTH(ret);

	/* Destination burst */
	ret = convert_burst(sconfig->dst_maxburst);
	if (ret < 0)
		goto fail;
	promise->cfg |= SUN4I_DMA_CFG_DST_BURST_LENGTH(ret);

	/* Source bus width */
	ret = convert_buswidth(sconfig->src_addr_width);
	if (ret < 0)
		goto fail;
	promise->cfg |= SUN4I_DMA_CFG_SRC_DATA_WIDTH(ret);

	/* Destination bus width */
	ret = convert_buswidth(sconfig->dst_addr_width);
	if (ret < 0)
		goto fail;
	promise->cfg |= SUN4I_DMA_CFG_DST_DATA_WIDTH(ret);

	return promise;

fail:
	kfree(promise);
	return NULL;
}

/*
 * Generate a promise, to be used in a dedicated DMA contract.
 *
 * A DDMA promise contains all the information required to program the
 * Dedicated part of the DMA Engine and get data copied. A non-executed
 * promise will live in the demands list on a contract. Once it has been
 * completed, it will be moved to the completed demands list for later freeing.
 * All linked promises will be freed when the corresponding contract is freed
 */
static struct sun4i_dma_promise *
generate_ddma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
		      size_t len, struct dma_slave_config *sconfig)
{
	struct sun4i_dma_promise *promise;
	int ret;

	promise = kzalloc(sizeof(*promise), GFP_NOWAIT);
	if (!promise)
		return NULL;

	promise->src = src;
	promise->dst = dest;
	promise->len = len;
	promise->cfg = SUN4I_DMA_CFG_LOADING |
		SUN4I_DDMA_CFG_BYTE_COUNT_MODE_REMAIN;

	/* Source burst */
	ret = convert_burst(sconfig->src_maxburst);
	if (ret < 0)
		goto fail;
	promise->cfg |= SUN4I_DMA_CFG_SRC_BURST_LENGTH(ret);

	/* Destination burst */
	ret = convert_burst(sconfig->dst_maxburst);
	if (ret < 0)
		goto fail;
	promise->cfg |= SUN4I_DMA_CFG_DST_BURST_LENGTH(ret);

	/* Source bus width */
	ret = convert_buswidth(sconfig->src_addr_width);
	if (ret < 0)
		goto fail;
	promise->cfg |= SUN4I_DMA_CFG_SRC_DATA_WIDTH(ret);

	/* Destination bus width */
	ret = convert_buswidth(sconfig->dst_addr_width);
	if (ret < 0)
		goto fail;
	promise->cfg |= SUN4I_DMA_CFG_DST_DATA_WIDTH(ret);

	return promise;

fail:
	kfree(promise);
	return NULL;
}

/*
 * Generate a contract
 *
 * Contracts function as DMA descriptors. As our hardware does not support
 * linked lists, we need to implement SG via software. We use a contract
 * to hold all the pieces of the request and process them serially one
 * after another. Each piece is represented as a promise.
 */
static struct sun4i_dma_contract *generate_dma_contract(void)
{
	struct sun4i_dma_contract *contract;

	contract = kzalloc(sizeof(*contract), GFP_NOWAIT);
	if (!contract)
		return NULL;

	INIT_LIST_HEAD(&contract->demands);
	INIT_LIST_HEAD(&contract->completed_demands);

	return contract;
}

/*
 * Get next promise on a cyclic transfer
 *
 * Cyclic contracts contain a series of promises which are executed on a
 * loop. This function returns the next promise from a cyclic contract,
 * so it can be programmed into the hardware.
 */
static struct sun4i_dma_promise *
get_next_cyclic_promise(struct sun4i_dma_contract *contract)
{
	struct sun4i_dma_promise *promise;

	promise = list_first_entry_or_null(&contract->demands,
					   struct sun4i_dma_promise, list);
	if (!promise) {
		list_splice_init(&contract->completed_demands,
				 &contract->demands);
		promise = list_first_entry(&contract->demands,
					   struct sun4i_dma_promise, list);
	}

	return promise;
}

/*
 * Free a contract and all its associated promises
 */
static void sun4i_dma_free_contract(struct virt_dma_desc *vd)
{
	struct sun4i_dma_contract *contract = to_sun4i_dma_contract(vd);
	struct sun4i_dma_promise *promise, *tmp;

	/* Free all the demands and completed demands */
	list_for_each_entry_safe(promise, tmp, &contract->demands, list)
		kfree(promise);

	list_for_each_entry_safe(promise, tmp, &contract->completed_demands, list)
		kfree(promise);

	kfree(contract);
}

static struct dma_async_tx_descriptor *
sun4i_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
			  dma_addr_t src, size_t len, unsigned long flags)
{
	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
	struct dma_slave_config *sconfig = &vchan->cfg;
	struct sun4i_dma_promise *promise;
	struct sun4i_dma_contract *contract;

	contract = generate_dma_contract();
	if (!contract)
		return NULL;

	/*
	 * We can only do the copy to bus aligned addresses, so
	 * choose the best one so we get decent performance. We also
	 * maximize the burst size for this same reason.
	 */
	sconfig->src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	sconfig->dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	sconfig->src_maxburst = 8;
	sconfig->dst_maxburst = 8;

	if (vchan->is_dedicated)
		promise = generate_ddma_promise(chan, src, dest, len, sconfig);
	else
		promise = generate_ndma_promise(chan, src, dest, len, sconfig,
						DMA_MEM_TO_MEM);

	if (!promise) {
		kfree(contract);
		return NULL;
	}

	/* Configure memcpy mode */
	if (vchan->is_dedicated) {
		promise->cfg |= SUN4I_DMA_CFG_SRC_DRQ_TYPE(SUN4I_DDMA_DRQ_TYPE_SDRAM) |
				SUN4I_DMA_CFG_DST_DRQ_TYPE(SUN4I_DDMA_DRQ_TYPE_SDRAM);
	} else {
		promise->cfg |= SUN4I_DMA_CFG_SRC_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM) |
				SUN4I_DMA_CFG_DST_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM);
	}

	/* Fill the contract with our only promise */
	list_add_tail(&promise->list, &contract->demands);

	/* And add it to the vchan */
	return vchan_tx_prep(&vchan->vc, &contract->vd, flags);
}

static struct dma_async_tx_descriptor *
sun4i_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf, size_t len,
			  size_t period_len, enum dma_transfer_direction dir,
			  unsigned long flags)
{
	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
	struct dma_slave_config *sconfig = &vchan->cfg;
	struct sun4i_dma_promise *promise;
	struct sun4i_dma_contract *contract;
	dma_addr_t src, dest;
	u32 endpoints;
	int nr_periods, offset, plength, i;
	u8 ram_type, io_mode, linear_mode;

	if (!is_slave_direction(dir)) {
		dev_err(chan2dev(chan), "Invalid DMA direction\n");
		return NULL;
	}

	contract = generate_dma_contract();
	if (!contract)
		return NULL;

	contract->is_cyclic = 1;

	if (vchan->is_dedicated) {
		io_mode = SUN4I_DDMA_ADDR_MODE_IO;
		linear_mode = SUN4I_DDMA_ADDR_MODE_LINEAR;
		ram_type = SUN4I_DDMA_DRQ_TYPE_SDRAM;
	} else {
		io_mode = SUN4I_NDMA_ADDR_MODE_IO;
		linear_mode = SUN4I_NDMA_ADDR_MODE_LINEAR;
		ram_type = SUN4I_NDMA_DRQ_TYPE_SDRAM;
	}

	if (dir == DMA_MEM_TO_DEV) {
		src = buf;
		dest = sconfig->dst_addr;
		endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(vchan->endpoint) |
			    SUN4I_DMA_CFG_DST_ADDR_MODE(io_mode) |
			    SUN4I_DMA_CFG_SRC_DRQ_TYPE(ram_type) |
			    SUN4I_DMA_CFG_SRC_ADDR_MODE(linear_mode);
	} else {
		src = sconfig->src_addr;
		dest = buf;
		endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(ram_type) |
			    SUN4I_DMA_CFG_DST_ADDR_MODE(linear_mode) |
			    SUN4I_DMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) |
			    SUN4I_DMA_CFG_SRC_ADDR_MODE(io_mode);
	}
	/*
	 * The engine can interrupt on half-transfer, so we can use
	 * this feature to make two periods out of each promise and
	 * program the engine half as often as we otherwise would
	 * (keep in mind the hardware doesn't support linked lists).
	 *
	 * Say you have a set of periods (| marks the start/end, I for
	 * interrupt, P for programming the engine to do a new
	 * transfer), the easy but slow way would be to do
	 *
	 *	|---|---|---|---| (periods / promises)
	 *	P  I,P  I,P  I,P  I
	 *
	 * Using half transfer interrupts you can do
	 *
	 *	|-------|-------| (promises as configured on hw)
	 *	|---|---|---|---| (periods)
	 *	P   I   I,P  I   I
	 *
	 * Which requires half the engine programming for the same
	 * functionality.
	 */
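	/*
	 * As a worked example (numbers made up for illustration): a
	 * buffer with len == 4 * period_len gives nr_periods =
	 * DIV_ROUND_UP(4, 2) = 2 promises below, each spanning two
	 * periods; the half-done interrupt then covers the period
	 * boundary in the middle of each promise.
	 */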
	nr_periods = DIV_ROUND_UP(len / period_len, 2);
	for (i = 0; i < nr_periods; i++) {
		/* Calculate the offset in the buffer and the length needed */
		offset = i * period_len * 2;
		plength = min((len - offset), (period_len * 2));
		if (dir == DMA_MEM_TO_DEV)
			src = buf + offset;
		else
			dest = buf + offset;

		/* Make the promise */
		if (vchan->is_dedicated)
			promise = generate_ddma_promise(chan, src, dest,
							plength, sconfig);
		else
			promise = generate_ndma_promise(chan, src, dest,
							plength, sconfig, dir);

		if (!promise) {
			/* TODO: should we free everything? */
			return NULL;
		}
		promise->cfg |= endpoints;

		/* Then add it to the contract */
		list_add_tail(&promise->list, &contract->demands);
	}

	/* And add it to the vchan */
	return vchan_tx_prep(&vchan->vc, &contract->vd, flags);
}

static struct dma_async_tx_descriptor *
sun4i_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
			unsigned int sg_len, enum dma_transfer_direction dir,
			unsigned long flags, void *context)
{
	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
	struct dma_slave_config *sconfig = &vchan->cfg;
	struct sun4i_dma_promise *promise;
	struct sun4i_dma_contract *contract;
	u8 ram_type, io_mode, linear_mode;
	struct scatterlist *sg;
	dma_addr_t srcaddr, dstaddr;
	u32 endpoints, para;
	int i;

	if (!sgl)
		return NULL;

	if (!is_slave_direction(dir)) {
		dev_err(chan2dev(chan), "Invalid DMA direction\n");
		return NULL;
	}

	contract = generate_dma_contract();
	if (!contract)
		return NULL;

	if (vchan->is_dedicated) {
		io_mode = SUN4I_DDMA_ADDR_MODE_IO;
		linear_mode = SUN4I_DDMA_ADDR_MODE_LINEAR;
		ram_type = SUN4I_DDMA_DRQ_TYPE_SDRAM;
	} else {
		io_mode = SUN4I_NDMA_ADDR_MODE_IO;
		linear_mode = SUN4I_NDMA_ADDR_MODE_LINEAR;
		ram_type = SUN4I_NDMA_DRQ_TYPE_SDRAM;
	}

	if (dir == DMA_MEM_TO_DEV)
		endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(vchan->endpoint) |
			    SUN4I_DMA_CFG_DST_ADDR_MODE(io_mode) |
			    SUN4I_DMA_CFG_SRC_DRQ_TYPE(ram_type) |
			    SUN4I_DMA_CFG_SRC_ADDR_MODE(linear_mode);
	else
		endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(ram_type) |
			    SUN4I_DMA_CFG_DST_ADDR_MODE(linear_mode) |
			    SUN4I_DMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) |
			    SUN4I_DMA_CFG_SRC_ADDR_MODE(io_mode);

	for_each_sg(sgl, sg, sg_len, i) {
		/* Figure out addresses */
		if (dir == DMA_MEM_TO_DEV) {
			srcaddr = sg_dma_address(sg);
			dstaddr = sconfig->dst_addr;
		} else {
			srcaddr = sconfig->src_addr;
			dstaddr = sg_dma_address(sg);
		}

		/*
		 * These are the magic DMA engine timings that keep SPI going.
		 * I haven't seen any interface on DMAEngine to configure
		 * timings, and so far they seem to work for everything we
		 * support, so I've kept them here. I don't know if other
		 * devices need different timings because, as usual, we only
		 * have the "para" bitfield meanings, but no comment on what
		 * the values should be when doing a certain operation :|
		 */
		para = SUN4I_DDMA_MAGIC_SPI_PARAMETERS;

		/* And make a suitable promise */
		if (vchan->is_dedicated)
			promise = generate_ddma_promise(chan, srcaddr, dstaddr,
							sg_dma_len(sg),
							sconfig);
		else
			promise = generate_ndma_promise(chan, srcaddr, dstaddr,
							sg_dma_len(sg),
							sconfig, dir);

		if (!promise)
			return NULL; /* TODO: should we free everything? */

		promise->cfg |= endpoints;
		promise->para = para;

		/* Then add it to the contract */
		list_add_tail(&promise->list, &contract->demands);
	}

	/*
	 * Once we've got all the promises ready, add the contract
	 * to the pending list on the vchan
	 */
	return vchan_tx_prep(&vchan->vc, &contract->vd, flags);
}

static int sun4i_dma_terminate_all(struct dma_chan *chan)
{
	struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
	struct sun4i_dma_pchan *pchan = vchan->pchan;
	LIST_HEAD(head);
	unsigned long flags;

	spin_lock_irqsave(&vchan->vc.lock, flags);
	vchan_get_all_descriptors(&vchan->vc, &head);
	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	/*
	 * Clearing the configuration register will halt the pchan. Interrupts
	 * may still trigger, so don't forget to disable them.
	 */
	if (pchan) {
		if (pchan->is_dedicated)
			writel(0, pchan->base + SUN4I_DDMA_CFG_REG);
		else
			writel(0, pchan->base + SUN4I_NDMA_CFG_REG);
		set_pchan_interrupt(priv, pchan, 0, 0);
		release_pchan(priv, pchan);
	}

	spin_lock_irqsave(&vchan->vc.lock, flags);
	/* Clear these so the vchan is usable again */
	vchan->processing = NULL;
	vchan->pchan = NULL;
	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	vchan_dma_desc_free_list(&vchan->vc, &head);

	return 0;
}

static int sun4i_dma_config(struct dma_chan *chan,
			    struct dma_slave_config *config)
{
	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);

	memcpy(&vchan->cfg, config, sizeof(*config));

	return 0;
}
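
/*
 * A minimal sketch of how a client driver would reach this callback
 * through the generic dmaengine API (the FIFO address and parameters
 * are made-up values for illustration only):
 *
 *	struct dma_slave_config cfg = {
 *		.dst_addr = 0x01c05200,	// hypothetical device FIFO
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
 *		.dst_maxburst = 1,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 *
 * Note that the source side may be left undefined here for a
 * MEM_TO_DEV transfer; sanitize_config() fills it in from the
 * destination side at prep time.
 */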
static struct dma_chan *sun4i_dma_of_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct sun4i_dma_dev *priv = ofdma->of_dma_data;
	struct sun4i_dma_vchan *vchan;
	struct dma_chan *chan;
	u8 is_dedicated = dma_spec->args[0];
	u8 endpoint = dma_spec->args[1];

	/* Check if type is Normal or Dedicated */
	if (is_dedicated != 0 && is_dedicated != 1)
		return NULL;

	/* Make sure the endpoint looks sane */
	if ((is_dedicated && endpoint >= SUN4I_DDMA_DRQ_TYPE_LIMIT) ||
	    (!is_dedicated && endpoint >= SUN4I_NDMA_DRQ_TYPE_LIMIT))
		return NULL;

	chan = dma_get_any_slave_channel(&priv->slave);
	if (!chan)
		return NULL;

	/* Assign the endpoint to the vchan */
	vchan = to_sun4i_dma_vchan(chan);
	vchan->is_dedicated = is_dedicated;
	vchan->endpoint = endpoint;

	return chan;
}
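
/*
 * To illustrate the two cells decoded above, a device tree consumer
 * would look something like the sketch below (node name and endpoint
 * numbers are illustrative, not authoritative): the first cell selects
 * Normal (0) or Dedicated (1) DMA, the second cell is the endpoint.
 *
 *	spi2: spi@01c17000 {
 *		...
 *		dmas = <&dma 1 29>, <&dma 1 28>;
 *		dma-names = "rx", "tx";
 *	};
 */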
static enum dma_status sun4i_dma_tx_status(struct dma_chan *chan,
					   dma_cookie_t cookie,
					   struct dma_tx_state *state)
{
	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
	struct sun4i_dma_pchan *pchan = vchan->pchan;
	struct sun4i_dma_contract *contract;
	struct sun4i_dma_promise *promise;
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;
	size_t bytes = 0;

	ret = dma_cookie_status(chan, cookie, state);
	if (!state || (ret == DMA_COMPLETE))
		return ret;

	spin_lock_irqsave(&vchan->vc.lock, flags);
	vd = vchan_find_desc(&vchan->vc, cookie);
	if (!vd)
		goto exit;
	contract = to_sun4i_dma_contract(vd);

	list_for_each_entry(promise, &contract->demands, list)
		bytes += promise->len;

	/*
	 * The hardware is configured to return the remaining byte
	 * quantity. If possible, replace the first listed element's
	 * full size with the actual remaining amount
	 */
	promise = list_first_entry_or_null(&contract->demands,
					   struct sun4i_dma_promise, list);
	if (promise && pchan) {
		bytes -= promise->len;
		if (pchan->is_dedicated)
			bytes += readl(pchan->base + SUN4I_DDMA_BYTE_COUNT_REG);
		else
			bytes += readl(pchan->base + SUN4I_NDMA_BYTE_COUNT_REG);
	}

exit:
	dma_set_residue(state, bytes);
	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	return ret;
}

static void sun4i_dma_issue_pending(struct dma_chan *chan)
{
	struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
	unsigned long flags;

	spin_lock_irqsave(&vchan->vc.lock, flags);

	/*
	 * If there are pending transactions for this vchan, push one of
	 * them into the engine to get the ball rolling.
	 */
	if (vchan_issue_pending(&vchan->vc))
		__execute_vchan_pending(priv, vchan);

	spin_unlock_irqrestore(&vchan->vc.lock, flags);
}

static irqreturn_t sun4i_dma_interrupt(int irq, void *dev_id)
{
	struct sun4i_dma_dev *priv = dev_id;
	struct sun4i_dma_pchan *pchans = priv->pchans, *pchan;
	struct sun4i_dma_vchan *vchan;
	struct sun4i_dma_contract *contract;
	struct sun4i_dma_promise *promise;
	unsigned long pendirq, irqs, disableirqs;
	int bit, i, free_room, allow_mitigation = 1;

	pendirq = readl_relaxed(priv->base + SUN4I_DMA_IRQ_PENDING_STATUS_REG);

handle_pending:

	disableirqs = 0;
	free_room = 0;

	for_each_set_bit(bit, &pendirq, 32) {
		pchan = &pchans[bit >> 1];
		vchan = pchan->vchan;
		if (!vchan) /* a terminated channel may still interrupt */
			continue;
		contract = vchan->contract;

		/*
		 * Disable the IRQ and free the pchan if it's an end
		 * interrupt (odd bit)
		 */
		if (bit & 1) {
			spin_lock(&vchan->vc.lock);

			/*
			 * Move the promise into the completed list now that
			 * we're done with it
			 */
			list_del(&vchan->processing->list);
			list_add_tail(&vchan->processing->list,
				      &contract->completed_demands);

			/*
			 * Cyclic DMA transfers are special:
			 * - There's always something we can dispatch
			 * - We need to run the callback
			 * - Latency is very important, as this is used by audio
			 * We therefore just cycle through the list and dispatch
			 * whatever we have here, reusing the pchan. There's
			 * no need to run the thread after this.
			 *
			 * For non-cyclic transfers we need to look around,
			 * so we can program some more work, or notify the
			 * client that their transfers have been completed.
			 */
			if (contract->is_cyclic) {
				promise = get_next_cyclic_promise(contract);
				vchan->processing = promise;
				configure_pchan(pchan, promise);
				vchan_cyclic_callback(&contract->vd);
			} else {
				vchan->processing = NULL;
				vchan->pchan = NULL;

				free_room = 1;
				disableirqs |= BIT(bit);
				release_pchan(priv, pchan);
			}

			spin_unlock(&vchan->vc.lock);
		} else {
			/* Half done interrupt */
			if (contract->is_cyclic)
				vchan_cyclic_callback(&contract->vd);
			else
				disableirqs |= BIT(bit);
		}
	}

	/* Disable the IRQs for events we handled */
	spin_lock(&priv->lock);
	irqs = readl_relaxed(priv->base + SUN4I_DMA_IRQ_ENABLE_REG);
	writel_relaxed(irqs & ~disableirqs,
		       priv->base + SUN4I_DMA_IRQ_ENABLE_REG);
	spin_unlock(&priv->lock);

	/* Writing 1 to the pending field will clear the pending interrupt */
	writel_relaxed(pendirq, priv->base + SUN4I_DMA_IRQ_PENDING_STATUS_REG);

	/*
	 * If a pchan was freed, we may be able to schedule something else,
	 * so have a look around
	 */
	if (free_room) {
		for (i = 0; i < SUN4I_DMA_NR_MAX_VCHANS; i++) {
			vchan = &priv->vchans[i];
			spin_lock(&vchan->vc.lock);
			__execute_vchan_pending(priv, vchan);
			spin_unlock(&vchan->vc.lock);
		}
	}
	/*
	 * Handle newer interrupts if some showed up, but only do it once
	 * to avoid looping for too long.
	 */
	if (allow_mitigation) {
		pendirq = readl_relaxed(priv->base +
					SUN4I_DMA_IRQ_PENDING_STATUS_REG);
		if (pendirq) {
			allow_mitigation = 0;
			goto handle_pending;
		}
	}

	return IRQ_HANDLED;
}

static int sun4i_dma_probe(struct platform_device *pdev)
{
	struct sun4i_dma_dev *priv;
	struct resource *res;
	int i, j, ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	priv->irq = platform_get_irq(pdev, 0);
	if (priv->irq < 0)
		return priv->irq;

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(priv->clk)) {
		dev_err(&pdev->dev, "No clock specified\n");
		return PTR_ERR(priv->clk);
	}

	platform_set_drvdata(pdev, priv);
	spin_lock_init(&priv->lock);

	dma_cap_zero(priv->slave.cap_mask);
	dma_cap_set(DMA_PRIVATE, priv->slave.cap_mask);
	dma_cap_set(DMA_MEMCPY, priv->slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, priv->slave.cap_mask);
	dma_cap_set(DMA_SLAVE, priv->slave.cap_mask);

	INIT_LIST_HEAD(&priv->slave.channels);
	priv->slave.device_free_chan_resources	= sun4i_dma_free_chan_resources;
	priv->slave.device_tx_status		= sun4i_dma_tx_status;
	priv->slave.device_issue_pending	= sun4i_dma_issue_pending;
	priv->slave.device_prep_slave_sg	= sun4i_dma_prep_slave_sg;
	priv->slave.device_prep_dma_memcpy	= sun4i_dma_prep_dma_memcpy;
	priv->slave.device_prep_dma_cyclic	= sun4i_dma_prep_dma_cyclic;
	priv->slave.device_config		= sun4i_dma_config;
	priv->slave.device_terminate_all	= sun4i_dma_terminate_all;
	priv->slave.copy_align			= 2;
	priv->slave.src_addr_widths		= BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
						  BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
						  BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	priv->slave.dst_addr_widths		= BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
						  BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
						  BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	priv->slave.directions			= BIT(DMA_DEV_TO_MEM) |
						  BIT(DMA_MEM_TO_DEV);
	priv->slave.residue_granularity	= DMA_RESIDUE_GRANULARITY_BURST;

	priv->slave.dev = &pdev->dev;

	priv->pchans = devm_kcalloc(&pdev->dev, SUN4I_DMA_NR_MAX_CHANNELS,
				    sizeof(struct sun4i_dma_pchan), GFP_KERNEL);
	priv->vchans = devm_kcalloc(&pdev->dev, SUN4I_DMA_NR_MAX_VCHANS,
				    sizeof(struct sun4i_dma_vchan), GFP_KERNEL);
	if (!priv->vchans || !priv->pchans)
		return -ENOMEM;

	/*
	 * [0..SUN4I_NDMA_NR_MAX_CHANNELS) are normal pchans, and
	 * [SUN4I_NDMA_NR_MAX_CHANNELS..SUN4I_DMA_NR_MAX_CHANNELS) are
	 * dedicated ones
	 */
	for (i = 0; i < SUN4I_NDMA_NR_MAX_CHANNELS; i++)
		priv->pchans[i].base = priv->base +
			SUN4I_NDMA_CHANNEL_REG_BASE(i);

	for (j = 0; i < SUN4I_DMA_NR_MAX_CHANNELS; i++, j++) {
		priv->pchans[i].base = priv->base +
			SUN4I_DDMA_CHANNEL_REG_BASE(j);
		priv->pchans[i].is_dedicated = 1;
	}

	for (i = 0; i < SUN4I_DMA_NR_MAX_VCHANS; i++) {
		struct sun4i_dma_vchan *vchan = &priv->vchans[i];

		spin_lock_init(&vchan->vc.lock);
		vchan->vc.desc_free = sun4i_dma_free_contract;
		vchan_init(&vchan->vc, &priv->slave);
	}

	ret = clk_prepare_enable(priv->clk);
	if (ret) {
		dev_err(&pdev->dev, "Couldn't enable the clock\n");
		return ret;
	}

	/*
	 * Make sure the IRQs are all disabled and accounted for. The bootloader
	 * likes to leave these dirty
	 */
	writel(0, priv->base + SUN4I_DMA_IRQ_ENABLE_REG);
	writel(0xFFFFFFFF, priv->base + SUN4I_DMA_IRQ_PENDING_STATUS_REG);

	ret = devm_request_irq(&pdev->dev, priv->irq, sun4i_dma_interrupt,
			       0, dev_name(&pdev->dev), priv);
	if (ret) {
		dev_err(&pdev->dev, "Cannot request IRQ\n");
		goto err_clk_disable;
	}

	ret = dma_async_device_register(&priv->slave);
	if (ret) {
		dev_warn(&pdev->dev, "Failed to register DMA engine device\n");
		goto err_clk_disable;
	}

	ret = of_dma_controller_register(pdev->dev.of_node, sun4i_dma_of_xlate,
					 priv);
	if (ret) {
		dev_err(&pdev->dev, "of_dma_controller_register failed\n");
		goto err_dma_unregister;
	}

	dev_dbg(&pdev->dev, "Successfully probed SUN4I_DMA\n");

	return 0;

err_dma_unregister:
	dma_async_device_unregister(&priv->slave);
err_clk_disable:
	clk_disable_unprepare(priv->clk);
	return ret;
}

static int sun4i_dma_remove(struct platform_device *pdev)
{
	struct sun4i_dma_dev *priv = platform_get_drvdata(pdev);

	/* Disable IRQ so no more work is scheduled */
	disable_irq(priv->irq);

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&priv->slave);

	clk_disable_unprepare(priv->clk);

	return 0;
}

static const struct of_device_id sun4i_dma_match[] = {
	{ .compatible = "allwinner,sun4i-a10-dma" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, sun4i_dma_match);

static struct platform_driver sun4i_dma_driver = {
	.probe	= sun4i_dma_probe,
	.remove	= sun4i_dma_remove,
	.driver	= {
		.name		= "sun4i-dma",
		.of_match_table	= sun4i_dma_match,
	},
};

module_platform_driver(sun4i_dma_driver);

MODULE_DESCRIPTION("Allwinner A10 Dedicated DMA Controller Driver");
MODULE_AUTHOR("Emilio López <emilio@elopez.com.ar>");
MODULE_LICENSE("GPL");