stm32-mdma.c
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. *
  4. * Copyright (C) STMicroelectronics SA 2017
  5. * Author(s): M'boumba Cedric Madianga <cedric.madianga@gmail.com>
  6. * Pierre-Yves Mordret <pierre-yves.mordret@st.com>
  7. *
  8. * Driver for STM32 MDMA controller
  9. *
  10. * Inspired by stm32-dma.c and dma-jz4780.c
  11. */
  12. #include <linux/clk.h>
  13. #include <linux/delay.h>
  14. #include <linux/dmaengine.h>
  15. #include <linux/dma-mapping.h>
  16. #include <linux/dmapool.h>
  17. #include <linux/err.h>
  18. #include <linux/init.h>
  19. #include <linux/iopoll.h>
  20. #include <linux/jiffies.h>
  21. #include <linux/list.h>
  22. #include <linux/log2.h>
  23. #include <linux/module.h>
  24. #include <linux/of.h>
  25. #include <linux/of_device.h>
  26. #include <linux/of_dma.h>
  27. #include <linux/platform_device.h>
  28. #include <linux/pm_runtime.h>
  29. #include <linux/reset.h>
  30. #include <linux/slab.h>
  31. #include "virt-dma.h"
  32. /* MDMA Generic getter/setter */
  33. #define STM32_MDMA_SHIFT(n) (ffs(n) - 1)
  34. #define STM32_MDMA_SET(n, mask) (((n) << STM32_MDMA_SHIFT(mask)) & \
  35. (mask))
  36. #define STM32_MDMA_GET(n, mask) (((n) & (mask)) >> \
  37. STM32_MDMA_SHIFT(mask))
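/*
 * Worked example: with mask = GENMASK(7, 6) (0xC0), STM32_MDMA_SHIFT()
 * returns 6, so STM32_MDMA_SET(2, 0xC0) yields (2 << 6) & 0xC0 = 0x80 and
 * STM32_MDMA_GET(0x80, 0xC0) recovers (0x80 & 0xC0) >> 6 = 2. The macros
 * place or extract a field value relative to an arbitrary register mask.
 */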
  38. #define STM32_MDMA_GISR0 0x0000 /* MDMA Int Status Reg 0 */
  39. #define STM32_MDMA_GISR1 0x0004 /* MDMA Int Status Reg 1 */
  40. /* MDMA Channel x interrupt/status register */
  41. #define STM32_MDMA_CISR(x) (0x40 + 0x40 * (x)) /* x = 0..62 */
  42. #define STM32_MDMA_CISR_CRQA BIT(16)
  43. #define STM32_MDMA_CISR_TCIF BIT(4)
  44. #define STM32_MDMA_CISR_BTIF BIT(3)
  45. #define STM32_MDMA_CISR_BRTIF BIT(2)
  46. #define STM32_MDMA_CISR_CTCIF BIT(1)
  47. #define STM32_MDMA_CISR_TEIF BIT(0)
  48. /* MDMA Channel x interrupt flag clear register */
  49. #define STM32_MDMA_CIFCR(x) (0x44 + 0x40 * (x))
  50. #define STM32_MDMA_CIFCR_CLTCIF BIT(4)
  51. #define STM32_MDMA_CIFCR_CBTIF BIT(3)
  52. #define STM32_MDMA_CIFCR_CBRTIF BIT(2)
  53. #define STM32_MDMA_CIFCR_CCTCIF BIT(1)
  54. #define STM32_MDMA_CIFCR_CTEIF BIT(0)
  55. #define STM32_MDMA_CIFCR_CLEAR_ALL (STM32_MDMA_CIFCR_CLTCIF \
  56. | STM32_MDMA_CIFCR_CBTIF \
  57. | STM32_MDMA_CIFCR_CBRTIF \
  58. | STM32_MDMA_CIFCR_CCTCIF \
  59. | STM32_MDMA_CIFCR_CTEIF)
  60. /* MDMA Channel x error status register */
  61. #define STM32_MDMA_CESR(x) (0x48 + 0x40 * (x))
  62. #define STM32_MDMA_CESR_BSE BIT(11)
  63. #define STM32_MDMA_CESR_ASR BIT(10)
  64. #define STM32_MDMA_CESR_TEMD BIT(9)
  65. #define STM32_MDMA_CESR_TELD BIT(8)
  66. #define STM32_MDMA_CESR_TED BIT(7)
  67. #define STM32_MDMA_CESR_TEA_MASK GENMASK(6, 0)
  68. /* MDMA Channel x control register */
  69. #define STM32_MDMA_CCR(x) (0x4C + 0x40 * (x))
  70. #define STM32_MDMA_CCR_SWRQ BIT(16)
  71. #define STM32_MDMA_CCR_WEX BIT(14)
  72. #define STM32_MDMA_CCR_HEX BIT(13)
  73. #define STM32_MDMA_CCR_BEX BIT(12)
  74. #define STM32_MDMA_CCR_PL_MASK GENMASK(7, 6)
  75. #define STM32_MDMA_CCR_PL(n) STM32_MDMA_SET(n, \
  76. STM32_MDMA_CCR_PL_MASK)
  77. #define STM32_MDMA_CCR_TCIE BIT(5)
  78. #define STM32_MDMA_CCR_BTIE BIT(4)
  79. #define STM32_MDMA_CCR_BRTIE BIT(3)
  80. #define STM32_MDMA_CCR_CTCIE BIT(2)
  81. #define STM32_MDMA_CCR_TEIE BIT(1)
  82. #define STM32_MDMA_CCR_EN BIT(0)
  83. #define STM32_MDMA_CCR_IRQ_MASK (STM32_MDMA_CCR_TCIE \
  84. | STM32_MDMA_CCR_BTIE \
  85. | STM32_MDMA_CCR_BRTIE \
  86. | STM32_MDMA_CCR_CTCIE \
  87. | STM32_MDMA_CCR_TEIE)
  88. /* MDMA Channel x transfer configuration register */
  89. #define STM32_MDMA_CTCR(x) (0x50 + 0x40 * (x))
  90. #define STM32_MDMA_CTCR_BWM BIT(31)
  91. #define STM32_MDMA_CTCR_SWRM BIT(30)
  92. #define STM32_MDMA_CTCR_TRGM_MSK GENMASK(29, 28)
  93. #define STM32_MDMA_CTCR_TRGM(n) STM32_MDMA_SET((n), \
  94. STM32_MDMA_CTCR_TRGM_MSK)
  95. #define STM32_MDMA_CTCR_TRGM_GET(n) STM32_MDMA_GET((n), \
  96. STM32_MDMA_CTCR_TRGM_MSK)
  97. #define STM32_MDMA_CTCR_PAM_MASK GENMASK(27, 26)
  98. #define STM32_MDMA_CTCR_PAM(n) STM32_MDMA_SET(n, \
  99. STM32_MDMA_CTCR_PAM_MASK)
  100. #define STM32_MDMA_CTCR_PKE BIT(25)
  101. #define STM32_MDMA_CTCR_TLEN_MSK GENMASK(24, 18)
  102. #define STM32_MDMA_CTCR_TLEN(n) STM32_MDMA_SET((n), \
  103. STM32_MDMA_CTCR_TLEN_MSK)
  104. #define STM32_MDMA_CTCR_TLEN_GET(n) STM32_MDMA_GET((n), \
  105. STM32_MDMA_CTCR_TLEN_MSK)
  106. #define STM32_MDMA_CTCR_LEN2_MSK GENMASK(25, 18)
  107. #define STM32_MDMA_CTCR_LEN2(n) STM32_MDMA_SET((n), \
  108. STM32_MDMA_CTCR_LEN2_MSK)
  109. #define STM32_MDMA_CTCR_LEN2_GET(n) STM32_MDMA_GET((n), \
  110. STM32_MDMA_CTCR_LEN2_MSK)
  111. #define STM32_MDMA_CTCR_DBURST_MASK GENMASK(17, 15)
  112. #define STM32_MDMA_CTCR_DBURST(n) STM32_MDMA_SET(n, \
  113. STM32_MDMA_CTCR_DBURST_MASK)
  114. #define STM32_MDMA_CTCR_SBURST_MASK GENMASK(14, 12)
  115. #define STM32_MDMA_CTCR_SBURST(n) STM32_MDMA_SET(n, \
  116. STM32_MDMA_CTCR_SBURST_MASK)
  117. #define STM32_MDMA_CTCR_DINCOS_MASK GENMASK(11, 10)
  118. #define STM32_MDMA_CTCR_DINCOS(n) STM32_MDMA_SET((n), \
  119. STM32_MDMA_CTCR_DINCOS_MASK)
  120. #define STM32_MDMA_CTCR_SINCOS_MASK GENMASK(9, 8)
  121. #define STM32_MDMA_CTCR_SINCOS(n) STM32_MDMA_SET((n), \
  122. STM32_MDMA_CTCR_SINCOS_MASK)
  123. #define STM32_MDMA_CTCR_DSIZE_MASK GENMASK(7, 6)
  124. #define STM32_MDMA_CTCR_DSIZE(n) STM32_MDMA_SET(n, \
  125. STM32_MDMA_CTCR_DSIZE_MASK)
  126. #define STM32_MDMA_CTCR_SSIZE_MASK GENMASK(5, 4)
  127. #define STM32_MDMA_CTCR_SSIZE(n) STM32_MDMA_SET(n, \
  128. STM32_MDMA_CTCR_SSIZE_MASK)
  129. #define STM32_MDMA_CTCR_DINC_MASK GENMASK(3, 2)
  130. #define STM32_MDMA_CTCR_DINC(n) STM32_MDMA_SET((n), \
  131. STM32_MDMA_CTCR_DINC_MASK)
  132. #define STM32_MDMA_CTCR_SINC_MASK GENMASK(1, 0)
  133. #define STM32_MDMA_CTCR_SINC(n) STM32_MDMA_SET((n), \
  134. STM32_MDMA_CTCR_SINC_MASK)
  135. #define STM32_MDMA_CTCR_CFG_MASK (STM32_MDMA_CTCR_SINC_MASK \
  136. | STM32_MDMA_CTCR_DINC_MASK \
  137. | STM32_MDMA_CTCR_SINCOS_MASK \
  138. | STM32_MDMA_CTCR_DINCOS_MASK \
  139. | STM32_MDMA_CTCR_LEN2_MSK \
  140. | STM32_MDMA_CTCR_TRGM_MSK)
  141. /* MDMA Channel x block number of data register */
  142. #define STM32_MDMA_CBNDTR(x) (0x54 + 0x40 * (x))
  143. #define STM32_MDMA_CBNDTR_BRC_MK GENMASK(31, 20)
  144. #define STM32_MDMA_CBNDTR_BRC(n) STM32_MDMA_SET(n, \
  145. STM32_MDMA_CBNDTR_BRC_MK)
  146. #define STM32_MDMA_CBNDTR_BRC_GET(n) STM32_MDMA_GET((n), \
  147. STM32_MDMA_CBNDTR_BRC_MK)
  148. #define STM32_MDMA_CBNDTR_BRDUM BIT(19)
  149. #define STM32_MDMA_CBNDTR_BRSUM BIT(18)
  150. #define STM32_MDMA_CBNDTR_BNDT_MASK GENMASK(16, 0)
  151. #define STM32_MDMA_CBNDTR_BNDT(n) STM32_MDMA_SET(n, \
  152. STM32_MDMA_CBNDTR_BNDT_MASK)
  153. /* MDMA Channel x source address register */
  154. #define STM32_MDMA_CSAR(x) (0x58 + 0x40 * (x))
  155. /* MDMA Channel x destination address register */
  156. #define STM32_MDMA_CDAR(x) (0x5C + 0x40 * (x))
  157. /* MDMA Channel x block repeat address update register */
  158. #define STM32_MDMA_CBRUR(x) (0x60 + 0x40 * (x))
  159. #define STM32_MDMA_CBRUR_DUV_MASK GENMASK(31, 16)
  160. #define STM32_MDMA_CBRUR_DUV(n) STM32_MDMA_SET(n, \
  161. STM32_MDMA_CBRUR_DUV_MASK)
  162. #define STM32_MDMA_CBRUR_SUV_MASK GENMASK(15, 0)
  163. #define STM32_MDMA_CBRUR_SUV(n) STM32_MDMA_SET(n, \
  164. STM32_MDMA_CBRUR_SUV_MASK)
  165. /* MDMA Channel x link address register */
  166. #define STM32_MDMA_CLAR(x) (0x64 + 0x40 * (x))
  167. /* MDMA Channel x trigger and bus selection register */
  168. #define STM32_MDMA_CTBR(x) (0x68 + 0x40 * (x))
  169. #define STM32_MDMA_CTBR_DBUS BIT(17)
  170. #define STM32_MDMA_CTBR_SBUS BIT(16)
  171. #define STM32_MDMA_CTBR_TSEL_MASK GENMASK(5, 0)
  172. #define STM32_MDMA_CTBR_TSEL(n) STM32_MDMA_SET(n, \
  173. STM32_MDMA_CTBR_TSEL_MASK)
  174. /* MDMA Channel x mask address register */
  175. #define STM32_MDMA_CMAR(x) (0x70 + 0x40 * (x))
  176. /* MDMA Channel x mask data register */
  177. #define STM32_MDMA_CMDR(x) (0x74 + 0x40 * (x))
  178. #define STM32_MDMA_MAX_BUF_LEN 128
  179. #define STM32_MDMA_MAX_BLOCK_LEN 65536
  180. #define STM32_MDMA_MAX_CHANNELS 63
  181. #define STM32_MDMA_MAX_REQUESTS 256
  182. #define STM32_MDMA_MAX_BURST 128
  183. #define STM32_MDMA_VERY_HIGH_PRIORITY 0x11
  184. enum stm32_mdma_trigger_mode {
  185. STM32_MDMA_BUFFER,
  186. STM32_MDMA_BLOCK,
  187. STM32_MDMA_BLOCK_REP,
  188. STM32_MDMA_LINKED_LIST,
  189. };
  190. enum stm32_mdma_width {
  191. STM32_MDMA_BYTE,
  192. STM32_MDMA_HALF_WORD,
  193. STM32_MDMA_WORD,
  194. STM32_MDMA_DOUBLE_WORD,
  195. };
  196. enum stm32_mdma_inc_mode {
  197. STM32_MDMA_FIXED = 0,
  198. STM32_MDMA_INC = 2,
  199. STM32_MDMA_DEC = 3,
  200. };
  201. struct stm32_mdma_chan_config {
  202. u32 request;
  203. u32 priority_level;
  204. u32 transfer_config;
  205. u32 mask_addr;
  206. u32 mask_data;
  207. };
  208. struct stm32_mdma_hwdesc {
  209. u32 ctcr;
  210. u32 cbndtr;
  211. u32 csar;
  212. u32 cdar;
  213. u32 cbrur;
  214. u32 clar;
  215. u32 ctbr;
  216. u32 dummy;
  217. u32 cmar;
  218. u32 cmdr;
  219. } __aligned(64);
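/*
 * Each hardware descriptor mirrors the CTCR..CMDR channel registers and is
 * 64-byte aligned so that its physical address can be programmed into CLAR.
 * In linked-list mode the controller reloads the channel registers from the
 * address held in clar when a block completes; stm32_mdma_setup_hwdesc()
 * below chains node[i].clar to node[i + 1].hwdesc_phys (back to node[0] for
 * cyclic transfers, or 0 to terminate the list).
 */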
  220. struct stm32_mdma_desc_node {
  221. struct stm32_mdma_hwdesc *hwdesc;
  222. dma_addr_t hwdesc_phys;
  223. };
  224. struct stm32_mdma_desc {
  225. struct virt_dma_desc vdesc;
  226. u32 ccr;
  227. bool cyclic;
  228. u32 count;
  229. struct stm32_mdma_desc_node node[];
  230. };
  231. struct stm32_mdma_chan {
  232. struct virt_dma_chan vchan;
  233. struct dma_pool *desc_pool;
  234. u32 id;
  235. struct stm32_mdma_desc *desc;
  236. u32 curr_hwdesc;
  237. struct dma_slave_config dma_config;
  238. struct stm32_mdma_chan_config chan_config;
  239. bool busy;
  240. u32 mem_burst;
  241. u32 mem_width;
  242. };
  243. struct stm32_mdma_device {
  244. struct dma_device ddev;
  245. void __iomem *base;
  246. struct clk *clk;
  247. int irq;
  248. u32 nr_channels;
  249. u32 nr_requests;
  250. u32 nr_ahb_addr_masks;
  251. struct stm32_mdma_chan chan[STM32_MDMA_MAX_CHANNELS];
  252. u32 ahb_addr_masks[];
  253. };
  254. static struct stm32_mdma_device *stm32_mdma_get_dev(
  255. struct stm32_mdma_chan *chan)
  256. {
  257. return container_of(chan->vchan.chan.device, struct stm32_mdma_device,
  258. ddev);
  259. }
  260. static struct stm32_mdma_chan *to_stm32_mdma_chan(struct dma_chan *c)
  261. {
  262. return container_of(c, struct stm32_mdma_chan, vchan.chan);
  263. }
  264. static struct stm32_mdma_desc *to_stm32_mdma_desc(struct virt_dma_desc *vdesc)
  265. {
  266. return container_of(vdesc, struct stm32_mdma_desc, vdesc);
  267. }
  268. static struct device *chan2dev(struct stm32_mdma_chan *chan)
  269. {
  270. return &chan->vchan.chan.dev->device;
  271. }
  272. static struct device *mdma2dev(struct stm32_mdma_device *mdma_dev)
  273. {
  274. return mdma_dev->ddev.dev;
  275. }
  276. static u32 stm32_mdma_read(struct stm32_mdma_device *dmadev, u32 reg)
  277. {
  278. return readl_relaxed(dmadev->base + reg);
  279. }
  280. static void stm32_mdma_write(struct stm32_mdma_device *dmadev, u32 reg, u32 val)
  281. {
  282. writel_relaxed(val, dmadev->base + reg);
  283. }
  284. static void stm32_mdma_set_bits(struct stm32_mdma_device *dmadev, u32 reg,
  285. u32 mask)
  286. {
  287. void __iomem *addr = dmadev->base + reg;
  288. writel_relaxed(readl_relaxed(addr) | mask, addr);
  289. }
  290. static void stm32_mdma_clr_bits(struct stm32_mdma_device *dmadev, u32 reg,
  291. u32 mask)
  292. {
  293. void __iomem *addr = dmadev->base + reg;
  294. writel_relaxed(readl_relaxed(addr) & ~mask, addr);
  295. }
  296. static struct stm32_mdma_desc *stm32_mdma_alloc_desc(
  297. struct stm32_mdma_chan *chan, u32 count)
  298. {
  299. struct stm32_mdma_desc *desc;
  300. int i;
  301. desc = kzalloc(offsetof(typeof(*desc), node[count]), GFP_NOWAIT);
  302. if (!desc)
  303. return NULL;
  304. for (i = 0; i < count; i++) {
  305. desc->node[i].hwdesc =
  306. dma_pool_alloc(chan->desc_pool, GFP_NOWAIT,
  307. &desc->node[i].hwdesc_phys);
  308. if (!desc->node[i].hwdesc)
  309. goto err;
  310. }
  311. desc->count = count;
  312. return desc;
  313. err:
  314. dev_err(chan2dev(chan), "Failed to allocate descriptor\n");
  315. while (--i >= 0)
  316. dma_pool_free(chan->desc_pool, desc->node[i].hwdesc,
  317. desc->node[i].hwdesc_phys);
  318. kfree(desc);
  319. return NULL;
  320. }
  321. static void stm32_mdma_desc_free(struct virt_dma_desc *vdesc)
  322. {
  323. struct stm32_mdma_desc *desc = to_stm32_mdma_desc(vdesc);
  324. struct stm32_mdma_chan *chan = to_stm32_mdma_chan(vdesc->tx.chan);
  325. int i;
  326. for (i = 0; i < desc->count; i++)
  327. dma_pool_free(chan->desc_pool, desc->node[i].hwdesc,
  328. desc->node[i].hwdesc_phys);
  329. kfree(desc);
  330. }
  331. static int stm32_mdma_get_width(struct stm32_mdma_chan *chan,
  332. enum dma_slave_buswidth width)
  333. {
  334. switch (width) {
  335. case DMA_SLAVE_BUSWIDTH_1_BYTE:
  336. case DMA_SLAVE_BUSWIDTH_2_BYTES:
  337. case DMA_SLAVE_BUSWIDTH_4_BYTES:
  338. case DMA_SLAVE_BUSWIDTH_8_BYTES:
  339. return ffs(width) - 1;
  340. default:
  341. dev_err(chan2dev(chan), "Dma bus width %i not supported\n",
  342. width);
  343. return -EINVAL;
  344. }
  345. }
  346. static enum dma_slave_buswidth stm32_mdma_get_max_width(dma_addr_t addr,
  347. u32 buf_len, u32 tlen)
  348. {
  349. enum dma_slave_buswidth max_width = DMA_SLAVE_BUSWIDTH_8_BYTES;
  350. for (max_width = DMA_SLAVE_BUSWIDTH_8_BYTES;
  351. max_width > DMA_SLAVE_BUSWIDTH_1_BYTE;
  352. max_width >>= 1) {
  353. /*
  354. * Address and buffer length both have to be aligned on
  355. * bus width
  356. */
  357. if ((((buf_len | addr) & (max_width - 1)) == 0) &&
  358. tlen >= max_width)
  359. break;
  360. }
  361. return max_width;
  362. }
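/*
 * Example (illustrative address): for addr = 0x24000004, buf_len = 96 and
 * tlen = 128, the 8-byte check fails ((0x60 | 0x04) & 0x7 != 0) but the
 * 4-byte check passes, so DMA_SLAVE_BUSWIDTH_4_BYTES is returned: the widest
 * access that keeps both the address and the length naturally aligned and
 * that still fits within the buffer transfer length.
 */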
  363. static u32 stm32_mdma_get_best_burst(u32 buf_len, u32 tlen, u32 max_burst,
  364. enum dma_slave_buswidth width)
  365. {
  366. u32 best_burst;
  367. best_burst = min((u32)1 << __ffs(tlen | buf_len),
  368. max_burst * width) / width;
  369. return (best_burst > 0) ? best_burst : 1;
  370. }
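/*
 * Example: buf_len = 96, tlen = 128, max_burst = 16, width = 4 bytes.
 * 1 << __ffs(128 | 96) = 32 bytes and max_burst * width = 64 bytes, so
 * min(32, 64) / 4 = 8: the channel is programmed for bursts of 8 beats
 * (32 bytes), the largest power-of-two burst compatible with the
 * buffer and transfer-length alignment.
 */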
  371. static int stm32_mdma_disable_chan(struct stm32_mdma_chan *chan)
  372. {
  373. struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
  374. u32 ccr, cisr, id, reg;
  375. int ret;
  376. id = chan->id;
  377. reg = STM32_MDMA_CCR(id);
  378. /* Disable interrupts */
  379. stm32_mdma_clr_bits(dmadev, reg, STM32_MDMA_CCR_IRQ_MASK);
  380. ccr = stm32_mdma_read(dmadev, reg);
  381. if (ccr & STM32_MDMA_CCR_EN) {
  382. stm32_mdma_clr_bits(dmadev, reg, STM32_MDMA_CCR_EN);
  383. /* Ensure that any ongoing transfer has been completed */
  384. ret = readl_relaxed_poll_timeout_atomic(
  385. dmadev->base + STM32_MDMA_CISR(id), cisr,
  386. (cisr & STM32_MDMA_CISR_CTCIF), 10, 1000);
  387. if (ret) {
  388. dev_err(chan2dev(chan), "%s: timeout!\n", __func__);
  389. return -EBUSY;
  390. }
  391. }
  392. return 0;
  393. }
  394. static void stm32_mdma_stop(struct stm32_mdma_chan *chan)
  395. {
  396. struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
  397. u32 status;
  398. int ret;
  399. /* Disable DMA */
  400. ret = stm32_mdma_disable_chan(chan);
  401. if (ret < 0)
  402. return;
  403. /* Clear interrupt status if it is there */
  404. status = stm32_mdma_read(dmadev, STM32_MDMA_CISR(chan->id));
  405. if (status) {
  406. dev_dbg(chan2dev(chan), "%s(): clearing interrupt: 0x%08x\n",
  407. __func__, status);
  408. stm32_mdma_set_bits(dmadev, STM32_MDMA_CIFCR(chan->id), status);
  409. }
  410. chan->busy = false;
  411. }
  412. static void stm32_mdma_set_bus(struct stm32_mdma_device *dmadev, u32 *ctbr,
  413. u32 ctbr_mask, u32 src_addr)
  414. {
  415. u32 mask;
  416. int i;
  417. /* Check if memory device is on AHB or AXI */
  418. *ctbr &= ~ctbr_mask;
  419. mask = src_addr & 0xF0000000;
  420. for (i = 0; i < dmadev->nr_ahb_addr_masks; i++) {
  421. if (mask == dmadev->ahb_addr_masks[i]) {
  422. *ctbr |= ctbr_mask;
  423. break;
  424. }
  425. }
  426. }
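/*
 * Only the top nibble of the address is compared, so an entry such as
 * 0x20000000 in "st,ahb-addr-masks" (an illustrative value; the real list
 * comes from the SoC devicetree) routes every 0x2xxxxxxx access through the
 * AHB port by setting the SBUS/DBUS bit, while addresses matching no entry
 * keep the bit cleared and go through the AXI port.
 */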
  427. static int stm32_mdma_set_xfer_param(struct stm32_mdma_chan *chan,
  428. enum dma_transfer_direction direction,
  429. u32 *mdma_ccr, u32 *mdma_ctcr,
  430. u32 *mdma_ctbr, dma_addr_t addr,
  431. u32 buf_len)
  432. {
  433. struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
  434. struct stm32_mdma_chan_config *chan_config = &chan->chan_config;
  435. enum dma_slave_buswidth src_addr_width, dst_addr_width;
  436. phys_addr_t src_addr, dst_addr;
  437. int src_bus_width, dst_bus_width;
  438. u32 src_maxburst, dst_maxburst, src_best_burst, dst_best_burst;
  439. u32 ccr, ctcr, ctbr, tlen;
  440. src_addr_width = chan->dma_config.src_addr_width;
  441. dst_addr_width = chan->dma_config.dst_addr_width;
  442. src_maxburst = chan->dma_config.src_maxburst;
  443. dst_maxburst = chan->dma_config.dst_maxburst;
  444. ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id));
  445. ctcr = stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id));
  446. ctbr = stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id));
  447. /* Enable HW request mode */
  448. ctcr &= ~STM32_MDMA_CTCR_SWRM;
  449. /* Set DINC, SINC, DINCOS, SINCOS, TRGM and TLEN retrieved from DT */
  450. ctcr &= ~STM32_MDMA_CTCR_CFG_MASK;
  451. ctcr |= chan_config->transfer_config & STM32_MDMA_CTCR_CFG_MASK;
  452. /*
  453. * For buffer transfer length (TLEN) we have to set
  454. * the number of bytes - 1 in CTCR register
  455. */
  456. tlen = STM32_MDMA_CTCR_LEN2_GET(ctcr);
  457. ctcr &= ~STM32_MDMA_CTCR_LEN2_MSK;
  458. ctcr |= STM32_MDMA_CTCR_TLEN((tlen - 1));
  459. /* Disable Pack Enable */
  460. ctcr &= ~STM32_MDMA_CTCR_PKE;
  461. /* Check burst size constraints */
  462. if (src_maxburst * src_addr_width > STM32_MDMA_MAX_BURST ||
  463. dst_maxburst * dst_addr_width > STM32_MDMA_MAX_BURST) {
  464. dev_err(chan2dev(chan),
  465. "burst size * bus width higher than %d bytes\n",
  466. STM32_MDMA_MAX_BURST);
  467. return -EINVAL;
  468. }
  469. if ((!is_power_of_2(src_maxburst) && src_maxburst > 0) ||
  470. (!is_power_of_2(dst_maxburst) && dst_maxburst > 0)) {
  471. dev_err(chan2dev(chan), "burst size must be a power of 2\n");
  472. return -EINVAL;
  473. }
  474. /*
  475. * Configure channel control:
  476. * - Clear SW request as in this case this is a HW one
  477. * - Clear WEX, HEX and BEX bits
  478. * - Set priority level
  479. */
  480. ccr &= ~(STM32_MDMA_CCR_SWRQ | STM32_MDMA_CCR_WEX | STM32_MDMA_CCR_HEX |
  481. STM32_MDMA_CCR_BEX | STM32_MDMA_CCR_PL_MASK);
  482. ccr |= STM32_MDMA_CCR_PL(chan_config->priority_level);
  483. /* Configure Trigger selection */
  484. ctbr &= ~STM32_MDMA_CTBR_TSEL_MASK;
  485. ctbr |= STM32_MDMA_CTBR_TSEL(chan_config->request);
  486. switch (direction) {
  487. case DMA_MEM_TO_DEV:
  488. dst_addr = chan->dma_config.dst_addr;
  489. /* Set device data size */
  490. dst_bus_width = stm32_mdma_get_width(chan, dst_addr_width);
  491. if (dst_bus_width < 0)
  492. return dst_bus_width;
  493. ctcr &= ~STM32_MDMA_CTCR_DSIZE_MASK;
  494. ctcr |= STM32_MDMA_CTCR_DSIZE(dst_bus_width);
  495. /* Set device burst value */
  496. dst_best_burst = stm32_mdma_get_best_burst(buf_len, tlen,
  497. dst_maxburst,
  498. dst_addr_width);
  499. chan->mem_burst = dst_best_burst;
  500. ctcr &= ~STM32_MDMA_CTCR_DBURST_MASK;
  501. ctcr |= STM32_MDMA_CTCR_DBURST((ilog2(dst_best_burst)));
  502. /* Set memory data size */
  503. src_addr_width = stm32_mdma_get_max_width(addr, buf_len, tlen);
  504. chan->mem_width = src_addr_width;
  505. src_bus_width = stm32_mdma_get_width(chan, src_addr_width);
  506. if (src_bus_width < 0)
  507. return src_bus_width;
  508. ctcr &= ~(STM32_MDMA_CTCR_SSIZE_MASK |
  509. STM32_MDMA_CTCR_SINCOS_MASK);
  510. ctcr |= STM32_MDMA_CTCR_SSIZE(src_bus_width) |
  511. STM32_MDMA_CTCR_SINCOS(src_bus_width);
  512. /* Set memory burst value */
  513. src_maxburst = STM32_MDMA_MAX_BUF_LEN / src_addr_width;
  514. src_best_burst = stm32_mdma_get_best_burst(buf_len, tlen,
  515. src_maxburst,
  516. src_addr_width);
  517. chan->mem_burst = src_best_burst;
  518. ctcr &= ~STM32_MDMA_CTCR_SBURST_MASK;
  519. ctcr |= STM32_MDMA_CTCR_SBURST((ilog2(src_best_burst)));
  520. /* Select bus */
  521. stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_DBUS,
  522. dst_addr);
  523. if (dst_bus_width != src_bus_width)
  524. ctcr |= STM32_MDMA_CTCR_PKE;
  525. /* Set destination address */
  526. stm32_mdma_write(dmadev, STM32_MDMA_CDAR(chan->id), dst_addr);
  527. break;
  528. case DMA_DEV_TO_MEM:
  529. src_addr = chan->dma_config.src_addr;
  530. /* Set device data size */
  531. src_bus_width = stm32_mdma_get_width(chan, src_addr_width);
  532. if (src_bus_width < 0)
  533. return src_bus_width;
  534. ctcr &= ~STM32_MDMA_CTCR_SSIZE_MASK;
  535. ctcr |= STM32_MDMA_CTCR_SSIZE(src_bus_width);
  536. /* Set device burst value */
  537. src_best_burst = stm32_mdma_get_best_burst(buf_len, tlen,
  538. src_maxburst,
  539. src_addr_width);
  540. ctcr &= ~STM32_MDMA_CTCR_SBURST_MASK;
  541. ctcr |= STM32_MDMA_CTCR_SBURST((ilog2(src_best_burst)));
  542. /* Set memory data size */
  543. dst_addr_width = stm32_mdma_get_max_width(addr, buf_len, tlen);
  544. chan->mem_width = dst_addr_width;
  545. dst_bus_width = stm32_mdma_get_width(chan, dst_addr_width);
  546. if (dst_bus_width < 0)
  547. return dst_bus_width;
  548. ctcr &= ~(STM32_MDMA_CTCR_DSIZE_MASK |
  549. STM32_MDMA_CTCR_DINCOS_MASK);
  550. ctcr |= STM32_MDMA_CTCR_DSIZE(dst_bus_width) |
  551. STM32_MDMA_CTCR_DINCOS(dst_bus_width);
  552. /* Set memory burst value */
  553. dst_maxburst = STM32_MDMA_MAX_BUF_LEN / dst_addr_width;
  554. dst_best_burst = stm32_mdma_get_best_burst(buf_len, tlen,
  555. dst_maxburst,
  556. dst_addr_width);
  557. ctcr &= ~STM32_MDMA_CTCR_DBURST_MASK;
  558. ctcr |= STM32_MDMA_CTCR_DBURST((ilog2(dst_best_burst)));
  559. /* Select bus */
  560. stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_SBUS,
  561. src_addr);
  562. if (dst_bus_width != src_bus_width)
  563. ctcr |= STM32_MDMA_CTCR_PKE;
  564. /* Set source address */
  565. stm32_mdma_write(dmadev, STM32_MDMA_CSAR(chan->id), src_addr);
  566. break;
  567. default:
  568. dev_err(chan2dev(chan), "Dma direction is not supported\n");
  569. return -EINVAL;
  570. }
  571. *mdma_ccr = ccr;
  572. *mdma_ctcr = ctcr;
  573. *mdma_ctbr = ctbr;
  574. return 0;
  575. }
  576. static void stm32_mdma_dump_hwdesc(struct stm32_mdma_chan *chan,
  577. struct stm32_mdma_desc_node *node)
  578. {
  579. dev_dbg(chan2dev(chan), "hwdesc: %pad\n", &node->hwdesc_phys);
  580. dev_dbg(chan2dev(chan), "CTCR: 0x%08x\n", node->hwdesc->ctcr);
  581. dev_dbg(chan2dev(chan), "CBNDTR: 0x%08x\n", node->hwdesc->cbndtr);
  582. dev_dbg(chan2dev(chan), "CSAR: 0x%08x\n", node->hwdesc->csar);
  583. dev_dbg(chan2dev(chan), "CDAR: 0x%08x\n", node->hwdesc->cdar);
  584. dev_dbg(chan2dev(chan), "CBRUR: 0x%08x\n", node->hwdesc->cbrur);
  585. dev_dbg(chan2dev(chan), "CLAR: 0x%08x\n", node->hwdesc->clar);
  586. dev_dbg(chan2dev(chan), "CTBR: 0x%08x\n", node->hwdesc->ctbr);
  587. dev_dbg(chan2dev(chan), "CMAR: 0x%08x\n", node->hwdesc->cmar);
  588. dev_dbg(chan2dev(chan), "CMDR: 0x%08x\n\n", node->hwdesc->cmdr);
  589. }
  590. static void stm32_mdma_setup_hwdesc(struct stm32_mdma_chan *chan,
  591. struct stm32_mdma_desc *desc,
  592. enum dma_transfer_direction dir, u32 count,
  593. dma_addr_t src_addr, dma_addr_t dst_addr,
  594. u32 len, u32 ctcr, u32 ctbr, bool is_last,
  595. bool is_first, bool is_cyclic)
  596. {
  597. struct stm32_mdma_chan_config *config = &chan->chan_config;
  598. struct stm32_mdma_hwdesc *hwdesc;
  599. u32 next = count + 1;
  600. hwdesc = desc->node[count].hwdesc;
  601. hwdesc->ctcr = ctcr;
  602. hwdesc->cbndtr &= ~(STM32_MDMA_CBNDTR_BRC_MK |
  603. STM32_MDMA_CBNDTR_BRDUM |
  604. STM32_MDMA_CBNDTR_BRSUM |
  605. STM32_MDMA_CBNDTR_BNDT_MASK);
  606. hwdesc->cbndtr |= STM32_MDMA_CBNDTR_BNDT(len);
  607. hwdesc->csar = src_addr;
  608. hwdesc->cdar = dst_addr;
  609. hwdesc->cbrur = 0;
  610. hwdesc->ctbr = ctbr;
  611. hwdesc->cmar = config->mask_addr;
  612. hwdesc->cmdr = config->mask_data;
  613. if (is_last) {
  614. if (is_cyclic)
  615. hwdesc->clar = desc->node[0].hwdesc_phys;
  616. else
  617. hwdesc->clar = 0;
  618. } else {
  619. hwdesc->clar = desc->node[next].hwdesc_phys;
  620. }
  621. stm32_mdma_dump_hwdesc(chan, &desc->node[count]);
  622. }
  623. static int stm32_mdma_setup_xfer(struct stm32_mdma_chan *chan,
  624. struct stm32_mdma_desc *desc,
  625. struct scatterlist *sgl, u32 sg_len,
  626. enum dma_transfer_direction direction)
  627. {
  628. struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
  629. struct dma_slave_config *dma_config = &chan->dma_config;
  630. struct scatterlist *sg;
  631. dma_addr_t src_addr, dst_addr;
  632. u32 ccr, ctcr, ctbr;
  633. int i, ret = 0;
  634. for_each_sg(sgl, sg, sg_len, i) {
  635. if (sg_dma_len(sg) > STM32_MDMA_MAX_BLOCK_LEN) {
  636. dev_err(chan2dev(chan), "Invalid block len\n");
  637. return -EINVAL;
  638. }
  639. if (direction == DMA_MEM_TO_DEV) {
  640. src_addr = sg_dma_address(sg);
  641. dst_addr = dma_config->dst_addr;
  642. ret = stm32_mdma_set_xfer_param(chan, direction, &ccr,
  643. &ctcr, &ctbr, src_addr,
  644. sg_dma_len(sg));
  645. stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_SBUS,
  646. src_addr);
  647. } else {
  648. src_addr = dma_config->src_addr;
  649. dst_addr = sg_dma_address(sg);
  650. ret = stm32_mdma_set_xfer_param(chan, direction, &ccr,
  651. &ctcr, &ctbr, dst_addr,
  652. sg_dma_len(sg));
  653. stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_DBUS,
  654. dst_addr);
  655. }
  656. if (ret < 0)
  657. return ret;
  658. stm32_mdma_setup_hwdesc(chan, desc, direction, i, src_addr,
  659. dst_addr, sg_dma_len(sg), ctcr, ctbr,
  660. i == sg_len - 1, i == 0, false);
  661. }
  662. /* Enable interrupts */
  663. ccr &= ~STM32_MDMA_CCR_IRQ_MASK;
  664. ccr |= STM32_MDMA_CCR_TEIE | STM32_MDMA_CCR_CTCIE;
  665. if (sg_len > 1)
  666. ccr |= STM32_MDMA_CCR_BTIE;
  667. desc->ccr = ccr;
  668. return 0;
  669. }
  670. static struct dma_async_tx_descriptor *
  671. stm32_mdma_prep_slave_sg(struct dma_chan *c, struct scatterlist *sgl,
  672. u32 sg_len, enum dma_transfer_direction direction,
  673. unsigned long flags, void *context)
  674. {
  675. struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
  676. struct stm32_mdma_desc *desc;
  677. int i, ret;
  678. /*
  679. * Once the channel has been set up for a cyclic transfer, it cannot be
  680. * assigned to another request. The DMA channel needs to be aborted or
  681. * terminated before another request can be accepted.
  682. */
  683. if (chan->desc && chan->desc->cyclic) {
  684. dev_err(chan2dev(chan),
  685. "Request not allowed when dma in cyclic mode\n");
  686. return NULL;
  687. }
  688. desc = stm32_mdma_alloc_desc(chan, sg_len);
  689. if (!desc)
  690. return NULL;
  691. ret = stm32_mdma_setup_xfer(chan, desc, sgl, sg_len, direction);
  692. if (ret < 0)
  693. goto xfer_setup_err;
  694. desc->cyclic = false;
  695. return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
  696. xfer_setup_err:
  697. for (i = 0; i < desc->count; i++)
  698. dma_pool_free(chan->desc_pool, desc->node[i].hwdesc,
  699. desc->node[i].hwdesc_phys);
  700. kfree(desc);
  701. return NULL;
  702. }
  703. static struct dma_async_tx_descriptor *
  704. stm32_mdma_prep_dma_cyclic(struct dma_chan *c, dma_addr_t buf_addr,
  705. size_t buf_len, size_t period_len,
  706. enum dma_transfer_direction direction,
  707. unsigned long flags)
  708. {
  709. struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
  710. struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
  711. struct dma_slave_config *dma_config = &chan->dma_config;
  712. struct stm32_mdma_desc *desc;
  713. dma_addr_t src_addr, dst_addr;
  714. u32 ccr, ctcr, ctbr, count;
  715. int i, ret;
  716. /*
  717. * Once the channel has been set up for a cyclic transfer, it cannot be
  718. * assigned to another request. The DMA channel needs to be aborted or
  719. * terminated before another request can be accepted.
  720. */
  721. if (chan->desc && chan->desc->cyclic) {
  722. dev_err(chan2dev(chan),
  723. "Request not allowed when dma in cyclic mode\n");
  724. return NULL;
  725. }
  726. if (!buf_len || !period_len || period_len > STM32_MDMA_MAX_BLOCK_LEN) {
  727. dev_err(chan2dev(chan), "Invalid buffer/period len\n");
  728. return NULL;
  729. }
  730. if (buf_len % period_len) {
  731. dev_err(chan2dev(chan), "buf_len not multiple of period_len\n");
  732. return NULL;
  733. }
  734. count = buf_len / period_len;
  735. desc = stm32_mdma_alloc_desc(chan, count);
  736. if (!desc)
  737. return NULL;
  738. /* Select bus */
  739. if (direction == DMA_MEM_TO_DEV) {
  740. src_addr = buf_addr;
  741. ret = stm32_mdma_set_xfer_param(chan, direction, &ccr, &ctcr,
  742. &ctbr, src_addr, period_len);
  743. stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_SBUS,
  744. src_addr);
  745. } else {
  746. dst_addr = buf_addr;
  747. ret = stm32_mdma_set_xfer_param(chan, direction, &ccr, &ctcr,
  748. &ctbr, dst_addr, period_len);
  749. stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_DBUS,
  750. dst_addr);
  751. }
  752. if (ret < 0)
  753. goto xfer_setup_err;
  754. /* Enable interrupts */
  755. ccr &= ~STM32_MDMA_CCR_IRQ_MASK;
  756. ccr |= STM32_MDMA_CCR_TEIE | STM32_MDMA_CCR_CTCIE | STM32_MDMA_CCR_BTIE;
  757. desc->ccr = ccr;
  758. /* Configure hwdesc list */
  759. for (i = 0; i < count; i++) {
  760. if (direction == DMA_MEM_TO_DEV) {
  761. src_addr = buf_addr + i * period_len;
  762. dst_addr = dma_config->dst_addr;
  763. } else {
  764. src_addr = dma_config->src_addr;
  765. dst_addr = buf_addr + i * period_len;
  766. }
  767. stm32_mdma_setup_hwdesc(chan, desc, direction, i, src_addr,
  768. dst_addr, period_len, ctcr, ctbr,
  769. i == count - 1, i == 0, true);
  770. }
  771. desc->cyclic = true;
  772. return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
  773. xfer_setup_err:
  774. for (i = 0; i < desc->count; i++)
  775. dma_pool_free(chan->desc_pool, desc->node[i].hwdesc,
  776. desc->node[i].hwdesc_phys);
  777. kfree(desc);
  778. return NULL;
  779. }
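/*
 * A minimal sketch of how a dmaengine client might drive this cyclic path
 * (channel name, buffer sizes and the device address are illustrative):
 *
 *	struct dma_chan *ch = dma_request_chan(dev, "rx");
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_DEV_TO_MEM,
 *		.src_addr = fifo_phys_addr,
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst = 8,
 *	};
 *	struct dma_async_tx_descriptor *d;
 *
 *	dmaengine_slave_config(ch, &cfg);
 *	d = dmaengine_prep_dma_cyclic(ch, buf_dma, buf_len, period_len,
 *				      DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	d->callback = period_done_cb;	// invoked once per period
 *	dmaengine_submit(d);
 *	dma_async_issue_pending(ch);
 */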
  780. static struct dma_async_tx_descriptor *
  781. stm32_mdma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dest, dma_addr_t src,
  782. size_t len, unsigned long flags)
  783. {
  784. struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
  785. struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
  786. enum dma_slave_buswidth max_width;
  787. struct stm32_mdma_desc *desc;
  788. struct stm32_mdma_hwdesc *hwdesc;
  789. u32 ccr, ctcr, ctbr, cbndtr, count, max_burst, mdma_burst;
  790. u32 best_burst, tlen;
  791. size_t xfer_count, offset;
  792. int src_bus_width, dst_bus_width;
  793. int i;
  794. /*
  795. * Once the channel has been set up for a cyclic transfer, it cannot be
  796. * assigned to another request. The DMA channel needs to be aborted or
  797. * terminated before another request can be accepted.
  798. */
  799. if (chan->desc && chan->desc->cyclic) {
  800. dev_err(chan2dev(chan),
  801. "Request not allowed when dma in cyclic mode\n");
  802. return NULL;
  803. }
  804. count = DIV_ROUND_UP(len, STM32_MDMA_MAX_BLOCK_LEN);
  805. desc = stm32_mdma_alloc_desc(chan, count);
  806. if (!desc)
  807. return NULL;
  808. ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id));
  809. ctcr = stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id));
  810. ctbr = stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id));
  811. cbndtr = stm32_mdma_read(dmadev, STM32_MDMA_CBNDTR(chan->id));
  812. /* Enable sw req, some interrupts and clear other bits */
  813. ccr &= ~(STM32_MDMA_CCR_WEX | STM32_MDMA_CCR_HEX |
  814. STM32_MDMA_CCR_BEX | STM32_MDMA_CCR_PL_MASK |
  815. STM32_MDMA_CCR_IRQ_MASK);
  816. ccr |= STM32_MDMA_CCR_TEIE;
  817. /* Enable SW request mode, dest/src inc and clear other bits */
  818. ctcr &= ~(STM32_MDMA_CTCR_BWM | STM32_MDMA_CTCR_TRGM_MSK |
  819. STM32_MDMA_CTCR_PAM_MASK | STM32_MDMA_CTCR_PKE |
  820. STM32_MDMA_CTCR_TLEN_MSK | STM32_MDMA_CTCR_DBURST_MASK |
  821. STM32_MDMA_CTCR_SBURST_MASK | STM32_MDMA_CTCR_DINCOS_MASK |
  822. STM32_MDMA_CTCR_SINCOS_MASK | STM32_MDMA_CTCR_DSIZE_MASK |
  823. STM32_MDMA_CTCR_SSIZE_MASK | STM32_MDMA_CTCR_DINC_MASK |
  824. STM32_MDMA_CTCR_SINC_MASK);
  825. ctcr |= STM32_MDMA_CTCR_SWRM | STM32_MDMA_CTCR_SINC(STM32_MDMA_INC) |
  826. STM32_MDMA_CTCR_DINC(STM32_MDMA_INC);
  827. /* Reset HW request */
  828. ctbr &= ~STM32_MDMA_CTBR_TSEL_MASK;
  829. /* Select bus */
  830. stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_SBUS, src);
  831. stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_DBUS, dest);
  832. /* Clear CBNDTR registers */
  833. cbndtr &= ~(STM32_MDMA_CBNDTR_BRC_MK | STM32_MDMA_CBNDTR_BRDUM |
  834. STM32_MDMA_CBNDTR_BRSUM | STM32_MDMA_CBNDTR_BNDT_MASK);
  835. if (len <= STM32_MDMA_MAX_BLOCK_LEN) {
  836. cbndtr |= STM32_MDMA_CBNDTR_BNDT(len);
  837. if (len <= STM32_MDMA_MAX_BUF_LEN) {
  838. /* Setup a buffer transfer */
  839. ccr |= STM32_MDMA_CCR_TCIE | STM32_MDMA_CCR_CTCIE;
  840. ctcr |= STM32_MDMA_CTCR_TRGM(STM32_MDMA_BUFFER);
  841. } else {
  842. /* Setup a block transfer */
  843. ccr |= STM32_MDMA_CCR_BTIE | STM32_MDMA_CCR_CTCIE;
  844. ctcr |= STM32_MDMA_CTCR_TRGM(STM32_MDMA_BLOCK);
  845. }
  846. tlen = STM32_MDMA_MAX_BUF_LEN;
  847. ctcr |= STM32_MDMA_CTCR_TLEN((tlen - 1));
  848. /* Set source best burst size */
  849. max_width = stm32_mdma_get_max_width(src, len, tlen);
  850. src_bus_width = stm32_mdma_get_width(chan, max_width);
  851. max_burst = tlen / max_width;
  852. best_burst = stm32_mdma_get_best_burst(len, tlen, max_burst,
  853. max_width);
  854. mdma_burst = ilog2(best_burst);
  855. ctcr |= STM32_MDMA_CTCR_SBURST(mdma_burst) |
  856. STM32_MDMA_CTCR_SSIZE(src_bus_width) |
  857. STM32_MDMA_CTCR_SINCOS(src_bus_width);
  858. /* Set destination best burst size */
  859. max_width = stm32_mdma_get_max_width(dest, len, tlen);
  860. dst_bus_width = stm32_mdma_get_width(chan, max_width);
  861. max_burst = tlen / max_width;
  862. best_burst = stm32_mdma_get_best_burst(len, tlen, max_burst,
  863. max_width);
  864. mdma_burst = ilog2(best_burst);
  865. ctcr |= STM32_MDMA_CTCR_DBURST(mdma_burst) |
  866. STM32_MDMA_CTCR_DSIZE(dst_bus_width) |
  867. STM32_MDMA_CTCR_DINCOS(dst_bus_width);
  868. if (dst_bus_width != src_bus_width)
  869. ctcr |= STM32_MDMA_CTCR_PKE;
  870. /* Prepare hardware descriptor */
  871. hwdesc = desc->node[0].hwdesc;
  872. hwdesc->ctcr = ctcr;
  873. hwdesc->cbndtr = cbndtr;
  874. hwdesc->csar = src;
  875. hwdesc->cdar = dest;
  876. hwdesc->cbrur = 0;
  877. hwdesc->clar = 0;
  878. hwdesc->ctbr = ctbr;
  879. hwdesc->cmar = 0;
  880. hwdesc->cmdr = 0;
  881. stm32_mdma_dump_hwdesc(chan, &desc->node[0]);
  882. } else {
  883. /* Setup a LLI transfer */
  884. ctcr |= STM32_MDMA_CTCR_TRGM(STM32_MDMA_LINKED_LIST) |
  885. STM32_MDMA_CTCR_TLEN((STM32_MDMA_MAX_BUF_LEN - 1));
  886. ccr |= STM32_MDMA_CCR_BTIE | STM32_MDMA_CCR_CTCIE;
  887. tlen = STM32_MDMA_MAX_BUF_LEN;
  888. for (i = 0, offset = 0; offset < len;
  889. i++, offset += xfer_count) {
  890. xfer_count = min_t(size_t, len - offset,
  891. STM32_MDMA_MAX_BLOCK_LEN);
  892. /* Set source best burst size */
  893. max_width = stm32_mdma_get_max_width(src, len, tlen);
  894. src_bus_width = stm32_mdma_get_width(chan, max_width);
  895. max_burst = tlen / max_width;
  896. best_burst = stm32_mdma_get_best_burst(len, tlen,
  897. max_burst,
  898. max_width);
  899. mdma_burst = ilog2(best_burst);
  900. ctcr |= STM32_MDMA_CTCR_SBURST(mdma_burst) |
  901. STM32_MDMA_CTCR_SSIZE(src_bus_width) |
  902. STM32_MDMA_CTCR_SINCOS(src_bus_width);
  903. /* Set destination best burst size */
  904. max_width = stm32_mdma_get_max_width(dest, len, tlen);
  905. dst_bus_width = stm32_mdma_get_width(chan, max_width);
  906. max_burst = tlen / max_width;
  907. best_burst = stm32_mdma_get_best_burst(len, tlen,
  908. max_burst,
  909. max_width);
  910. mdma_burst = ilog2(best_burst);
  911. ctcr |= STM32_MDMA_CTCR_DBURST(mdma_burst) |
  912. STM32_MDMA_CTCR_DSIZE(dst_bus_width) |
  913. STM32_MDMA_CTCR_DINCOS(dst_bus_width);
  914. if (dst_bus_width != src_bus_width)
  915. ctcr |= STM32_MDMA_CTCR_PKE;
  916. /* Prepare hardware descriptor */
  917. stm32_mdma_setup_hwdesc(chan, desc, DMA_MEM_TO_MEM, i,
  918. src + offset, dest + offset,
  919. xfer_count, ctcr, ctbr,
  920. i == count - 1, i == 0, false);
  921. }
  922. }
  923. desc->ccr = ccr;
  924. desc->cyclic = false;
  925. return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
  926. }
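/*
 * Summary of the memcpy path above: copies of up to STM32_MDMA_MAX_BUF_LEN
 * (128 bytes) run as a single buffer transfer, copies of up to
 * STM32_MDMA_MAX_BLOCK_LEN (64 KiB) as a single block transfer, and anything
 * larger is split into a linked list of up to 64 KiB blocks chained through
 * CLAR, all in software-request mode with source and destination
 * incrementing.
 */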
  927. static void stm32_mdma_dump_reg(struct stm32_mdma_chan *chan)
  928. {
  929. struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
  930. dev_dbg(chan2dev(chan), "CCR: 0x%08x\n",
  931. stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)));
  932. dev_dbg(chan2dev(chan), "CTCR: 0x%08x\n",
  933. stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id)));
  934. dev_dbg(chan2dev(chan), "CBNDTR: 0x%08x\n",
  935. stm32_mdma_read(dmadev, STM32_MDMA_CBNDTR(chan->id)));
  936. dev_dbg(chan2dev(chan), "CSAR: 0x%08x\n",
  937. stm32_mdma_read(dmadev, STM32_MDMA_CSAR(chan->id)));
  938. dev_dbg(chan2dev(chan), "CDAR: 0x%08x\n",
  939. stm32_mdma_read(dmadev, STM32_MDMA_CDAR(chan->id)));
  940. dev_dbg(chan2dev(chan), "CBRUR: 0x%08x\n",
  941. stm32_mdma_read(dmadev, STM32_MDMA_CBRUR(chan->id)));
  942. dev_dbg(chan2dev(chan), "CLAR: 0x%08x\n",
  943. stm32_mdma_read(dmadev, STM32_MDMA_CLAR(chan->id)));
  944. dev_dbg(chan2dev(chan), "CTBR: 0x%08x\n",
  945. stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id)));
  946. dev_dbg(chan2dev(chan), "CMAR: 0x%08x\n",
  947. stm32_mdma_read(dmadev, STM32_MDMA_CMAR(chan->id)));
  948. dev_dbg(chan2dev(chan), "CMDR: 0x%08x\n",
  949. stm32_mdma_read(dmadev, STM32_MDMA_CMDR(chan->id)));
  950. }
  951. static void stm32_mdma_start_transfer(struct stm32_mdma_chan *chan)
  952. {
  953. struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
  954. struct virt_dma_desc *vdesc;
  955. struct stm32_mdma_hwdesc *hwdesc;
  956. u32 id = chan->id;
  957. u32 status, reg;
  958. vdesc = vchan_next_desc(&chan->vchan);
  959. if (!vdesc) {
  960. chan->desc = NULL;
  961. return;
  962. }
  963. list_del(&vdesc->node);
  964. chan->desc = to_stm32_mdma_desc(vdesc);
  965. hwdesc = chan->desc->node[0].hwdesc;
  966. chan->curr_hwdesc = 0;
  967. stm32_mdma_write(dmadev, STM32_MDMA_CCR(id), chan->desc->ccr);
  968. stm32_mdma_write(dmadev, STM32_MDMA_CTCR(id), hwdesc->ctcr);
  969. stm32_mdma_write(dmadev, STM32_MDMA_CBNDTR(id), hwdesc->cbndtr);
  970. stm32_mdma_write(dmadev, STM32_MDMA_CSAR(id), hwdesc->csar);
  971. stm32_mdma_write(dmadev, STM32_MDMA_CDAR(id), hwdesc->cdar);
  972. stm32_mdma_write(dmadev, STM32_MDMA_CBRUR(id), hwdesc->cbrur);
  973. stm32_mdma_write(dmadev, STM32_MDMA_CLAR(id), hwdesc->clar);
  974. stm32_mdma_write(dmadev, STM32_MDMA_CTBR(id), hwdesc->ctbr);
  975. stm32_mdma_write(dmadev, STM32_MDMA_CMAR(id), hwdesc->cmar);
  976. stm32_mdma_write(dmadev, STM32_MDMA_CMDR(id), hwdesc->cmdr);
  977. /* Clear interrupt status if it is there */
  978. status = stm32_mdma_read(dmadev, STM32_MDMA_CISR(id));
  979. if (status)
  980. stm32_mdma_set_bits(dmadev, STM32_MDMA_CIFCR(id), status);
  981. stm32_mdma_dump_reg(chan);
  982. /* Start DMA */
  983. stm32_mdma_set_bits(dmadev, STM32_MDMA_CCR(id), STM32_MDMA_CCR_EN);
  984. /* Set SW request in case of MEM2MEM transfer */
  985. if (hwdesc->ctcr & STM32_MDMA_CTCR_SWRM) {
  986. reg = STM32_MDMA_CCR(id);
  987. stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CCR_SWRQ);
  988. }
  989. chan->busy = true;
  990. dev_dbg(chan2dev(chan), "vchan %pK: started\n", &chan->vchan);
  991. }
  992. static void stm32_mdma_issue_pending(struct dma_chan *c)
  993. {
  994. struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
  995. unsigned long flags;
  996. spin_lock_irqsave(&chan->vchan.lock, flags);
  997. if (!vchan_issue_pending(&chan->vchan))
  998. goto end;
  999. dev_dbg(chan2dev(chan), "vchan %pK: issued\n", &chan->vchan);
  1000. if (!chan->desc && !chan->busy)
  1001. stm32_mdma_start_transfer(chan);
  1002. end:
  1003. spin_unlock_irqrestore(&chan->vchan.lock, flags);
  1004. }
  1005. static int stm32_mdma_pause(struct dma_chan *c)
  1006. {
  1007. struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
  1008. unsigned long flags;
  1009. int ret;
  1010. spin_lock_irqsave(&chan->vchan.lock, flags);
  1011. ret = stm32_mdma_disable_chan(chan);
  1012. spin_unlock_irqrestore(&chan->vchan.lock, flags);
  1013. if (!ret)
  1014. dev_dbg(chan2dev(chan), "vchan %pK: pause\n", &chan->vchan);
  1015. return ret;
  1016. }
  1017. static int stm32_mdma_resume(struct dma_chan *c)
  1018. {
  1019. struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
  1020. struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
  1021. struct stm32_mdma_hwdesc *hwdesc;
  1022. unsigned long flags;
  1023. u32 status, reg;
  1024. hwdesc = chan->desc->node[chan->curr_hwdesc].hwdesc;
  1025. spin_lock_irqsave(&chan->vchan.lock, flags);
  1026. /* Re-configure control register */
  1027. stm32_mdma_write(dmadev, STM32_MDMA_CCR(chan->id), chan->desc->ccr);
  1028. /* Clear interrupt status if it is there */
  1029. status = stm32_mdma_read(dmadev, STM32_MDMA_CISR(chan->id));
  1030. if (status)
  1031. stm32_mdma_set_bits(dmadev, STM32_MDMA_CIFCR(chan->id), status);
  1032. stm32_mdma_dump_reg(chan);
  1033. /* Re-start DMA */
  1034. reg = STM32_MDMA_CCR(chan->id);
  1035. stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CCR_EN);
  1036. /* Set SW request in case of MEM2MEM transfer */
  1037. if (hwdesc->ctcr & STM32_MDMA_CTCR_SWRM)
  1038. stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CCR_SWRQ);
  1039. spin_unlock_irqrestore(&chan->vchan.lock, flags);
  1040. dev_dbg(chan2dev(chan), "vchan %pK: resume\n", &chan->vchan);
  1041. return 0;
  1042. }
  1043. static int stm32_mdma_terminate_all(struct dma_chan *c)
  1044. {
  1045. struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
  1046. unsigned long flags;
  1047. LIST_HEAD(head);
  1048. spin_lock_irqsave(&chan->vchan.lock, flags);
  1049. if (chan->desc) {
  1050. vchan_terminate_vdesc(&chan->desc->vdesc);
  1051. if (chan->busy)
  1052. stm32_mdma_stop(chan);
  1053. chan->desc = NULL;
  1054. }
  1055. vchan_get_all_descriptors(&chan->vchan, &head);
  1056. spin_unlock_irqrestore(&chan->vchan.lock, flags);
  1057. vchan_dma_desc_free_list(&chan->vchan, &head);
  1058. return 0;
  1059. }
  1060. static void stm32_mdma_synchronize(struct dma_chan *c)
  1061. {
  1062. struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
  1063. vchan_synchronize(&chan->vchan);
  1064. }
  1065. static int stm32_mdma_slave_config(struct dma_chan *c,
  1066. struct dma_slave_config *config)
  1067. {
  1068. struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
  1069. memcpy(&chan->dma_config, config, sizeof(*config));
  1070. return 0;
  1071. }
  1072. static size_t stm32_mdma_desc_residue(struct stm32_mdma_chan *chan,
  1073. struct stm32_mdma_desc *desc,
  1074. u32 curr_hwdesc)
  1075. {
  1076. struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
  1077. struct stm32_mdma_hwdesc *hwdesc = desc->node[0].hwdesc;
  1078. u32 cbndtr, residue, modulo, burst_size;
  1079. int i;
  1080. residue = 0;
  1081. for (i = curr_hwdesc + 1; i < desc->count; i++) {
  1082. hwdesc = desc->node[i].hwdesc;
  1083. residue += STM32_MDMA_CBNDTR_BNDT(hwdesc->cbndtr);
  1084. }
  1085. cbndtr = stm32_mdma_read(dmadev, STM32_MDMA_CBNDTR(chan->id));
  1086. residue += cbndtr & STM32_MDMA_CBNDTR_BNDT_MASK;
  1087. if (!chan->mem_burst)
  1088. return residue;
  1089. burst_size = chan->mem_burst * chan->mem_width;
  1090. modulo = residue % burst_size;
  1091. if (modulo)
  1092. residue = residue - modulo + burst_size;
  1093. return residue;
  1094. }
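/*
 * Example: with 100 bytes left (remaining nodes plus the live CBNDTR count),
 * mem_burst = 8 beats and mem_width = 4 bytes, burst_size is 32, so the
 * residue is rounded up from 100 to 128 bytes, since the hardware completes
 * whole bursts.
 */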
  1095. static enum dma_status stm32_mdma_tx_status(struct dma_chan *c,
  1096. dma_cookie_t cookie,
  1097. struct dma_tx_state *state)
  1098. {
  1099. struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
  1100. struct virt_dma_desc *vdesc;
  1101. enum dma_status status;
  1102. unsigned long flags;
  1103. u32 residue = 0;
  1104. status = dma_cookie_status(c, cookie, state);
  1105. if ((status == DMA_COMPLETE) || (!state))
  1106. return status;
  1107. spin_lock_irqsave(&chan->vchan.lock, flags);
  1108. vdesc = vchan_find_desc(&chan->vchan, cookie);
  1109. if (chan->desc && cookie == chan->desc->vdesc.tx.cookie)
  1110. residue = stm32_mdma_desc_residue(chan, chan->desc,
  1111. chan->curr_hwdesc);
  1112. else if (vdesc)
  1113. residue = stm32_mdma_desc_residue(chan,
  1114. to_stm32_mdma_desc(vdesc), 0);
  1115. dma_set_residue(state, residue);
  1116. spin_unlock_irqrestore(&chan->vchan.lock, flags);
  1117. return status;
  1118. }
  1119. static void stm32_mdma_xfer_end(struct stm32_mdma_chan *chan)
  1120. {
  1121. vchan_cookie_complete(&chan->desc->vdesc);
  1122. chan->desc = NULL;
  1123. chan->busy = false;
  1124. /* Start the next transfer if this driver has a next desc */
  1125. stm32_mdma_start_transfer(chan);
  1126. }
  1127. static irqreturn_t stm32_mdma_irq_handler(int irq, void *devid)
  1128. {
  1129. struct stm32_mdma_device *dmadev = devid;
  1130. struct stm32_mdma_chan *chan;
  1131. u32 reg, id, ien, status, flag;
  1132. /* Find out which channel generates the interrupt */
  1133. status = readl_relaxed(dmadev->base + STM32_MDMA_GISR0);
  1134. if (status) {
  1135. id = __ffs(status);
  1136. } else {
  1137. status = readl_relaxed(dmadev->base + STM32_MDMA_GISR1);
  1138. if (!status) {
  1139. dev_dbg(mdma2dev(dmadev), "spurious interrupt\n");
  1140. return IRQ_NONE;
  1141. }
  1142. id = __ffs(status);
  1143. /*
  1144. * GISR0 reports status for channels 0 to 31, while GISR1 reports
  1145. * status for channels 32 to 62, hence the offset applied here.
  1146. */
  1147. id += 32;
  1148. }
  1149. chan = &dmadev->chan[id];
  1150. if (!chan) {
  1151. dev_dbg(mdma2dev(dmadev), "MDMA channel not initialized\n");
  1152. goto exit;
  1153. }
  1154. /* Handle interrupt for the channel */
  1155. spin_lock(&chan->vchan.lock);
  1156. status = stm32_mdma_read(dmadev, STM32_MDMA_CISR(chan->id));
  1157. ien = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id));
  1158. ien &= STM32_MDMA_CCR_IRQ_MASK;
  1159. ien >>= 1;
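/*
 * The CCR enable bits (TCIE..TEIE, bits 5..1) line up with the CISR flag
 * bits (TCIF..TEIF, bits 4..0) once shifted right by one, so the status can
 * be masked directly against the enabled interrupts in the check below.
 */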
  1160. if (!(status & ien)) {
  1161. spin_unlock(&chan->vchan.lock);
  1162. dev_dbg(chan2dev(chan),
  1163. "spurious it (status=0x%04x, ien=0x%04x)\n",
  1164. status, ien);
  1165. return IRQ_NONE;
  1166. }
  1167. flag = __ffs(status & ien);
  1168. reg = STM32_MDMA_CIFCR(chan->id);
  1169. switch (1 << flag) {
  1170. case STM32_MDMA_CISR_TEIF:
  1171. id = chan->id;
  1172. status = readl_relaxed(dmadev->base + STM32_MDMA_CESR(id));
  1173. dev_err(chan2dev(chan), "Transfer Err: stat=0x%08x\n", status);
  1174. stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CTEIF);
  1175. break;
  1176. case STM32_MDMA_CISR_CTCIF:
  1177. stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CCTCIF);
  1178. stm32_mdma_xfer_end(chan);
  1179. break;
  1180. case STM32_MDMA_CISR_BRTIF:
  1181. stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CBRTIF);
  1182. break;
  1183. case STM32_MDMA_CISR_BTIF:
  1184. stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CBTIF);
  1185. chan->curr_hwdesc++;
  1186. if (chan->desc && chan->desc->cyclic) {
  1187. if (chan->curr_hwdesc == chan->desc->count)
  1188. chan->curr_hwdesc = 0;
  1189. vchan_cyclic_callback(&chan->desc->vdesc);
  1190. }
  1191. break;
  1192. case STM32_MDMA_CISR_TCIF:
  1193. stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CLTCIF);
  1194. break;
  1195. default:
  1196. dev_err(chan2dev(chan), "interrupt %d unhandled (status=0x%04x)\n",
  1197. 1 << flag, status);
  1198. }
  1199. spin_unlock(&chan->vchan.lock);
  1200. exit:
  1201. return IRQ_HANDLED;
  1202. }
  1203. static int stm32_mdma_alloc_chan_resources(struct dma_chan *c)
  1204. {
  1205. struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
  1206. struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
  1207. int ret;
  1208. chan->desc_pool = dmam_pool_create(dev_name(&c->dev->device),
  1209. c->device->dev,
  1210. sizeof(struct stm32_mdma_hwdesc),
  1211. __alignof__(struct stm32_mdma_hwdesc),
  1212. 0);
  1213. if (!chan->desc_pool) {
  1214. dev_err(chan2dev(chan), "failed to allocate descriptor pool\n");
  1215. return -ENOMEM;
  1216. }
  1217. ret = pm_runtime_resume_and_get(dmadev->ddev.dev);
  1218. if (ret < 0)
  1219. return ret;
  1220. ret = stm32_mdma_disable_chan(chan);
  1221. if (ret < 0)
  1222. pm_runtime_put(dmadev->ddev.dev);
  1223. return ret;
  1224. }
  1225. static void stm32_mdma_free_chan_resources(struct dma_chan *c)
  1226. {
  1227. struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
  1228. struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
  1229. unsigned long flags;
  1230. dev_dbg(chan2dev(chan), "Freeing channel %d\n", chan->id);
  1231. if (chan->busy) {
  1232. spin_lock_irqsave(&chan->vchan.lock, flags);
  1233. stm32_mdma_stop(chan);
  1234. chan->desc = NULL;
  1235. spin_unlock_irqrestore(&chan->vchan.lock, flags);
  1236. }
  1237. pm_runtime_put(dmadev->ddev.dev);
  1238. vchan_free_chan_resources(to_virt_chan(c));
  1239. dmam_pool_destroy(chan->desc_pool);
  1240. chan->desc_pool = NULL;
  1241. }
  1242. static struct dma_chan *stm32_mdma_of_xlate(struct of_phandle_args *dma_spec,
  1243. struct of_dma *ofdma)
  1244. {
  1245. struct stm32_mdma_device *dmadev = ofdma->of_dma_data;
  1246. struct stm32_mdma_chan *chan;
  1247. struct dma_chan *c;
  1248. struct stm32_mdma_chan_config config;
  1249. if (dma_spec->args_count < 5) {
  1250. dev_err(mdma2dev(dmadev), "Bad number of args\n");
  1251. return NULL;
  1252. }
  1253. config.request = dma_spec->args[0];
  1254. config.priority_level = dma_spec->args[1];
  1255. config.transfer_config = dma_spec->args[2];
  1256. config.mask_addr = dma_spec->args[3];
  1257. config.mask_data = dma_spec->args[4];
  1258. if (config.request >= dmadev->nr_requests) {
  1259. dev_err(mdma2dev(dmadev), "Bad request line\n");
  1260. return NULL;
  1261. }
  1262. if (config.priority_level > STM32_MDMA_VERY_HIGH_PRIORITY) {
  1263. dev_err(mdma2dev(dmadev), "Priority level not supported\n");
  1264. return NULL;
  1265. }
  1266. c = dma_get_any_slave_channel(&dmadev->ddev);
  1267. if (!c) {
  1268. dev_err(mdma2dev(dmadev), "No more channels available\n");
  1269. return NULL;
  1270. }
  1271. chan = to_stm32_mdma_chan(c);
  1272. chan->chan_config = config;
  1273. return c;
  1274. }
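/*
 * Devicetree usage example (node label and cell values are illustrative):
 * a client passes five cells matching the args parsed above - request line,
 * priority level, CTCR transfer configuration, mask address and mask data:
 *
 *	dmas = <&mdma1 0x16 0x0 0x10100002 0x00000000 0x0>;
 *	dma-names = "tx";
 */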
  1275. static const struct of_device_id stm32_mdma_of_match[] = {
  1276. { .compatible = "st,stm32h7-mdma", },
  1277. { /* sentinel */ },
  1278. };
  1279. MODULE_DEVICE_TABLE(of, stm32_mdma_of_match);
static int stm32_mdma_probe(struct platform_device *pdev)
{
	struct stm32_mdma_chan *chan;
	struct stm32_mdma_device *dmadev;
	struct dma_device *dd;
	struct device_node *of_node;
	struct resource *res;
	struct reset_control *rst;
	u32 nr_channels, nr_requests;
	int i, count, ret;

	of_node = pdev->dev.of_node;
	if (!of_node)
		return -ENODEV;

	ret = device_property_read_u32(&pdev->dev, "dma-channels",
				       &nr_channels);
	if (ret) {
		nr_channels = STM32_MDMA_MAX_CHANNELS;
		dev_warn(&pdev->dev, "MDMA defaulting on %i channels\n",
			 nr_channels);
	}

	ret = device_property_read_u32(&pdev->dev, "dma-requests",
				       &nr_requests);
	if (ret) {
		nr_requests = STM32_MDMA_MAX_REQUESTS;
		dev_warn(&pdev->dev, "MDMA defaulting on %i request lines\n",
			 nr_requests);
	}

	count = device_property_count_u32(&pdev->dev, "st,ahb-addr-masks");
	if (count < 0)
		count = 0;

	dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev) + sizeof(u32) * count,
			      GFP_KERNEL);
	if (!dmadev)
		return -ENOMEM;

	dmadev->nr_channels = nr_channels;
	dmadev->nr_requests = nr_requests;
	device_property_read_u32_array(&pdev->dev, "st,ahb-addr-masks",
				       dmadev->ahb_addr_masks,
				       count);
	dmadev->nr_ahb_addr_masks = count;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dmadev->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(dmadev->base))
		return PTR_ERR(dmadev->base);

	dmadev->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(dmadev->clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(dmadev->clk),
				     "Missing clock controller\n");

	ret = clk_prepare_enable(dmadev->clk);
	if (ret < 0) {
		dev_err(&pdev->dev, "clk_prep_enable error: %d\n", ret);
		return ret;
	}

	rst = devm_reset_control_get(&pdev->dev, NULL);
	if (IS_ERR(rst)) {
		ret = PTR_ERR(rst);
		if (ret == -EPROBE_DEFER)
			goto err_clk;
	} else {
		reset_control_assert(rst);
		udelay(2);
		reset_control_deassert(rst);
	}

	dd = &dmadev->ddev;
	dma_cap_set(DMA_SLAVE, dd->cap_mask);
	dma_cap_set(DMA_PRIVATE, dd->cap_mask);
	dma_cap_set(DMA_CYCLIC, dd->cap_mask);
	dma_cap_set(DMA_MEMCPY, dd->cap_mask);
	dd->device_alloc_chan_resources = stm32_mdma_alloc_chan_resources;
	dd->device_free_chan_resources = stm32_mdma_free_chan_resources;
	dd->device_tx_status = stm32_mdma_tx_status;
	dd->device_issue_pending = stm32_mdma_issue_pending;
	dd->device_prep_slave_sg = stm32_mdma_prep_slave_sg;
	dd->device_prep_dma_cyclic = stm32_mdma_prep_dma_cyclic;
	dd->device_prep_dma_memcpy = stm32_mdma_prep_dma_memcpy;
	dd->device_config = stm32_mdma_slave_config;
	dd->device_pause = stm32_mdma_pause;
	dd->device_resume = stm32_mdma_resume;
	dd->device_terminate_all = stm32_mdma_terminate_all;
	dd->device_synchronize = stm32_mdma_synchronize;
	dd->descriptor_reuse = true;
	dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
		BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
		BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
	dd->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
		BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
		BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
	dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
		BIT(DMA_MEM_TO_MEM);
	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	dd->max_burst = STM32_MDMA_MAX_BURST;
	dd->dev = &pdev->dev;
	INIT_LIST_HEAD(&dd->channels);

	for (i = 0; i < dmadev->nr_channels; i++) {
		chan = &dmadev->chan[i];
		chan->id = i;
		chan->vchan.desc_free = stm32_mdma_desc_free;
		vchan_init(&chan->vchan, dd);
	}

	dmadev->irq = platform_get_irq(pdev, 0);
	if (dmadev->irq < 0) {
		ret = dmadev->irq;
		goto err_clk;
	}

	ret = devm_request_irq(&pdev->dev, dmadev->irq, stm32_mdma_irq_handler,
			       0, dev_name(&pdev->dev), dmadev);
	if (ret) {
		dev_err(&pdev->dev, "failed to request IRQ\n");
		goto err_clk;
	}

	ret = dmaenginem_async_device_register(dd);
	if (ret)
		goto err_clk;

	ret = of_dma_controller_register(of_node, stm32_mdma_of_xlate, dmadev);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"STM32 MDMA DMA OF registration failed %d\n", ret);
		goto err_clk;
	}

	platform_set_drvdata(pdev, dmadev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_put(&pdev->dev);

	dev_info(&pdev->dev, "STM32 MDMA driver registered\n");

	return 0;

err_clk:
	clk_disable_unprepare(dmadev->clk);

	return ret;
}
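
/*
 * Runtime PM hooks: the only resource managed here is the MDMA clock,
 * which is gated whenever the controller is idle and re-enabled before it
 * is accessed again.
 */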
#ifdef CONFIG_PM
static int stm32_mdma_runtime_suspend(struct device *dev)
{
	struct stm32_mdma_device *dmadev = dev_get_drvdata(dev);

	clk_disable_unprepare(dmadev->clk);

	return 0;
}

static int stm32_mdma_runtime_resume(struct device *dev)
{
	struct stm32_mdma_device *dmadev = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(dmadev->clk);
	if (ret) {
		dev_err(dev, "failed to prepare_enable clock\n");
		return ret;
	}

	return 0;
}
#endif
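
/*
 * System sleep: suspend is refused while any channel is still enabled,
 * otherwise the device is forced into runtime suspend so the clock stays
 * off across the sleep transition and is restored on resume.
 */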
#ifdef CONFIG_PM_SLEEP
static int stm32_mdma_pm_suspend(struct device *dev)
{
	struct stm32_mdma_device *dmadev = dev_get_drvdata(dev);
	u32 ccr, id;
	int ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	for (id = 0; id < dmadev->nr_channels; id++) {
		ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(id));
		if (ccr & STM32_MDMA_CCR_EN) {
			dev_warn(dev, "Suspend is prevented by Chan %i\n", id);
			/* Drop the reference taken above before bailing out */
			pm_runtime_put_sync(dev);
			return -EBUSY;
		}
	}

	pm_runtime_put_sync(dev);

	pm_runtime_force_suspend(dev);

	return 0;
}

static int stm32_mdma_pm_resume(struct device *dev)
{
	return pm_runtime_force_resume(dev);
}
#endif
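
/* Wire both the runtime PM and system sleep callbacks into one dev_pm_ops */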
static const struct dev_pm_ops stm32_mdma_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(stm32_mdma_pm_suspend, stm32_mdma_pm_resume)
	SET_RUNTIME_PM_OPS(stm32_mdma_runtime_suspend,
			   stm32_mdma_runtime_resume, NULL)
};
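
/* Platform driver glue: DT match table plus the PM ops defined above */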
static struct platform_driver stm32_mdma_driver = {
	.probe = stm32_mdma_probe,
	.driver = {
		.name = "stm32-mdma",
		.of_match_table = stm32_mdma_of_match,
		.pm = &stm32_mdma_pm_ops,
	},
};
static int __init stm32_mdma_init(void)
{
	return platform_driver_register(&stm32_mdma_driver);
}
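
/*
 * Registered at subsys_initcall time rather than as a regular module_init,
 * so the controller is up early, before most client drivers probe.
 */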
subsys_initcall(stm32_mdma_init);

MODULE_DESCRIPTION("Driver for STM32 MDMA controller");
MODULE_AUTHOR("M'boumba Cedric Madianga <cedric.madianga@gmail.com>");
MODULE_AUTHOR("Pierre-Yves Mordret <pierre-yves.mordret@st.com>");
MODULE_LICENSE("GPL v2");