
/*
 * Copyright (C) 2017 Spreadtrum Communications Inc.
 *
 * SPDX-License-Identifier: GPL-2.0
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dma/sprd-dma.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#include "virt-dma.h"

#define SPRD_DMA_CHN_REG_OFFSET		0x1000
#define SPRD_DMA_CHN_REG_LENGTH		0x40
#define SPRD_DMA_MEMCPY_MIN_SIZE	64

/* DMA global registers definition */
#define SPRD_DMA_GLB_PAUSE		0x0
#define SPRD_DMA_GLB_FRAG_WAIT		0x4
#define SPRD_DMA_GLB_REQ_PEND0_EN	0x8
#define SPRD_DMA_GLB_REQ_PEND1_EN	0xc
#define SPRD_DMA_GLB_INT_RAW_STS	0x10
#define SPRD_DMA_GLB_INT_MSK_STS	0x14
#define SPRD_DMA_GLB_REQ_STS		0x18
#define SPRD_DMA_GLB_CHN_EN_STS		0x1c
#define SPRD_DMA_GLB_DEBUG_STS		0x20
#define SPRD_DMA_GLB_ARB_SEL_STS	0x24
#define SPRD_DMA_GLB_2STAGE_GRP1	0x28
#define SPRD_DMA_GLB_2STAGE_GRP2	0x2c
#define SPRD_DMA_GLB_REQ_UID(uid)	(0x4 * ((uid) - 1))
#define SPRD_DMA_GLB_REQ_UID_OFFSET	0x2000

/* DMA channel registers definition */
#define SPRD_DMA_CHN_PAUSE		0x0
#define SPRD_DMA_CHN_REQ		0x4
#define SPRD_DMA_CHN_CFG		0x8
#define SPRD_DMA_CHN_INTC		0xc
#define SPRD_DMA_CHN_SRC_ADDR		0x10
#define SPRD_DMA_CHN_DES_ADDR		0x14
#define SPRD_DMA_CHN_FRG_LEN		0x18
#define SPRD_DMA_CHN_BLK_LEN		0x1c
#define SPRD_DMA_CHN_TRSC_LEN		0x20
#define SPRD_DMA_CHN_TRSF_STEP		0x24
#define SPRD_DMA_CHN_WARP_PTR		0x28
#define SPRD_DMA_CHN_WARP_TO		0x2c
#define SPRD_DMA_CHN_LLIST_PTR		0x30
#define SPRD_DMA_CHN_FRAG_STEP		0x34
#define SPRD_DMA_CHN_SRC_BLK_STEP	0x38
#define SPRD_DMA_CHN_DES_BLK_STEP	0x3c

/* SPRD_DMA_GLB_2STAGE_GRP register definition */
#define SPRD_DMA_GLB_2STAGE_EN		BIT(24)
#define SPRD_DMA_GLB_CHN_INT_MASK	GENMASK(23, 20)
#define SPRD_DMA_GLB_DEST_INT		BIT(22)
#define SPRD_DMA_GLB_SRC_INT		BIT(20)
#define SPRD_DMA_GLB_LIST_DONE_TRG	BIT(19)
#define SPRD_DMA_GLB_TRANS_DONE_TRG	BIT(18)
#define SPRD_DMA_GLB_BLOCK_DONE_TRG	BIT(17)
#define SPRD_DMA_GLB_FRAG_DONE_TRG	BIT(16)
#define SPRD_DMA_GLB_TRG_OFFSET		16
#define SPRD_DMA_GLB_DEST_CHN_MASK	GENMASK(13, 8)
#define SPRD_DMA_GLB_DEST_CHN_OFFSET	8
#define SPRD_DMA_GLB_SRC_CHN_MASK	GENMASK(5, 0)

/* SPRD_DMA_CHN_INTC register definition */
#define SPRD_DMA_INT_MASK		GENMASK(4, 0)
#define SPRD_DMA_INT_CLR_OFFSET		24
#define SPRD_DMA_FRAG_INT_EN		BIT(0)
#define SPRD_DMA_BLK_INT_EN		BIT(1)
#define SPRD_DMA_TRANS_INT_EN		BIT(2)
#define SPRD_DMA_LIST_INT_EN		BIT(3)
#define SPRD_DMA_CFG_ERR_INT_EN		BIT(4)

/* SPRD_DMA_CHN_CFG register definition */
#define SPRD_DMA_CHN_EN			BIT(0)
#define SPRD_DMA_LINKLIST_EN		BIT(4)
#define SPRD_DMA_WAIT_BDONE_OFFSET	24
#define SPRD_DMA_DONOT_WAIT_BDONE	1

/* SPRD_DMA_CHN_REQ register definition */
#define SPRD_DMA_REQ_EN			BIT(0)

/* SPRD_DMA_CHN_PAUSE register definition */
#define SPRD_DMA_PAUSE_EN		BIT(0)
#define SPRD_DMA_PAUSE_STS		BIT(2)
#define SPRD_DMA_PAUSE_CNT		0x2000

/* DMA_CHN_WARP_* register definition */
#define SPRD_DMA_HIGH_ADDR_MASK		GENMASK(31, 28)
#define SPRD_DMA_LOW_ADDR_MASK		GENMASK(31, 0)
#define SPRD_DMA_WRAP_ADDR_MASK		GENMASK(27, 0)
#define SPRD_DMA_HIGH_ADDR_OFFSET	4

/* SPRD_DMA_CHN_INTC register definition */
#define SPRD_DMA_FRAG_INT_STS		BIT(16)
#define SPRD_DMA_BLK_INT_STS		BIT(17)
#define SPRD_DMA_TRSC_INT_STS		BIT(18)
#define SPRD_DMA_LIST_INT_STS		BIT(19)
#define SPRD_DMA_CFGERR_INT_STS		BIT(20)
#define SPRD_DMA_CHN_INT_STS					\
	(SPRD_DMA_FRAG_INT_STS | SPRD_DMA_BLK_INT_STS |		\
	 SPRD_DMA_TRSC_INT_STS | SPRD_DMA_LIST_INT_STS |	\
	 SPRD_DMA_CFGERR_INT_STS)

/* SPRD_DMA_CHN_FRG_LEN register definition */
#define SPRD_DMA_SRC_DATAWIDTH_OFFSET	30
#define SPRD_DMA_DES_DATAWIDTH_OFFSET	28
#define SPRD_DMA_SWT_MODE_OFFSET	26
#define SPRD_DMA_REQ_MODE_OFFSET	24
#define SPRD_DMA_REQ_MODE_MASK		GENMASK(1, 0)
#define SPRD_DMA_WRAP_SEL_DEST		BIT(23)
#define SPRD_DMA_WRAP_EN		BIT(22)
#define SPRD_DMA_FIX_SEL_OFFSET		21
#define SPRD_DMA_FIX_EN_OFFSET		20
#define SPRD_DMA_LLIST_END		BIT(19)
#define SPRD_DMA_FRG_LEN_MASK		GENMASK(16, 0)

/* SPRD_DMA_CHN_BLK_LEN register definition */
#define SPRD_DMA_BLK_LEN_MASK		GENMASK(16, 0)

/* SPRD_DMA_CHN_TRSC_LEN register definition */
#define SPRD_DMA_TRSC_LEN_MASK		GENMASK(27, 0)

/* SPRD_DMA_CHN_TRSF_STEP register definition */
#define SPRD_DMA_DEST_TRSF_STEP_OFFSET	16
#define SPRD_DMA_SRC_TRSF_STEP_OFFSET	0
#define SPRD_DMA_TRSF_STEP_MASK		GENMASK(15, 0)

/* SPRD_DMA_CHN_SRC_BLK_STEP register definition */
#define SPRD_DMA_LLIST_HIGH_MASK	GENMASK(31, 28)
#define SPRD_DMA_LLIST_HIGH_SHIFT	28

/* DMA channel mode & trigger mode mask */
#define SPRD_DMA_CHN_MODE_MASK		GENMASK(7, 0)
#define SPRD_DMA_TRG_MODE_MASK		GENMASK(7, 0)
#define SPRD_DMA_INT_TYPE_MASK		GENMASK(7, 0)

/* DMA transfer step type */
#define SPRD_DMA_NONE_STEP		0
#define SPRD_DMA_BYTE_STEP		1
#define SPRD_DMA_SHORT_STEP		2
#define SPRD_DMA_WORD_STEP		4
#define SPRD_DMA_DWORD_STEP		8

#define SPRD_DMA_SOFTWARE_UID		0

/* dma data width values */
enum sprd_dma_datawidth {
	SPRD_DMA_DATAWIDTH_1_BYTE,
	SPRD_DMA_DATAWIDTH_2_BYTES,
	SPRD_DMA_DATAWIDTH_4_BYTES,
	SPRD_DMA_DATAWIDTH_8_BYTES,
};

/* dma channel hardware configuration */
struct sprd_dma_chn_hw {
	u32 pause;
	u32 req;
	u32 cfg;
	u32 intc;
	u32 src_addr;
	u32 des_addr;
	u32 frg_len;
	u32 blk_len;
	u32 trsc_len;
	u32 trsf_step;
	u32 wrap_ptr;
	u32 wrap_to;
	u32 llist_ptr;
	u32 frg_step;
	u32 src_blk_step;
	u32 des_blk_step;
};

/* dma request description */
struct sprd_dma_desc {
	struct virt_dma_desc	vd;
	struct sprd_dma_chn_hw	chn_hw;
	enum dma_transfer_direction dir;
};

/* dma channel description */
struct sprd_dma_chn {
	struct virt_dma_chan	vc;
	void __iomem		*chn_base;
	struct sprd_dma_linklist	linklist;
	struct dma_slave_config	slave_cfg;
	u32			chn_num;
	u32			dev_id;
	enum sprd_dma_chn_mode	chn_mode;
	enum sprd_dma_trg_mode	trg_mode;
	enum sprd_dma_int_type	int_type;
	struct sprd_dma_desc	*cur_desc;
};

/* SPRD dma device */
struct sprd_dma_dev {
	struct dma_device	dma_dev;
	void __iomem		*glb_base;
	struct clk		*clk;
	struct clk		*ashb_clk;
	int			irq;
	u32			total_chns;
	struct sprd_dma_chn	channels[];
};

static void sprd_dma_free_desc(struct virt_dma_desc *vd);
static bool sprd_dma_filter_fn(struct dma_chan *chan, void *param);
static struct of_dma_filter_info sprd_dma_info = {
	.filter_fn = sprd_dma_filter_fn,
};

static inline struct sprd_dma_chn *to_sprd_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct sprd_dma_chn, vc.chan);
}

static inline struct sprd_dma_dev *to_sprd_dma_dev(struct dma_chan *c)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(c);

	return container_of(schan, struct sprd_dma_dev, channels[c->chan_id]);
}

static inline struct sprd_dma_desc *to_sprd_dma_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct sprd_dma_desc, vd);
}

static void sprd_dma_glb_update(struct sprd_dma_dev *sdev, u32 reg,
				u32 mask, u32 val)
{
	u32 orig = readl(sdev->glb_base + reg);
	u32 tmp;

	tmp = (orig & ~mask) | val;
	writel(tmp, sdev->glb_base + reg);
}

static void sprd_dma_chn_update(struct sprd_dma_chn *schan, u32 reg,
				u32 mask, u32 val)
{
	u32 orig = readl(schan->chn_base + reg);
	u32 tmp;

	tmp = (orig & ~mask) | val;
	writel(tmp, schan->chn_base + reg);
}

static int sprd_dma_enable(struct sprd_dma_dev *sdev)
{
	int ret;

	ret = clk_prepare_enable(sdev->clk);
	if (ret)
		return ret;

	/*
	 * The ashb_clk is optional and only used by the AGCP DMA controller,
	 * so only enable it when it was actually found.
	 */
	if (!IS_ERR(sdev->ashb_clk)) {
		ret = clk_prepare_enable(sdev->ashb_clk);
		if (ret)
			clk_disable_unprepare(sdev->clk);
	}

	return ret;
}

static void sprd_dma_disable(struct sprd_dma_dev *sdev)
{
	clk_disable_unprepare(sdev->clk);

	/* Disable the optional ashb_clk for AGCP DMA, if present. */
	if (!IS_ERR(sdev->ashb_clk))
		clk_disable_unprepare(sdev->ashb_clk);
}

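/*
 * Each peripheral request line has its own UID register in the global
 * register area. Binding a channel writes the channel number plus one into
 * that register (zero appears to mean "no channel bound", which is why
 * SPRD_DMA_SOFTWARE_UID is 0 and is skipped here).
 */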
static void sprd_dma_set_uid(struct sprd_dma_chn *schan)
{
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
	u32 dev_id = schan->dev_id;

	if (dev_id != SPRD_DMA_SOFTWARE_UID) {
		u32 uid_offset = SPRD_DMA_GLB_REQ_UID_OFFSET +
				 SPRD_DMA_GLB_REQ_UID(dev_id);

		writel(schan->chn_num + 1, sdev->glb_base + uid_offset);
	}
}

static void sprd_dma_unset_uid(struct sprd_dma_chn *schan)
{
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
	u32 dev_id = schan->dev_id;

	if (dev_id != SPRD_DMA_SOFTWARE_UID) {
		u32 uid_offset = SPRD_DMA_GLB_REQ_UID_OFFSET +
				 SPRD_DMA_GLB_REQ_UID(dev_id);

		writel(0, sdev->glb_base + uid_offset);
	}
}

static void sprd_dma_clear_int(struct sprd_dma_chn *schan)
{
	sprd_dma_chn_update(schan, SPRD_DMA_CHN_INTC,
			    SPRD_DMA_INT_MASK << SPRD_DMA_INT_CLR_OFFSET,
			    SPRD_DMA_INT_MASK << SPRD_DMA_INT_CLR_OFFSET);
}

static void sprd_dma_enable_chn(struct sprd_dma_chn *schan)
{
	sprd_dma_chn_update(schan, SPRD_DMA_CHN_CFG, SPRD_DMA_CHN_EN,
			    SPRD_DMA_CHN_EN);
}

static void sprd_dma_disable_chn(struct sprd_dma_chn *schan)
{
	sprd_dma_chn_update(schan, SPRD_DMA_CHN_CFG, SPRD_DMA_CHN_EN, 0);
}

static void sprd_dma_soft_request(struct sprd_dma_chn *schan)
{
	sprd_dma_chn_update(schan, SPRD_DMA_CHN_REQ, SPRD_DMA_REQ_EN,
			    SPRD_DMA_REQ_EN);
}

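/*
 * Pausing is not instantaneous: after the pause enable bit is set, the
 * hardware raises a pause status bit once the channel has actually stopped.
 * The enable path below therefore polls for that status (bounded by
 * SPRD_DMA_PAUSE_CNT iterations) before giving up with a warning.
 */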
static void sprd_dma_pause_resume(struct sprd_dma_chn *schan, bool enable)
{
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
	u32 pause, timeout = SPRD_DMA_PAUSE_CNT;

	if (enable) {
		sprd_dma_chn_update(schan, SPRD_DMA_CHN_PAUSE,
				    SPRD_DMA_PAUSE_EN, SPRD_DMA_PAUSE_EN);

		do {
			pause = readl(schan->chn_base + SPRD_DMA_CHN_PAUSE);
			if (pause & SPRD_DMA_PAUSE_STS)
				break;

			cpu_relax();
		} while (--timeout > 0);

		if (!timeout)
			dev_warn(sdev->dma_dev.dev,
				 "pause dma controller timeout\n");
	} else {
		sprd_dma_chn_update(schan, SPRD_DMA_CHN_PAUSE,
				    SPRD_DMA_PAUSE_EN, 0);
	}
}

static void sprd_dma_stop_and_disable(struct sprd_dma_chn *schan)
{
	u32 cfg = readl(schan->chn_base + SPRD_DMA_CHN_CFG);

	if (!(cfg & SPRD_DMA_CHN_EN))
		return;

	sprd_dma_pause_resume(schan, true);
	sprd_dma_disable_chn(schan);
}

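/*
 * The controller supports addresses wider than 32 bits: the low 32 bits
 * live in the SRC/DES address registers, while the high 4 bits are kept in
 * the top nibble of the WARP_PTR/WARP_TO registers, hence the
 * mask-and-shift reassembly below.
 */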
static unsigned long sprd_dma_get_src_addr(struct sprd_dma_chn *schan)
{
	unsigned long addr, addr_high;

	addr = readl(schan->chn_base + SPRD_DMA_CHN_SRC_ADDR);
	addr_high = readl(schan->chn_base + SPRD_DMA_CHN_WARP_PTR) &
		    SPRD_DMA_HIGH_ADDR_MASK;

	return addr | (addr_high << SPRD_DMA_HIGH_ADDR_OFFSET);
}

static unsigned long sprd_dma_get_dst_addr(struct sprd_dma_chn *schan)
{
	unsigned long addr, addr_high;

	addr = readl(schan->chn_base + SPRD_DMA_CHN_DES_ADDR);
	addr_high = readl(schan->chn_base + SPRD_DMA_CHN_WARP_TO) &
		    SPRD_DMA_HIGH_ADDR_MASK;

	return addr | (addr_high << SPRD_DMA_HIGH_ADDR_OFFSET);
}

static enum sprd_dma_int_type sprd_dma_get_int_type(struct sprd_dma_chn *schan)
{
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
	u32 intc_sts = readl(schan->chn_base + SPRD_DMA_CHN_INTC) &
		       SPRD_DMA_CHN_INT_STS;

	switch (intc_sts) {
	case SPRD_DMA_CFGERR_INT_STS:
		return SPRD_DMA_CFGERR_INT;
	case SPRD_DMA_LIST_INT_STS:
		return SPRD_DMA_LIST_INT;
	case SPRD_DMA_TRSC_INT_STS:
		return SPRD_DMA_TRANS_INT;
	case SPRD_DMA_BLK_INT_STS:
		return SPRD_DMA_BLK_INT;
	case SPRD_DMA_FRAG_INT_STS:
		return SPRD_DMA_FRAG_INT;
	default:
		dev_warn(sdev->dma_dev.dev, "incorrect dma interrupt type\n");
		return SPRD_DMA_NO_INT;
	}
}

static enum sprd_dma_req_mode sprd_dma_get_req_type(struct sprd_dma_chn *schan)
{
	u32 frag_reg = readl(schan->chn_base + SPRD_DMA_CHN_FRG_LEN);

	return (frag_reg >> SPRD_DMA_REQ_MODE_OFFSET) & SPRD_DMA_REQ_MODE_MASK;
}

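/*
 * 2-stage transfers chain two channels through the global GRP1/GRP2
 * registers: the source channel's completion event (selected by trg_mode)
 * triggers the destination channel. The source channel number goes into the
 * low bits, the destination channel number into the DEST_CHN field, and the
 * per-group interrupt bits are only set when the client asked for an
 * interrupt type.
 */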
static int sprd_dma_set_2stage_config(struct sprd_dma_chn *schan)
{
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
	u32 val, chn = schan->chn_num + 1;

	switch (schan->chn_mode) {
	case SPRD_DMA_SRC_CHN0:
		val = chn & SPRD_DMA_GLB_SRC_CHN_MASK;
		val |= BIT(schan->trg_mode - 1) << SPRD_DMA_GLB_TRG_OFFSET;
		val |= SPRD_DMA_GLB_2STAGE_EN;
		if (schan->int_type != SPRD_DMA_NO_INT)
			val |= SPRD_DMA_GLB_SRC_INT;

		sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP1, val, val);
		break;

	case SPRD_DMA_SRC_CHN1:
		val = chn & SPRD_DMA_GLB_SRC_CHN_MASK;
		val |= BIT(schan->trg_mode - 1) << SPRD_DMA_GLB_TRG_OFFSET;
		val |= SPRD_DMA_GLB_2STAGE_EN;
		if (schan->int_type != SPRD_DMA_NO_INT)
			val |= SPRD_DMA_GLB_SRC_INT;

		sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP2, val, val);
		break;

	case SPRD_DMA_DST_CHN0:
		val = (chn << SPRD_DMA_GLB_DEST_CHN_OFFSET) &
			SPRD_DMA_GLB_DEST_CHN_MASK;
		val |= SPRD_DMA_GLB_2STAGE_EN;
		if (schan->int_type != SPRD_DMA_NO_INT)
			val |= SPRD_DMA_GLB_DEST_INT;

		sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP1, val, val);
		break;

	case SPRD_DMA_DST_CHN1:
		val = (chn << SPRD_DMA_GLB_DEST_CHN_OFFSET) &
			SPRD_DMA_GLB_DEST_CHN_MASK;
		val |= SPRD_DMA_GLB_2STAGE_EN;
		if (schan->int_type != SPRD_DMA_NO_INT)
			val |= SPRD_DMA_GLB_DEST_INT;

		sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP2, val, val);
		break;

	default:
		dev_err(sdev->dma_dev.dev, "invalid channel mode setting %d\n",
			schan->chn_mode);
		return -EINVAL;
	}

	return 0;
}

static void sprd_dma_set_pending(struct sprd_dma_chn *schan, bool enable)
{
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
	u32 reg, val, req_id;

	if (schan->dev_id == SPRD_DMA_SOFTWARE_UID)
		return;

	/* The DMA request id always starts from 0. */
	req_id = schan->dev_id - 1;

	if (req_id < 32) {
		reg = SPRD_DMA_GLB_REQ_PEND0_EN;
		val = BIT(req_id);
	} else {
		reg = SPRD_DMA_GLB_REQ_PEND1_EN;
		val = BIT(req_id - 32);
	}

	sprd_dma_glb_update(sdev, reg, val, enable ? val : 0);
}

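/*
 * Load one descriptor's shadow of the channel register file into the
 * hardware. The REQ register is written last, presumably so that a request
 * cannot fire before the rest of the configuration is in place.
 */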
static void sprd_dma_set_chn_config(struct sprd_dma_chn *schan,
				    struct sprd_dma_desc *sdesc)
{
	struct sprd_dma_chn_hw *cfg = &sdesc->chn_hw;

	writel(cfg->pause, schan->chn_base + SPRD_DMA_CHN_PAUSE);
	writel(cfg->cfg, schan->chn_base + SPRD_DMA_CHN_CFG);
	writel(cfg->intc, schan->chn_base + SPRD_DMA_CHN_INTC);
	writel(cfg->src_addr, schan->chn_base + SPRD_DMA_CHN_SRC_ADDR);
	writel(cfg->des_addr, schan->chn_base + SPRD_DMA_CHN_DES_ADDR);
	writel(cfg->frg_len, schan->chn_base + SPRD_DMA_CHN_FRG_LEN);
	writel(cfg->blk_len, schan->chn_base + SPRD_DMA_CHN_BLK_LEN);
	writel(cfg->trsc_len, schan->chn_base + SPRD_DMA_CHN_TRSC_LEN);
	writel(cfg->trsf_step, schan->chn_base + SPRD_DMA_CHN_TRSF_STEP);
	writel(cfg->wrap_ptr, schan->chn_base + SPRD_DMA_CHN_WARP_PTR);
	writel(cfg->wrap_to, schan->chn_base + SPRD_DMA_CHN_WARP_TO);
	writel(cfg->llist_ptr, schan->chn_base + SPRD_DMA_CHN_LLIST_PTR);
	writel(cfg->frg_step, schan->chn_base + SPRD_DMA_CHN_FRAG_STEP);
	writel(cfg->src_blk_step, schan->chn_base + SPRD_DMA_CHN_SRC_BLK_STEP);
	writel(cfg->des_blk_step, schan->chn_base + SPRD_DMA_CHN_DES_BLK_STEP);
	writel(cfg->req, schan->chn_base + SPRD_DMA_CHN_REQ);
}

static void sprd_dma_start(struct sprd_dma_chn *schan)
{
	struct virt_dma_desc *vd = vchan_next_desc(&schan->vc);

	if (!vd)
		return;

	list_del(&vd->node);
	schan->cur_desc = to_sprd_dma_desc(vd);

	/*
	 * Set the 2-stage configuration if the channel starts a 2-stage
	 * transfer.
	 */
	if (schan->chn_mode && sprd_dma_set_2stage_config(schan))
		return;

	/*
	 * Copy the DMA configuration from the DMA descriptor to this hardware
	 * channel.
	 */
	sprd_dma_set_chn_config(schan, schan->cur_desc);
	sprd_dma_set_uid(schan);
	sprd_dma_set_pending(schan, true);
	sprd_dma_enable_chn(schan);

	if (schan->dev_id == SPRD_DMA_SOFTWARE_UID &&
	    schan->chn_mode != SPRD_DMA_DST_CHN0 &&
	    schan->chn_mode != SPRD_DMA_DST_CHN1)
		sprd_dma_soft_request(schan);
}

static void sprd_dma_stop(struct sprd_dma_chn *schan)
{
	sprd_dma_stop_and_disable(schan);
	sprd_dma_set_pending(schan, false);
	sprd_dma_unset_uid(schan);
	sprd_dma_clear_int(schan);
	schan->cur_desc = NULL;
}

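/*
 * The interrupt type and request mode enums appear to be ordered so that a
 * larger interrupt type implies a larger unit of completed work; the check
 * below treats any interrupt numerically above the descriptor's request
 * mode as "transfer done".
 */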
static bool sprd_dma_check_trans_done(struct sprd_dma_desc *sdesc,
				      enum sprd_dma_int_type int_type,
				      enum sprd_dma_req_mode req_mode)
{
	if (int_type == SPRD_DMA_NO_INT)
		return false;

	if (int_type >= req_mode + 1)
		return true;
	else
		return false;
}

static irqreturn_t dma_irq_handle(int irq, void *dev_id)
{
	struct sprd_dma_dev *sdev = (struct sprd_dma_dev *)dev_id;
	u32 irq_status = readl(sdev->glb_base + SPRD_DMA_GLB_INT_MSK_STS);
	struct sprd_dma_chn *schan;
	struct sprd_dma_desc *sdesc;
	enum sprd_dma_req_mode req_type;
	enum sprd_dma_int_type int_type;
	bool trans_done = false, cyclic = false;
	u32 i;

	while (irq_status) {
		i = __ffs(irq_status);
		irq_status &= (irq_status - 1);
		schan = &sdev->channels[i];

		spin_lock(&schan->vc.lock);

		sdesc = schan->cur_desc;
		if (!sdesc) {
			spin_unlock(&schan->vc.lock);
			return IRQ_HANDLED;
		}

		int_type = sprd_dma_get_int_type(schan);
		req_type = sprd_dma_get_req_type(schan);
		sprd_dma_clear_int(schan);

		/* cyclic mode schedule callback */
		cyclic = schan->linklist.phy_addr ? true : false;
		if (cyclic) {
			vchan_cyclic_callback(&sdesc->vd);
		} else {
			/* Check if the dma request descriptor is done. */
			trans_done = sprd_dma_check_trans_done(sdesc, int_type,
							       req_type);
			if (trans_done) {
				vchan_cookie_complete(&sdesc->vd);
				schan->cur_desc = NULL;
				sprd_dma_start(schan);
			}
		}
		spin_unlock(&schan->vc.lock);
	}

	return IRQ_HANDLED;
}

static int sprd_dma_alloc_chan_resources(struct dma_chan *chan)
{
	return pm_runtime_get_sync(chan->device->dev);
}

static void sprd_dma_free_chan_resources(struct dma_chan *chan)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	struct virt_dma_desc *cur_vd = NULL;
	unsigned long flags;

	spin_lock_irqsave(&schan->vc.lock, flags);
	if (schan->cur_desc)
		cur_vd = &schan->cur_desc->vd;

	sprd_dma_stop(schan);
	spin_unlock_irqrestore(&schan->vc.lock, flags);

	if (cur_vd)
		sprd_dma_free_desc(cur_vd);

	vchan_free_chan_resources(&schan->vc);
	pm_runtime_put(chan->device->dev);
}

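/*
 * Residue reporting: for a descriptor still sitting on the issued queue the
 * full programmed length (transaction, block or fragment, whichever is
 * non-zero) is reported; for the descriptor currently in flight the current
 * hardware source or destination address is reported instead.
 */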
static enum dma_status sprd_dma_tx_status(struct dma_chan *chan,
					  dma_cookie_t cookie,
					  struct dma_tx_state *txstate)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;
	u32 pos;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&schan->vc.lock, flags);
	vd = vchan_find_desc(&schan->vc, cookie);
	if (vd) {
		struct sprd_dma_desc *sdesc = to_sprd_dma_desc(vd);
		struct sprd_dma_chn_hw *hw = &sdesc->chn_hw;

		if (hw->trsc_len > 0)
			pos = hw->trsc_len;
		else if (hw->blk_len > 0)
			pos = hw->blk_len;
		else if (hw->frg_len > 0)
			pos = hw->frg_len;
		else
			pos = 0;
	} else if (schan->cur_desc && schan->cur_desc->vd.tx.cookie == cookie) {
		struct sprd_dma_desc *sdesc = schan->cur_desc;

		if (sdesc->dir == DMA_DEV_TO_MEM)
			pos = sprd_dma_get_dst_addr(schan);
		else
			pos = sprd_dma_get_src_addr(schan);
	} else {
		pos = 0;
	}
	spin_unlock_irqrestore(&schan->vc.lock, flags);

	dma_set_residue(txstate, pos);
	return ret;
}

static void sprd_dma_issue_pending(struct dma_chan *chan)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&schan->vc.lock, flags);
	if (vchan_issue_pending(&schan->vc) && !schan->cur_desc)
		sprd_dma_start(schan);
	spin_unlock_irqrestore(&schan->vc.lock, flags);
}

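/*
 * The dma_slave_buswidth values are powers of two (1/2/4/8 bytes), so
 * ffs() - 1 maps them to the 2-bit register encoding 0/1/2/3 used by
 * enum sprd_dma_datawidth, while the address step uses the byte count
 * itself.
 */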
static int sprd_dma_get_datawidth(enum dma_slave_buswidth buswidth)
{
	switch (buswidth) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		return ffs(buswidth) - 1;
	default:
		return -EINVAL;
	}
}

static int sprd_dma_get_step(enum dma_slave_buswidth buswidth)
{
	switch (buswidth) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		return buswidth;
	default:
		return -EINVAL;
	}
}

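/*
 * The dmaengine 'flags' argument is overloaded for this controller: the
 * request mode lives at SPRD_DMA_REQ_SHIFT and the interrupt enable bits in
 * the low bits (SPRD_DMA_INT_MASK), and both are unpacked below. The shift
 * constants come from <linux/dma/sprd-dma.h>.
 */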
static int sprd_dma_fill_desc(struct dma_chan *chan,
			      struct sprd_dma_chn_hw *hw,
			      unsigned int sglen, int sg_index,
			      dma_addr_t src, dma_addr_t dst, u32 len,
			      enum dma_transfer_direction dir,
			      unsigned long flags,
			      struct dma_slave_config *slave_cfg)
{
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(chan);
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	enum sprd_dma_chn_mode chn_mode = schan->chn_mode;
	u32 req_mode = (flags >> SPRD_DMA_REQ_SHIFT) & SPRD_DMA_REQ_MODE_MASK;
	u32 int_mode = flags & SPRD_DMA_INT_MASK;
	int src_datawidth, dst_datawidth, src_step, dst_step;
	u32 temp, fix_mode = 0, fix_en = 0;
	phys_addr_t llist_ptr;

	if (dir == DMA_MEM_TO_DEV) {
		src_step = sprd_dma_get_step(slave_cfg->src_addr_width);
		if (src_step < 0) {
			dev_err(sdev->dma_dev.dev, "invalid source step\n");
			return src_step;
		}

		/*
		 * For a 2-stage transfer, the destination channel step cannot
		 * be 0, since the destination device is the AON IRAM.
		 */
		if (chn_mode == SPRD_DMA_DST_CHN0 ||
		    chn_mode == SPRD_DMA_DST_CHN1)
			dst_step = src_step;
		else
			dst_step = SPRD_DMA_NONE_STEP;
	} else {
		dst_step = sprd_dma_get_step(slave_cfg->dst_addr_width);
		if (dst_step < 0) {
			dev_err(sdev->dma_dev.dev, "invalid destination step\n");
			return dst_step;
		}
		src_step = SPRD_DMA_NONE_STEP;
	}

	src_datawidth = sprd_dma_get_datawidth(slave_cfg->src_addr_width);
	if (src_datawidth < 0) {
		dev_err(sdev->dma_dev.dev, "invalid source datawidth\n");
		return src_datawidth;
	}

	dst_datawidth = sprd_dma_get_datawidth(slave_cfg->dst_addr_width);
	if (dst_datawidth < 0) {
		dev_err(sdev->dma_dev.dev, "invalid destination datawidth\n");
		return dst_datawidth;
	}

	if (slave_cfg->slave_id)
		schan->dev_id = slave_cfg->slave_id;

	hw->cfg = SPRD_DMA_DONOT_WAIT_BDONE << SPRD_DMA_WAIT_BDONE_OFFSET;

	/*
	 * wrap_ptr and wrap_to hold the high 4 bits of the source and
	 * destination addresses.
	 */
	hw->wrap_ptr = (src >> SPRD_DMA_HIGH_ADDR_OFFSET) & SPRD_DMA_HIGH_ADDR_MASK;
	hw->wrap_to = (dst >> SPRD_DMA_HIGH_ADDR_OFFSET) & SPRD_DMA_HIGH_ADDR_MASK;
	hw->src_addr = src & SPRD_DMA_LOW_ADDR_MASK;
	hw->des_addr = dst & SPRD_DMA_LOW_ADDR_MASK;

	/*
	 * Fix mode can only be enabled when exactly one of the source and
	 * destination steps is 0; if both are 0 or both are non-zero, it
	 * stays disabled.
	 */
	if ((src_step != 0 && dst_step != 0) || (src_step | dst_step) == 0) {
		fix_en = 0;
	} else {
		fix_en = 1;
		if (src_step)
			fix_mode = 1;
		else
			fix_mode = 0;
	}

	hw->intc = int_mode | SPRD_DMA_CFG_ERR_INT_EN;

	temp = src_datawidth << SPRD_DMA_SRC_DATAWIDTH_OFFSET;
	temp |= dst_datawidth << SPRD_DMA_DES_DATAWIDTH_OFFSET;
	temp |= req_mode << SPRD_DMA_REQ_MODE_OFFSET;
	temp |= fix_mode << SPRD_DMA_FIX_SEL_OFFSET;
	temp |= fix_en << SPRD_DMA_FIX_EN_OFFSET;
	temp |= schan->linklist.wrap_addr ?
		SPRD_DMA_WRAP_EN | SPRD_DMA_WRAP_SEL_DEST : 0;
	temp |= slave_cfg->src_maxburst & SPRD_DMA_FRG_LEN_MASK;
	hw->frg_len = temp;

	hw->blk_len = slave_cfg->src_maxburst & SPRD_DMA_BLK_LEN_MASK;
	hw->trsc_len = len & SPRD_DMA_TRSC_LEN_MASK;

	temp = (dst_step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_DEST_TRSF_STEP_OFFSET;
	temp |= (src_step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_SRC_TRSF_STEP_OFFSET;
	hw->trsf_step = temp;

	/* link-list configuration */
	if (schan->linklist.phy_addr) {
		hw->cfg |= SPRD_DMA_LINKLIST_EN;

		/* link-list index */
		temp = sglen ? (sg_index + 1) % sglen : 0;

		/* Next link-list configuration's physical address offset */
		temp = temp * sizeof(*hw) + SPRD_DMA_CHN_SRC_ADDR;

		/*
		 * Point the link-list pointer at the next link-list
		 * configuration's physical address.
		 */
		llist_ptr = schan->linklist.phy_addr + temp;
		hw->llist_ptr = lower_32_bits(llist_ptr);
		hw->src_blk_step = (upper_32_bits(llist_ptr) << SPRD_DMA_LLIST_HIGH_SHIFT) &
			SPRD_DMA_LLIST_HIGH_MASK;

		if (schan->linklist.wrap_addr) {
			hw->wrap_ptr |= schan->linklist.wrap_addr &
				SPRD_DMA_WRAP_ADDR_MASK;
			hw->wrap_to |= dst & SPRD_DMA_WRAP_ADDR_MASK;
		}
	} else {
		hw->llist_ptr = 0;
		hw->src_blk_step = 0;
	}

	hw->frg_step = 0;
	hw->des_blk_step = 0;
	return 0;
}

static int sprd_dma_fill_linklist_desc(struct dma_chan *chan,
				       unsigned int sglen, int sg_index,
				       dma_addr_t src, dma_addr_t dst, u32 len,
				       enum dma_transfer_direction dir,
				       unsigned long flags,
				       struct dma_slave_config *slave_cfg)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	struct sprd_dma_chn_hw *hw;

	if (!schan->linklist.virt_addr)
		return -EINVAL;

	hw = (struct sprd_dma_chn_hw *)(schan->linklist.virt_addr +
					sg_index * sizeof(*hw));

	return sprd_dma_fill_desc(chan, hw, sglen, sg_index, src, dst, len,
				  dir, flags, slave_cfg);
}

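/*
 * For memcpy, the widest data width and address step that the buffer length
 * is aligned to is chosen (8, 4, 2 or 1 bytes), so an aligned copy moves
 * fewer, larger beats. Note that only the length is checked; the src/dest
 * addresses are presumably expected to share that alignment.
 */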
static struct dma_async_tx_descriptor *
sprd_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
			 size_t len, unsigned long flags)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	struct sprd_dma_desc *sdesc;
	struct sprd_dma_chn_hw *hw;
	enum sprd_dma_datawidth datawidth;
	u32 step, temp;

	sdesc = kzalloc(sizeof(*sdesc), GFP_NOWAIT);
	if (!sdesc)
		return NULL;

	hw = &sdesc->chn_hw;

	hw->cfg = SPRD_DMA_DONOT_WAIT_BDONE << SPRD_DMA_WAIT_BDONE_OFFSET;
	hw->intc = SPRD_DMA_TRANS_INT | SPRD_DMA_CFG_ERR_INT_EN;
	hw->src_addr = src & SPRD_DMA_LOW_ADDR_MASK;
	hw->des_addr = dest & SPRD_DMA_LOW_ADDR_MASK;
	hw->wrap_ptr = (src >> SPRD_DMA_HIGH_ADDR_OFFSET) &
		SPRD_DMA_HIGH_ADDR_MASK;
	hw->wrap_to = (dest >> SPRD_DMA_HIGH_ADDR_OFFSET) &
		SPRD_DMA_HIGH_ADDR_MASK;

	if (IS_ALIGNED(len, 8)) {
		datawidth = SPRD_DMA_DATAWIDTH_8_BYTES;
		step = SPRD_DMA_DWORD_STEP;
	} else if (IS_ALIGNED(len, 4)) {
		datawidth = SPRD_DMA_DATAWIDTH_4_BYTES;
		step = SPRD_DMA_WORD_STEP;
	} else if (IS_ALIGNED(len, 2)) {
		datawidth = SPRD_DMA_DATAWIDTH_2_BYTES;
		step = SPRD_DMA_SHORT_STEP;
	} else {
		datawidth = SPRD_DMA_DATAWIDTH_1_BYTE;
		step = SPRD_DMA_BYTE_STEP;
	}

	temp = datawidth << SPRD_DMA_SRC_DATAWIDTH_OFFSET;
	temp |= datawidth << SPRD_DMA_DES_DATAWIDTH_OFFSET;
	temp |= SPRD_DMA_TRANS_REQ << SPRD_DMA_REQ_MODE_OFFSET;
	temp |= len & SPRD_DMA_FRG_LEN_MASK;
	hw->frg_len = temp;

	hw->blk_len = len & SPRD_DMA_BLK_LEN_MASK;
	hw->trsc_len = len & SPRD_DMA_TRSC_LEN_MASK;

	temp = (step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_DEST_TRSF_STEP_OFFSET;
	temp |= (step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_SRC_TRSF_STEP_OFFSET;
	hw->trsf_step = temp;

	return vchan_tx_prep(&schan->vc, &sdesc->vd, flags);
}

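/*
 * A minimal client-side sketch of link-list mode (illustrative only; the
 * local variable names are hypothetical): the caller allocates DMA-coherent
 * memory with room for one hardware configuration node per scatterlist
 * entry and passes its addresses through the 'context' argument, which the
 * callback below copies into schan->linklist:
 *
 *	struct sprd_dma_linklist ll = {};
 *	dma_addr_t phy;
 *	void *virt;
 *
 *	virt = dma_alloc_coherent(dev, sglen * sizeof(struct sprd_dma_chn_hw),
 *				  &phy, GFP_KERNEL);
 *	ll.virt_addr = (unsigned long)virt;
 *	ll.phy_addr = phy;
 *	desc = chan->device->device_prep_slave_sg(chan, sgl, sglen, dir,
 *						  flags, &ll);
 */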
static struct dma_async_tx_descriptor *
sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		       unsigned int sglen, enum dma_transfer_direction dir,
		       unsigned long flags, void *context)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	struct dma_slave_config *slave_cfg = &schan->slave_cfg;
	dma_addr_t src = 0, dst = 0;
	dma_addr_t start_src = 0, start_dst = 0;
	struct sprd_dma_desc *sdesc;
	struct scatterlist *sg;
	u32 len = 0;
	int ret, i;

	if (!is_slave_direction(dir))
		return NULL;

	if (context) {
		struct sprd_dma_linklist *ll_cfg =
			(struct sprd_dma_linklist *)context;

		schan->linklist.phy_addr = ll_cfg->phy_addr;
		schan->linklist.virt_addr = ll_cfg->virt_addr;
		schan->linklist.wrap_addr = ll_cfg->wrap_addr;
	} else {
		schan->linklist.phy_addr = 0;
		schan->linklist.virt_addr = 0;
		schan->linklist.wrap_addr = 0;
	}

	/*
	 * Set channel mode, interrupt mode and trigger mode for 2-stage
	 * transfer.
	 */
	schan->chn_mode =
		(flags >> SPRD_DMA_CHN_MODE_SHIFT) & SPRD_DMA_CHN_MODE_MASK;
	schan->trg_mode =
		(flags >> SPRD_DMA_TRG_MODE_SHIFT) & SPRD_DMA_TRG_MODE_MASK;
	schan->int_type = flags & SPRD_DMA_INT_TYPE_MASK;

	sdesc = kzalloc(sizeof(*sdesc), GFP_NOWAIT);
	if (!sdesc)
		return NULL;

	sdesc->dir = dir;

	for_each_sg(sgl, sg, sglen, i) {
		len = sg_dma_len(sg);

		if (dir == DMA_MEM_TO_DEV) {
			src = sg_dma_address(sg);
			dst = slave_cfg->dst_addr;
		} else {
			src = slave_cfg->src_addr;
			dst = sg_dma_address(sg);
		}

		if (!i) {
			start_src = src;
			start_dst = dst;
		}

		/*
		 * The link-list mode needs at least 2 link-list
		 * configurations, so with a single sg entry no link-list
		 * configuration needs to be filled.
		 */
		if (sglen < 2)
			break;

		ret = sprd_dma_fill_linklist_desc(chan, sglen, i, src, dst, len,
						  dir, flags, slave_cfg);
		if (ret) {
			kfree(sdesc);
			return NULL;
		}
	}

	ret = sprd_dma_fill_desc(chan, &sdesc->chn_hw, 0, 0, start_src,
				 start_dst, len, dir, flags, slave_cfg);
	if (ret) {
		kfree(sdesc);
		return NULL;
	}

	return vchan_tx_prep(&schan->vc, &sdesc->vd, flags);
}

static int sprd_dma_slave_config(struct dma_chan *chan,
				 struct dma_slave_config *config)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	struct dma_slave_config *slave_cfg = &schan->slave_cfg;

	memcpy(slave_cfg, config, sizeof(*config));
	return 0;
}

static int sprd_dma_pause(struct dma_chan *chan)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&schan->vc.lock, flags);
	sprd_dma_pause_resume(schan, true);
	spin_unlock_irqrestore(&schan->vc.lock, flags);

	return 0;
}

static int sprd_dma_resume(struct dma_chan *chan)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&schan->vc.lock, flags);
	sprd_dma_pause_resume(schan, false);
	spin_unlock_irqrestore(&schan->vc.lock, flags);

	return 0;
}

static int sprd_dma_terminate_all(struct dma_chan *chan)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	struct virt_dma_desc *cur_vd = NULL;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&schan->vc.lock, flags);
	if (schan->cur_desc)
		cur_vd = &schan->cur_desc->vd;

	sprd_dma_stop(schan);

	vchan_get_all_descriptors(&schan->vc, &head);
	spin_unlock_irqrestore(&schan->vc.lock, flags);

	if (cur_vd)
		sprd_dma_free_desc(cur_vd);

	vchan_dma_desc_free_list(&schan->vc, &head);
	return 0;
}

static void sprd_dma_free_desc(struct virt_dma_desc *vd)
{
	struct sprd_dma_desc *sdesc = to_sprd_dma_desc(vd);

	kfree(sdesc);
}

static bool sprd_dma_filter_fn(struct dma_chan *chan, void *param)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	u32 slave_id = *(u32 *)param;

	schan->dev_id = slave_id;
	return true;
}

static int sprd_dma_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct sprd_dma_dev *sdev;
	struct sprd_dma_chn *dma_chn;
	u32 chn_count;
	int ret, i;

	ret = device_property_read_u32(&pdev->dev, "#dma-channels", &chn_count);
	if (ret) {
		dev_err(&pdev->dev, "get dma channels count failed\n");
		return ret;
	}

	sdev = devm_kzalloc(&pdev->dev,
			    struct_size(sdev, channels, chn_count),
			    GFP_KERNEL);
	if (!sdev)
		return -ENOMEM;

	sdev->clk = devm_clk_get(&pdev->dev, "enable");
	if (IS_ERR(sdev->clk)) {
		dev_err(&pdev->dev, "get enable clock failed\n");
		return PTR_ERR(sdev->clk);
	}

	/* ashb clock is optional for AGCP DMA */
	sdev->ashb_clk = devm_clk_get(&pdev->dev, "ashb_eb");
	if (IS_ERR(sdev->ashb_clk))
		dev_warn(&pdev->dev, "no optional ashb eb clock\n");

	/*
	 * There are three DMA controllers: AP DMA, AON DMA and AGCP DMA. The
	 * AGCP DMA controller may run without an irq: leaving the irq
	 * unrequested saves system power, since its DMA interrupts then
	 * cannot resume the system. Thus the DMA interrupts property is
	 * optional.
	 */
	sdev->irq = platform_get_irq(pdev, 0);
	if (sdev->irq > 0) {
		ret = devm_request_irq(&pdev->dev, sdev->irq, dma_irq_handle,
				       0, "sprd_dma", (void *)sdev);
		if (ret < 0) {
			dev_err(&pdev->dev, "request dma irq failed\n");
			return ret;
		}
	} else {
		dev_warn(&pdev->dev, "no interrupts for the dma controller\n");
	}

	sdev->glb_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(sdev->glb_base))
		return PTR_ERR(sdev->glb_base);

	dma_cap_set(DMA_MEMCPY, sdev->dma_dev.cap_mask);
	sdev->total_chns = chn_count;
	sdev->dma_dev.chancnt = chn_count;
	INIT_LIST_HEAD(&sdev->dma_dev.channels);
	INIT_LIST_HEAD(&sdev->dma_dev.global_node);
	sdev->dma_dev.dev = &pdev->dev;
	sdev->dma_dev.device_alloc_chan_resources = sprd_dma_alloc_chan_resources;
	sdev->dma_dev.device_free_chan_resources = sprd_dma_free_chan_resources;
	sdev->dma_dev.device_tx_status = sprd_dma_tx_status;
	sdev->dma_dev.device_issue_pending = sprd_dma_issue_pending;
	sdev->dma_dev.device_prep_dma_memcpy = sprd_dma_prep_dma_memcpy;
	sdev->dma_dev.device_prep_slave_sg = sprd_dma_prep_slave_sg;
	sdev->dma_dev.device_config = sprd_dma_slave_config;
	sdev->dma_dev.device_pause = sprd_dma_pause;
	sdev->dma_dev.device_resume = sprd_dma_resume;
	sdev->dma_dev.device_terminate_all = sprd_dma_terminate_all;

	for (i = 0; i < chn_count; i++) {
		dma_chn = &sdev->channels[i];
		dma_chn->chn_num = i;
		dma_chn->cur_desc = NULL;
		/* get each channel's registers base address. */
		dma_chn->chn_base = sdev->glb_base + SPRD_DMA_CHN_REG_OFFSET +
				    SPRD_DMA_CHN_REG_LENGTH * i;

		dma_chn->vc.desc_free = sprd_dma_free_desc;
		vchan_init(&dma_chn->vc, &sdev->dma_dev);
	}

	platform_set_drvdata(pdev, sdev);
	ret = sprd_dma_enable(sdev);
	if (ret)
		return ret;

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0)
		goto err_rpm;

	ret = dma_async_device_register(&sdev->dma_dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "register dma device failed:%d\n", ret);
		goto err_register;
	}

	sprd_dma_info.dma_cap = sdev->dma_dev.cap_mask;
	ret = of_dma_controller_register(np, of_dma_simple_xlate,
					 &sprd_dma_info);
	if (ret)
		goto err_of_register;

	pm_runtime_put(&pdev->dev);
	return 0;

err_of_register:
	dma_async_device_unregister(&sdev->dma_dev);
err_register:
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
err_rpm:
	sprd_dma_disable(sdev);
	return ret;
}

static int sprd_dma_remove(struct platform_device *pdev)
{
	struct sprd_dma_dev *sdev = platform_get_drvdata(pdev);
	struct sprd_dma_chn *c, *cn;
	int ret;

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0)
		return ret;

	/* explicitly free the irq */
	if (sdev->irq > 0)
		devm_free_irq(&pdev->dev, sdev->irq, sdev);

	list_for_each_entry_safe(c, cn, &sdev->dma_dev.channels,
				 vc.chan.device_node) {
		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
	}

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&sdev->dma_dev);
	sprd_dma_disable(sdev);

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return 0;
}

static const struct of_device_id sprd_dma_match[] = {
	{ .compatible = "sprd,sc9860-dma", },
	{},
};
MODULE_DEVICE_TABLE(of, sprd_dma_match);

static int __maybe_unused sprd_dma_runtime_suspend(struct device *dev)
{
	struct sprd_dma_dev *sdev = dev_get_drvdata(dev);

	sprd_dma_disable(sdev);
	return 0;
}

static int __maybe_unused sprd_dma_runtime_resume(struct device *dev)
{
	struct sprd_dma_dev *sdev = dev_get_drvdata(dev);
	int ret;

	ret = sprd_dma_enable(sdev);
	if (ret)
		dev_err(sdev->dma_dev.dev, "enable dma failed\n");

	return ret;
}

static const struct dev_pm_ops sprd_dma_pm_ops = {
	SET_RUNTIME_PM_OPS(sprd_dma_runtime_suspend,
			   sprd_dma_runtime_resume,
			   NULL)
};

static struct platform_driver sprd_dma_driver = {
	.probe = sprd_dma_probe,
	.remove = sprd_dma_remove,
	.driver = {
		.name = "sprd-dma",
		.of_match_table = sprd_dma_match,
		.pm = &sprd_dma_pm_ops,
	},
};
module_platform_driver(sprd_dma_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("DMA driver for Spreadtrum");
MODULE_AUTHOR("Baolin Wang <baolin.wang@spreadtrum.com>");
MODULE_AUTHOR("Eric Long <eric.long@spreadtrum.com>");
MODULE_ALIAS("platform:sprd-dma");