fsl-qdma.c

// SPDX-License-Identifier: GPL-2.0
// Copyright 2014-2015 Freescale
// Copyright 2018 NXP

/*
 * Driver for NXP Layerscape Queue Direct Memory Access Controller
 *
 * Author:
 *  Wen He <wen.he_1@nxp.com>
 *  Jiaheng Fan <jiaheng.fan@nxp.com>
 *
 */

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/of_dma.h>
#include <linux/dma-mapping.h>

#include "virt-dma.h"
#include "fsldma.h"

/* Register related definition */
#define FSL_QDMA_DMR 0x0
#define FSL_QDMA_DSR 0x4

#define FSL_QDMA_DEIER 0xe00
#define FSL_QDMA_DEDR 0xe04
#define FSL_QDMA_DECFDW0R 0xe10
#define FSL_QDMA_DECFDW1R 0xe14
#define FSL_QDMA_DECFDW2R 0xe18
#define FSL_QDMA_DECFDW3R 0xe1c
#define FSL_QDMA_DECFQIDR 0xe30
#define FSL_QDMA_DECBR 0xe34

#define FSL_QDMA_BCQMR(x) (0xc0 + 0x100 * (x))
#define FSL_QDMA_BCQSR(x) (0xc4 + 0x100 * (x))
#define FSL_QDMA_BCQEDPA_SADDR(x) (0xc8 + 0x100 * (x))
#define FSL_QDMA_BCQDPA_SADDR(x) (0xcc + 0x100 * (x))
#define FSL_QDMA_BCQEEPA_SADDR(x) (0xd0 + 0x100 * (x))
#define FSL_QDMA_BCQEPA_SADDR(x) (0xd4 + 0x100 * (x))
#define FSL_QDMA_BCQIER(x) (0xe0 + 0x100 * (x))
#define FSL_QDMA_BCQIDR(x) (0xe4 + 0x100 * (x))

#define FSL_QDMA_SQDPAR 0x80c
#define FSL_QDMA_SQEPAR 0x814
#define FSL_QDMA_BSQMR 0x800
#define FSL_QDMA_BSQSR 0x804
#define FSL_QDMA_BSQICR 0x828
#define FSL_QDMA_CQMR 0xa00
#define FSL_QDMA_CQDSCR1 0xa08
#define FSL_QDMA_CQDSCR2 0xa0c
#define FSL_QDMA_CQIER 0xa10
#define FSL_QDMA_CQEDR 0xa14
#define FSL_QDMA_SQCCMR 0xa20

/* Registers for bit and genmask */
#define FSL_QDMA_CQIDR_SQT BIT(15)
#define QDMA_CCDF_FORMAT BIT(29)
#define QDMA_CCDF_SER BIT(30)
#define QDMA_SG_FIN BIT(30)
#define QDMA_SG_LEN_MASK GENMASK(29, 0)
#define QDMA_CCDF_MASK GENMASK(28, 20)

#define FSL_QDMA_DEDR_CLEAR GENMASK(31, 0)
#define FSL_QDMA_BCQIDR_CLEAR GENMASK(31, 0)
#define FSL_QDMA_DEIER_CLEAR GENMASK(31, 0)

#define FSL_QDMA_BCQIER_CQTIE BIT(15)
#define FSL_QDMA_BCQIER_CQPEIE BIT(23)
#define FSL_QDMA_BSQICR_ICEN BIT(31)
#define FSL_QDMA_BSQICR_ICST(x) ((x) << 16)
#define FSL_QDMA_CQIER_MEIE BIT(31)
#define FSL_QDMA_CQIER_TEIE BIT(0)
#define FSL_QDMA_SQCCMR_ENTER_WM BIT(21)

#define FSL_QDMA_BCQMR_EN BIT(31)
#define FSL_QDMA_BCQMR_EI BIT(30)
#define FSL_QDMA_BCQMR_CD_THLD(x) ((x) << 20)
#define FSL_QDMA_BCQMR_CQ_SIZE(x) ((x) << 16)

#define FSL_QDMA_BCQSR_QF BIT(16)
#define FSL_QDMA_BCQSR_XOFF BIT(0)

#define FSL_QDMA_BSQMR_EN BIT(31)
#define FSL_QDMA_BSQMR_DI BIT(30)
#define FSL_QDMA_BSQMR_CQ_SIZE(x) ((x) << 16)

#define FSL_QDMA_BSQSR_QE BIT(17)

#define FSL_QDMA_DMR_DQD BIT(30)
#define FSL_QDMA_DSR_DB BIT(31)

/* Size related definition */
#define FSL_QDMA_QUEUE_MAX 8
#define FSL_QDMA_COMMAND_BUFFER_SIZE 64
#define FSL_QDMA_DESCRIPTOR_BUFFER_SIZE 32
#define FSL_QDMA_CIRCULAR_DESC_SIZE_MIN 64
#define FSL_QDMA_CIRCULAR_DESC_SIZE_MAX 16384
#define FSL_QDMA_QUEUE_NUM_MAX 8

/* Field definition for CMD */
#define FSL_QDMA_CMD_RWTTYPE 0x4
#define FSL_QDMA_CMD_LWC 0x2

#define FSL_QDMA_CMD_RWTTYPE_OFFSET 28
#define FSL_QDMA_CMD_NS_OFFSET 27
#define FSL_QDMA_CMD_DQOS_OFFSET 24
#define FSL_QDMA_CMD_WTHROTL_OFFSET 20
#define FSL_QDMA_CMD_DSEN_OFFSET 19
#define FSL_QDMA_CMD_LWC_OFFSET 16

/* Field definition for Descriptor status */
#define QDMA_CCDF_STATUS_RTE BIT(5)
#define QDMA_CCDF_STATUS_WTE BIT(4)
#define QDMA_CCDF_STATUS_CDE BIT(2)
#define QDMA_CCDF_STATUS_SDE BIT(1)
#define QDMA_CCDF_STATUS_DDE BIT(0)
#define QDMA_CCDF_STATUS_MASK	(QDMA_CCDF_STATUS_RTE | \
				 QDMA_CCDF_STATUS_WTE | \
				 QDMA_CCDF_STATUS_CDE | \
				 QDMA_CCDF_STATUS_SDE | \
				 QDMA_CCDF_STATUS_DDE)

/* Field definition for Descriptor offset */
#define QDMA_CCDF_OFFSET 20
#define QDMA_SDDF_CMD(x) (((u64)(x)) << 32)

/* Field definition for safe loop count */
#define FSL_QDMA_HALT_COUNT 1500
#define FSL_QDMA_MAX_SIZE 16385
#define FSL_QDMA_COMP_TIMEOUT 1000
#define FSL_COMMAND_QUEUE_OVERFLLOW 10

#define FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma_engine, x)	\
	(((fsl_qdma_engine)->block_offset) * (x))

/**
 * struct fsl_qdma_format - Describes the compound descriptor format used
 *			    by the qDMA.
 * @status:	 Command status and enqueue status notification.
 * @cfg:	 Frame offset and frame format.
 * @addr_lo:	 Lower 32 bits of the 40-bit memory address held by the
 *		 compound descriptor.
 * @addr_hi:	 Upper 8 bits of the same 40-bit memory address.
 * @__reserved1: Reserved field.
 * @cfg8b_w1:	 Compound descriptor command queue origin produced by the
 *		 qDMA, and dynamic debug field.
 * @data:	 64-bit view of the 40-bit memory address describing the DMA
 *		 source or destination.
 */
struct fsl_qdma_format {
	__le32 status;
	__le32 cfg;
	union {
		struct {
			__le32 addr_lo;
			u8 addr_hi;
			u8 __reserved1[2];
			u8 cfg8b_w1;
		} __packed;
		__le64 data;
	};
} __packed;

/* qDMA status notification pre information */
struct fsl_pre_status {
	u64 addr;
	u8 queue;
};

static DEFINE_PER_CPU(struct fsl_pre_status, pre);

struct fsl_qdma_chan {
	struct virt_dma_chan vchan;
	struct virt_dma_desc vdesc;
	enum dma_status status;
	struct fsl_qdma_engine *qdma;
	struct fsl_qdma_queue *queue;
};

struct fsl_qdma_queue {
	struct fsl_qdma_format *virt_head;
	struct fsl_qdma_format *virt_tail;
	struct list_head comp_used;
	struct list_head comp_free;
	struct dma_pool *comp_pool;
	struct dma_pool *desc_pool;
	spinlock_t queue_lock;
	dma_addr_t bus_addr;
	u32 n_cq;
	u32 id;
	struct fsl_qdma_format *cq;
	void __iomem *block_base;
};

struct fsl_qdma_comp {
	dma_addr_t bus_addr;
	dma_addr_t desc_bus_addr;
	struct fsl_qdma_format *virt_addr;
	struct fsl_qdma_format *desc_virt_addr;
	struct fsl_qdma_chan *qchan;
	struct virt_dma_desc vdesc;
	struct list_head list;
};

struct fsl_qdma_engine {
	struct dma_device dma_dev;
	void __iomem *ctrl_base;
	void __iomem *status_base;
	void __iomem *block_base;
	u32 n_chans;
	u32 n_queues;
	struct mutex fsl_qdma_mutex;
	int error_irq;
	int *queue_irq;
	u32 feature;
	struct fsl_qdma_queue *queue;
	struct fsl_qdma_queue **status;
	struct fsl_qdma_chan *chans;
	int block_number;
	int block_offset;
	int irq_base;
	int desc_allocated;
};

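/*
 * Descriptor field accessors. The qDMA uses 40-bit bus addresses: addr_lo
 * carries the low 32 bits and addr_hi the upper 8 bits, so reading an
 * address back through the 64-bit 'data' view masks it down to 40 bits.
 */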
static inline u64
qdma_ccdf_addr_get64(const struct fsl_qdma_format *ccdf)
{
	return le64_to_cpu(ccdf->data) & (U64_MAX >> 24);
}

static inline void
qdma_desc_addr_set64(struct fsl_qdma_format *ccdf, u64 addr)
{
	ccdf->addr_hi = upper_32_bits(addr);
	ccdf->addr_lo = cpu_to_le32(lower_32_bits(addr));
}

static inline u8
qdma_ccdf_get_queue(const struct fsl_qdma_format *ccdf)
{
	return ccdf->cfg8b_w1 & U8_MAX;
}

static inline int
qdma_ccdf_get_offset(const struct fsl_qdma_format *ccdf)
{
	return (le32_to_cpu(ccdf->cfg) & QDMA_CCDF_MASK) >> QDMA_CCDF_OFFSET;
}

static inline void
qdma_ccdf_set_format(struct fsl_qdma_format *ccdf, int offset)
{
	ccdf->cfg = cpu_to_le32(QDMA_CCDF_FORMAT |
				(offset << QDMA_CCDF_OFFSET));
}

static inline int
qdma_ccdf_get_status(const struct fsl_qdma_format *ccdf)
{
	return (le32_to_cpu(ccdf->status) & QDMA_CCDF_STATUS_MASK);
}

static inline void
qdma_ccdf_set_ser(struct fsl_qdma_format *ccdf, int status)
{
	ccdf->status = cpu_to_le32(QDMA_CCDF_SER | status);
}

static inline void qdma_csgf_set_len(struct fsl_qdma_format *csgf, int len)
{
	csgf->cfg = cpu_to_le32(len & QDMA_SG_LEN_MASK);
}

static inline void qdma_csgf_set_f(struct fsl_qdma_format *csgf, int len)
{
	csgf->cfg = cpu_to_le32(QDMA_SG_FIN | (len & QDMA_SG_LEN_MASK));
}

static u32 qdma_readl(struct fsl_qdma_engine *qdma, void __iomem *addr)
{
	return FSL_DMA_IN(qdma, addr, 32);
}

static void qdma_writel(struct fsl_qdma_engine *qdma, u32 val,
			void __iomem *addr)
{
	FSL_DMA_OUT(qdma, addr, val, 32);
}

static struct fsl_qdma_chan *to_fsl_qdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct fsl_qdma_chan, vchan.chan);
}

static struct fsl_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd)
{
	return container_of(vd, struct fsl_qdma_comp, vdesc);
}

static void fsl_qdma_free_chan_resources(struct dma_chan *chan)
{
	struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
	struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma;
	struct fsl_qdma_comp *comp_temp, *_comp_temp;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);

	if (!fsl_queue->comp_pool && !fsl_queue->desc_pool)
		return;

	list_for_each_entry_safe(comp_temp, _comp_temp,
				 &fsl_queue->comp_used, list) {
		dma_pool_free(fsl_queue->comp_pool,
			      comp_temp->virt_addr,
			      comp_temp->bus_addr);
		dma_pool_free(fsl_queue->desc_pool,
			      comp_temp->desc_virt_addr,
			      comp_temp->desc_bus_addr);
		list_del(&comp_temp->list);
		kfree(comp_temp);
	}

	list_for_each_entry_safe(comp_temp, _comp_temp,
				 &fsl_queue->comp_free, list) {
		dma_pool_free(fsl_queue->comp_pool,
			      comp_temp->virt_addr,
			      comp_temp->bus_addr);
		dma_pool_free(fsl_queue->desc_pool,
			      comp_temp->desc_virt_addr,
			      comp_temp->desc_bus_addr);
		list_del(&comp_temp->list);
		kfree(comp_temp);
	}

	dma_pool_destroy(fsl_queue->comp_pool);
	dma_pool_destroy(fsl_queue->desc_pool);
	fsl_qdma->desc_allocated--;
	fsl_queue->comp_pool = NULL;
	fsl_queue->desc_pool = NULL;
}

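/*
 * Lay out a memcpy transfer as a qDMA compound frame. The 64-byte command
 * buffer holds four 16-byte fsl_qdma_format entries: the compound command
 * descriptor (CCDF), the frame list entry pointing at the descriptor buffer,
 * and the source and destination scatter/gather entries. The 32-byte
 * descriptor buffer holds the source (SDF) and destination (DDF)
 * descriptors. The CCDF points at bus_addr + 16, i.e. the frame list that
 * immediately follows it.
 */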
static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
				      dma_addr_t dst, dma_addr_t src, u32 len)
{
	u32 cmd;
	struct fsl_qdma_format *sdf, *ddf;
	struct fsl_qdma_format *ccdf, *csgf_desc, *csgf_src, *csgf_dest;

	ccdf = fsl_comp->virt_addr;
	csgf_desc = fsl_comp->virt_addr + 1;
	csgf_src = fsl_comp->virt_addr + 2;
	csgf_dest = fsl_comp->virt_addr + 3;
	sdf = fsl_comp->desc_virt_addr;
	ddf = fsl_comp->desc_virt_addr + 1;

	memset(fsl_comp->virt_addr, 0, FSL_QDMA_COMMAND_BUFFER_SIZE);
	memset(fsl_comp->desc_virt_addr, 0, FSL_QDMA_DESCRIPTOR_BUFFER_SIZE);
	/* Head Command Descriptor (Frame Descriptor) */
	qdma_desc_addr_set64(ccdf, fsl_comp->bus_addr + 16);
	qdma_ccdf_set_format(ccdf, qdma_ccdf_get_offset(ccdf));
	qdma_ccdf_set_ser(ccdf, qdma_ccdf_get_status(ccdf));
	/* Status notification is enqueued to the status queue. */
	/* Compound Command Descriptor (Frame List Table) */
	qdma_desc_addr_set64(csgf_desc, fsl_comp->desc_bus_addr);
	/* Length must be 32, as this is the Compound S/G Descriptor */
	qdma_csgf_set_len(csgf_desc, 32);
	qdma_desc_addr_set64(csgf_src, src);
	qdma_csgf_set_len(csgf_src, len);
	qdma_desc_addr_set64(csgf_dest, dst);
	qdma_csgf_set_len(csgf_dest, len);
	/* This entry is the last entry. */
	qdma_csgf_set_f(csgf_dest, len);
	/* Descriptor Buffer */
	cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET);
	sdf->data = QDMA_SDDF_CMD(cmd);

	cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET);
	cmd |= cpu_to_le32(FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET);
	ddf->data = QDMA_SDDF_CMD(cmd);
}

/*
 * Pre-request full command descriptor for enqueue.
 */
static int fsl_qdma_pre_request_enqueue_desc(struct fsl_qdma_queue *queue)
{
	int i;
	struct fsl_qdma_comp *comp_temp, *_comp_temp;

	for (i = 0; i < queue->n_cq + FSL_COMMAND_QUEUE_OVERFLLOW; i++) {
		comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
		if (!comp_temp)
			goto err_alloc;
		comp_temp->virt_addr =
			dma_pool_alloc(queue->comp_pool, GFP_KERNEL,
				       &comp_temp->bus_addr);
		if (!comp_temp->virt_addr)
			goto err_dma_alloc;

		comp_temp->desc_virt_addr =
			dma_pool_alloc(queue->desc_pool, GFP_KERNEL,
				       &comp_temp->desc_bus_addr);
		if (!comp_temp->desc_virt_addr)
			goto err_desc_dma_alloc;

		list_add_tail(&comp_temp->list, &queue->comp_free);
	}

	return 0;

err_desc_dma_alloc:
	dma_pool_free(queue->comp_pool, comp_temp->virt_addr,
		      comp_temp->bus_addr);

err_dma_alloc:
	kfree(comp_temp);

err_alloc:
	list_for_each_entry_safe(comp_temp, _comp_temp,
				 &queue->comp_free, list) {
		if (comp_temp->virt_addr)
			dma_pool_free(queue->comp_pool,
				      comp_temp->virt_addr,
				      comp_temp->bus_addr);
		if (comp_temp->desc_virt_addr)
			dma_pool_free(queue->desc_pool,
				      comp_temp->desc_virt_addr,
				      comp_temp->desc_bus_addr);

		list_del(&comp_temp->list);
		kfree(comp_temp);
	}

	return -ENOMEM;
}

/*
 * Request a command descriptor for enqueue.
 */
static struct fsl_qdma_comp
*fsl_qdma_request_enqueue_desc(struct fsl_qdma_chan *fsl_chan)
{
	unsigned long flags;
	struct fsl_qdma_comp *comp_temp;
	int timeout = FSL_QDMA_COMP_TIMEOUT;
	struct fsl_qdma_queue *queue = fsl_chan->queue;

	while (timeout--) {
		spin_lock_irqsave(&queue->queue_lock, flags);
		if (!list_empty(&queue->comp_free)) {
			comp_temp = list_first_entry(&queue->comp_free,
						     struct fsl_qdma_comp,
						     list);
			list_del(&comp_temp->list);

			spin_unlock_irqrestore(&queue->queue_lock, flags);
			comp_temp->qchan = fsl_chan;
			return comp_temp;
		}
		spin_unlock_irqrestore(&queue->queue_lock, flags);
		udelay(1);
	}

	return NULL;
}

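/*
 * Allocate one fsl_qdma_queue per (block, queue) pair, plus a DMA-coherent
 * ring of queue_size[i] command descriptors for each, with the ring sizes
 * taken from the "queue-sizes" device property.
 */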
static struct fsl_qdma_queue
*fsl_qdma_alloc_queue_resources(struct platform_device *pdev,
				struct fsl_qdma_engine *fsl_qdma)
{
	int ret, len, i, j;
	int queue_num, block_number;
	unsigned int queue_size[FSL_QDMA_QUEUE_MAX];
	struct fsl_qdma_queue *queue_head, *queue_temp;

	queue_num = fsl_qdma->n_queues;
	block_number = fsl_qdma->block_number;

	if (queue_num > FSL_QDMA_QUEUE_MAX)
		queue_num = FSL_QDMA_QUEUE_MAX;
	len = sizeof(*queue_head) * queue_num * block_number;
	queue_head = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
	if (!queue_head)
		return NULL;

	ret = device_property_read_u32_array(&pdev->dev, "queue-sizes",
					     queue_size, queue_num);
	if (ret) {
		dev_err(&pdev->dev, "Can't get queue-sizes.\n");
		return NULL;
	}
	for (j = 0; j < block_number; j++) {
		for (i = 0; i < queue_num; i++) {
			if (queue_size[i] > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ||
			    queue_size[i] < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
				dev_err(&pdev->dev,
					"Get wrong queue-sizes.\n");
				return NULL;
			}
			queue_temp = queue_head + i + (j * queue_num);

			queue_temp->cq =
			dma_alloc_coherent(&pdev->dev,
					   sizeof(struct fsl_qdma_format) *
					   queue_size[i],
					   &queue_temp->bus_addr,
					   GFP_KERNEL);
			if (!queue_temp->cq)
				return NULL;
			queue_temp->block_base = fsl_qdma->block_base +
				FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
			queue_temp->n_cq = queue_size[i];
			queue_temp->id = i;
			queue_temp->virt_head = queue_temp->cq;
			queue_temp->virt_tail = queue_temp->cq;
			/*
			 * List for queue command buffer
			 */
			INIT_LIST_HEAD(&queue_temp->comp_used);
			spin_lock_init(&queue_temp->queue_lock);
		}
	}
	return queue_head;
}

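/*
 * Allocate the per-block status queue ring, sized from the "status-sizes"
 * device property; completion notifications for this block are delivered
 * through it.
 */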
static struct fsl_qdma_queue
*fsl_qdma_prep_status_queue(struct platform_device *pdev)
{
	int ret;
	unsigned int status_size;
	struct fsl_qdma_queue *status_head;
	struct device_node *np = pdev->dev.of_node;

	ret = of_property_read_u32(np, "status-sizes", &status_size);
	if (ret) {
		dev_err(&pdev->dev, "Can't get status-sizes.\n");
		return NULL;
	}
	if (status_size > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ||
	    status_size < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
		dev_err(&pdev->dev, "Get wrong status_size.\n");
		return NULL;
	}
	status_head = devm_kzalloc(&pdev->dev,
				   sizeof(*status_head), GFP_KERNEL);
	if (!status_head)
		return NULL;

	/*
	 * Buffer for queue command
	 */
	status_head->cq = dma_alloc_coherent(&pdev->dev,
					     sizeof(struct fsl_qdma_format) *
					     status_size,
					     &status_head->bus_addr,
					     GFP_KERNEL);
	if (!status_head->cq) {
		devm_kfree(&pdev->dev, status_head);
		return NULL;
	}
	status_head->n_cq = status_size;
	status_head->virt_head = status_head->cq;
	status_head->virt_tail = status_head->cq;
	status_head->comp_pool = NULL;

	return status_head;
}

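/*
 * Halt the engine: set DQD in DMR, zero every command queue mode register,
 * poll DSR until the busy bit clears (up to FSL_QDMA_HALT_COUNT iterations
 * of 100us), then disable the status queues and clear any latched queue
 * interrupts.
 */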
static int fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
{
	u32 reg;
	int i, j, count = FSL_QDMA_HALT_COUNT;
	void __iomem *block, *ctrl = fsl_qdma->ctrl_base;

	/* Disable the command queue and wait for idle state. */
	reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
	reg |= FSL_QDMA_DMR_DQD;
	qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
	for (j = 0; j < fsl_qdma->block_number; j++) {
		block = fsl_qdma->block_base +
			FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
		for (i = 0; i < FSL_QDMA_QUEUE_NUM_MAX; i++)
			qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQMR(i));
	}
	while (1) {
		reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DSR);
		if (!(reg & FSL_QDMA_DSR_DB))
			break;
		if (count-- < 0)
			return -EBUSY;
		udelay(100);
	}

	for (j = 0; j < fsl_qdma->block_number; j++) {
		block = fsl_qdma->block_base +
			FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
		/* Disable status queue. */
		qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BSQMR);

		/*
		 * Clear the command queue interrupt detect register for
		 * all queues.
		 */
		qdma_writel(fsl_qdma, FSL_QDMA_BCQIDR_CLEAR,
			    block + FSL_QDMA_BCQIDR(0));
	}

	return 0;
}

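/*
 * Drain the block's status queue. Each status entry carries the originating
 * command queue number and the bus address of the completed compound frame's
 * frame list. The per-CPU 'pre' record remembers the last (queue, addr) pair
 * seen; a matching entry is treated as a duplicate notification and is only
 * acknowledged (cleared and advanced past), while a new entry completes the
 * matching descriptor on its owning virtual channel.
 */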
static int
fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma,
				 void *block,
				 int id)
{
	bool duplicate;
	u32 reg, i, count;
	u8 completion_status;
	struct fsl_qdma_queue *temp_queue;
	struct fsl_qdma_format *status_addr;
	struct fsl_qdma_comp *fsl_comp = NULL;
	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
	struct fsl_qdma_queue *fsl_status = fsl_qdma->status[id];

	count = FSL_QDMA_MAX_SIZE;

	while (count--) {
		duplicate = 0;
		reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQSR);
		if (reg & FSL_QDMA_BSQSR_QE)
			return 0;

		status_addr = fsl_status->virt_head;

		if (qdma_ccdf_get_queue(status_addr) ==
		    __this_cpu_read(pre.queue) &&
		    qdma_ccdf_addr_get64(status_addr) ==
		    __this_cpu_read(pre.addr))
			duplicate = 1;
		i = qdma_ccdf_get_queue(status_addr) +
			id * fsl_qdma->n_queues;
		__this_cpu_write(pre.addr, qdma_ccdf_addr_get64(status_addr));
		__this_cpu_write(pre.queue, qdma_ccdf_get_queue(status_addr));
		temp_queue = fsl_queue + i;

		spin_lock(&temp_queue->queue_lock);
		if (list_empty(&temp_queue->comp_used)) {
			if (!duplicate) {
				spin_unlock(&temp_queue->queue_lock);
				return -EAGAIN;
			}
		} else {
			fsl_comp = list_first_entry(&temp_queue->comp_used,
						    struct fsl_qdma_comp, list);
			if (fsl_comp->bus_addr + 16 !=
			    __this_cpu_read(pre.addr)) {
				if (!duplicate) {
					spin_unlock(&temp_queue->queue_lock);
					return -EAGAIN;
				}
			}
		}

		if (duplicate) {
			reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
			reg |= FSL_QDMA_BSQMR_DI;
			qdma_desc_addr_set64(status_addr, 0x0);
			fsl_status->virt_head++;
			if (fsl_status->virt_head == fsl_status->cq
						     + fsl_status->n_cq)
				fsl_status->virt_head = fsl_status->cq;
			qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
			spin_unlock(&temp_queue->queue_lock);
			continue;
		}
		list_del(&fsl_comp->list);

		completion_status = qdma_ccdf_get_status(status_addr);

		reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
		reg |= FSL_QDMA_BSQMR_DI;
		qdma_desc_addr_set64(status_addr, 0x0);
		fsl_status->virt_head++;
		if (fsl_status->virt_head == fsl_status->cq + fsl_status->n_cq)
			fsl_status->virt_head = fsl_status->cq;
		qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
		spin_unlock(&temp_queue->queue_lock);

		/*
		 * The completion_status is evaluated here
		 * (outside of the spin lock).
		 */
		if (completion_status) {
			/* A completion error occurred! */
			if (completion_status & QDMA_CCDF_STATUS_WTE) {
				/* Write transaction error */
				fsl_comp->vdesc.tx_result.result =
					DMA_TRANS_WRITE_FAILED;
			} else if (completion_status & QDMA_CCDF_STATUS_RTE) {
				/* Read transaction error */
				fsl_comp->vdesc.tx_result.result =
					DMA_TRANS_READ_FAILED;
			} else {
				/*
				 * Command/source/destination
				 * description error
				 */
				fsl_comp->vdesc.tx_result.result =
					DMA_TRANS_ABORTED;
				dev_err(fsl_qdma->dma_dev.dev,
					"DMA status descriptor error %x\n",
					completion_status);
			}
		}

		spin_lock(&fsl_comp->qchan->vchan.lock);
		vchan_cookie_complete(&fsl_comp->vdesc);
		fsl_comp->qchan->status = DMA_COMPLETE;
		spin_unlock(&fsl_comp->qchan->vchan.lock);
	}

	return 0;
}

static irqreturn_t fsl_qdma_error_handler(int irq, void *dev_id)
{
	unsigned int intr;
	struct fsl_qdma_engine *fsl_qdma = dev_id;
	void __iomem *status = fsl_qdma->status_base;
	unsigned int decfdw0r;
	unsigned int decfdw1r;
	unsigned int decfdw2r;
	unsigned int decfdw3r;

	intr = qdma_readl(fsl_qdma, status + FSL_QDMA_DEDR);

	if (intr) {
		decfdw0r = qdma_readl(fsl_qdma, status + FSL_QDMA_DECFDW0R);
		decfdw1r = qdma_readl(fsl_qdma, status + FSL_QDMA_DECFDW1R);
		decfdw2r = qdma_readl(fsl_qdma, status + FSL_QDMA_DECFDW2R);
		decfdw3r = qdma_readl(fsl_qdma, status + FSL_QDMA_DECFDW3R);
		dev_err(fsl_qdma->dma_dev.dev,
			"DMA transaction error! (%x: %x-%x-%x-%x)\n",
			intr, decfdw0r, decfdw1r, decfdw2r, decfdw3r);
	}

	qdma_writel(fsl_qdma, FSL_QDMA_DEDR_CLEAR, status + FSL_QDMA_DEDR);
	return IRQ_HANDLED;
}

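/*
 * Per-block queue interrupt: recover the block index from the IRQ number
 * relative to irq_base, drain that block's status queue, and on any error
 * quiesce the engine (set DQD, mask the queue interrupts) before clearing
 * the interrupt detect bits.
 */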
static irqreturn_t fsl_qdma_queue_handler(int irq, void *dev_id)
{
	int id;
	unsigned int intr, reg;
	struct fsl_qdma_engine *fsl_qdma = dev_id;
	void __iomem *block, *ctrl = fsl_qdma->ctrl_base;

	id = irq - fsl_qdma->irq_base;
	if (id < 0 || id >= fsl_qdma->block_number) {
		dev_err(fsl_qdma->dma_dev.dev,
			"irq %d is wrong, irq_base is %d\n",
			irq, fsl_qdma->irq_base);
	}

	block = fsl_qdma->block_base +
		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, id);

	intr = qdma_readl(fsl_qdma, block + FSL_QDMA_BCQIDR(0));

	if ((intr & FSL_QDMA_CQIDR_SQT) != 0)
		intr = fsl_qdma_queue_transfer_complete(fsl_qdma, block, id);

	if (intr != 0) {
		reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
		reg |= FSL_QDMA_DMR_DQD;
		qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
		qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQIER(0));
		dev_err(fsl_qdma->dma_dev.dev, "QDMA: status err!\n");
	}

	/* Clear all detected events and interrupts. */
	qdma_writel(fsl_qdma, FSL_QDMA_BCQIDR_CLEAR,
		    block + FSL_QDMA_BCQIDR(0));

	return IRQ_HANDLED;
}

static int
fsl_qdma_irq_init(struct platform_device *pdev,
		  struct fsl_qdma_engine *fsl_qdma)
{
	int i;
	int cpu;
	int ret;
	char irq_name[20];

	fsl_qdma->error_irq =
		platform_get_irq_byname(pdev, "qdma-error");
	if (fsl_qdma->error_irq < 0)
		return fsl_qdma->error_irq;

	ret = devm_request_irq(&pdev->dev, fsl_qdma->error_irq,
			       fsl_qdma_error_handler, 0,
			       "qDMA error", fsl_qdma);
	if (ret) {
		dev_err(&pdev->dev, "Can't register qDMA controller IRQ.\n");
		return ret;
	}

	for (i = 0; i < fsl_qdma->block_number; i++) {
		sprintf(irq_name, "qdma-queue%d", i);
		fsl_qdma->queue_irq[i] =
			platform_get_irq_byname(pdev, irq_name);

		if (fsl_qdma->queue_irq[i] < 0)
			return fsl_qdma->queue_irq[i];

		ret = devm_request_irq(&pdev->dev,
				       fsl_qdma->queue_irq[i],
				       fsl_qdma_queue_handler,
				       0,
				       "qDMA queue",
				       fsl_qdma);
		if (ret) {
			dev_err(&pdev->dev,
				"Can't register qDMA queue IRQ.\n");
			return ret;
		}

		cpu = i % num_online_cpus();
		ret = irq_set_affinity_hint(fsl_qdma->queue_irq[i],
					    get_cpu_mask(cpu));
		if (ret) {
			dev_err(&pdev->dev,
				"Can't set cpu %d affinity to IRQ %d.\n",
				cpu,
				fsl_qdma->queue_irq[i]);
			return ret;
		}
	}

	return 0;
}

static void fsl_qdma_irq_exit(struct platform_device *pdev,
			      struct fsl_qdma_engine *fsl_qdma)
{
	int i;

	devm_free_irq(&pdev->dev, fsl_qdma->error_irq, fsl_qdma);
	for (i = 0; i < fsl_qdma->block_number; i++)
		devm_free_irq(&pdev->dev, fsl_qdma->queue_irq[i], fsl_qdma);
}

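/*
 * Program every block: point each command queue's enqueue/dequeue pointers
 * at its ring, size the queues (the CQ_SIZE field encodes log2(entries) - 6,
 * so the supported 64..16384 entry range maps to 0..8), set up the status
 * queue and its interrupts, and finally clear the DQD bit in DMR (set by
 * fsl_qdma_halt()) so the engine runs again.
 */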
static int fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
{
	u32 reg;
	int i, j, ret;
	struct fsl_qdma_queue *temp;
	void __iomem *status = fsl_qdma->status_base;
	void __iomem *block, *ctrl = fsl_qdma->ctrl_base;
	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;

	/* Try to halt the qDMA engine first. */
	ret = fsl_qdma_halt(fsl_qdma);
	if (ret) {
		dev_err(fsl_qdma->dma_dev.dev, "DMA halt failed!");
		return ret;
	}

	for (i = 0; i < fsl_qdma->block_number; i++) {
		/*
		 * Clear the command queue interrupt detect register for
		 * all queues.
		 */
		block = fsl_qdma->block_base +
			FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, i);
		qdma_writel(fsl_qdma, FSL_QDMA_BCQIDR_CLEAR,
			    block + FSL_QDMA_BCQIDR(0));
	}

	for (j = 0; j < fsl_qdma->block_number; j++) {
		block = fsl_qdma->block_base +
			FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
		for (i = 0; i < fsl_qdma->n_queues; i++) {
			temp = fsl_queue + i + (j * fsl_qdma->n_queues);
			/*
			 * Initialize the Command Queue registers to point
			 * to the first command descriptor in memory:
			 * Dequeue Pointer Address Registers,
			 * Enqueue Pointer Address Registers.
			 */
			qdma_writel(fsl_qdma, temp->bus_addr,
				    block + FSL_QDMA_BCQDPA_SADDR(i));
			qdma_writel(fsl_qdma, temp->bus_addr,
				    block + FSL_QDMA_BCQEPA_SADDR(i));

			/* Initialize the queue mode. */
			reg = FSL_QDMA_BCQMR_EN;
			reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2(temp->n_cq) - 4);
			reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2(temp->n_cq) - 6);
			qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BCQMR(i));
		}

		/*
		 * Workaround for erratum ERR010812:
		 * we must enable XOFF to avoid enqueue rejections,
		 * so set the SQCCMR ENTER_WM field to 0x20.
		 */
		qdma_writel(fsl_qdma, FSL_QDMA_SQCCMR_ENTER_WM,
			    block + FSL_QDMA_SQCCMR);

		/*
		 * Initialize the status queue registers to point to the
		 * first command descriptor in memory:
		 * Dequeue Pointer Address Registers,
		 * Enqueue Pointer Address Registers.
		 */
		qdma_writel(fsl_qdma, fsl_qdma->status[j]->bus_addr,
			    block + FSL_QDMA_SQEPAR);
		qdma_writel(fsl_qdma, fsl_qdma->status[j]->bus_addr,
			    block + FSL_QDMA_SQDPAR);
		/* Initialize status queue interrupt. */
		qdma_writel(fsl_qdma, FSL_QDMA_BCQIER_CQTIE,
			    block + FSL_QDMA_BCQIER(0));
		qdma_writel(fsl_qdma, FSL_QDMA_BSQICR_ICEN |
			    FSL_QDMA_BSQICR_ICST(5) | 0x8000,
			    block + FSL_QDMA_BSQICR);
		qdma_writel(fsl_qdma, FSL_QDMA_CQIER_MEIE |
			    FSL_QDMA_CQIER_TEIE,
			    block + FSL_QDMA_CQIER);

		/* Initialize the status queue mode. */
		reg = FSL_QDMA_BSQMR_EN;
		reg |= FSL_QDMA_BSQMR_CQ_SIZE(
			ilog2(fsl_qdma->status[j]->n_cq) - 6);

		qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
		reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
	}

	/* Initialize controller interrupt register. */
	qdma_writel(fsl_qdma, FSL_QDMA_DEDR_CLEAR, status + FSL_QDMA_DEDR);
	qdma_writel(fsl_qdma, FSL_QDMA_DEIER_CLEAR, status + FSL_QDMA_DEIER);

	reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
	reg &= ~FSL_QDMA_DMR_DQD;
	qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);

	return 0;
}

static struct dma_async_tx_descriptor *
fsl_qdma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst,
		     dma_addr_t src, size_t len, unsigned long flags)
{
	struct fsl_qdma_comp *fsl_comp;
	struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);

	fsl_comp = fsl_qdma_request_enqueue_desc(fsl_chan);
	if (!fsl_comp)
		return NULL;

	fsl_qdma_comp_fill_memcpy(fsl_comp, dst, src, len);

	return vchan_tx_prep(&fsl_chan->vchan, &fsl_comp->vdesc, flags);
}

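/*
 * Copy the next pending descriptor's CCDF into the command queue ring at the
 * current enqueue position (unless the queue reports full or XOFF), wrap the
 * head pointer at the end of the ring, and set the EI bit in BCQMR so the
 * block picks up the new entry.
 */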
static void fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan)
{
	u32 reg;
	struct virt_dma_desc *vdesc;
	struct fsl_qdma_comp *fsl_comp;
	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
	void __iomem *block = fsl_queue->block_base;

	reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQSR(fsl_queue->id));
	if (reg & (FSL_QDMA_BCQSR_QF | FSL_QDMA_BCQSR_XOFF))
		return;
	vdesc = vchan_next_desc(&fsl_chan->vchan);
	if (!vdesc)
		return;
	list_del(&vdesc->node);
	fsl_comp = to_fsl_qdma_comp(vdesc);

	memcpy(fsl_queue->virt_head++,
	       fsl_comp->virt_addr, sizeof(struct fsl_qdma_format));
	if (fsl_queue->virt_head == fsl_queue->cq + fsl_queue->n_cq)
		fsl_queue->virt_head = fsl_queue->cq;

	list_add_tail(&fsl_comp->list, &fsl_queue->comp_used);
	barrier();
	reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQMR(fsl_queue->id));
	reg |= FSL_QDMA_BCQMR_EI;
	qdma_writel(fsl_chan->qdma, reg, block + FSL_QDMA_BCQMR(fsl_queue->id));
	fsl_chan->status = DMA_IN_PROGRESS;
}

static void fsl_qdma_free_desc(struct virt_dma_desc *vdesc)
{
	unsigned long flags;
	struct fsl_qdma_comp *fsl_comp;
	struct fsl_qdma_queue *fsl_queue;

	fsl_comp = to_fsl_qdma_comp(vdesc);
	fsl_queue = fsl_comp->qchan->queue;

	spin_lock_irqsave(&fsl_queue->queue_lock, flags);
	list_add_tail(&fsl_comp->list, &fsl_queue->comp_free);
	spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);
}

static void fsl_qdma_issue_pending(struct dma_chan *chan)
{
	unsigned long flags;
	struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;

	spin_lock_irqsave(&fsl_queue->queue_lock, flags);
	spin_lock(&fsl_chan->vchan.lock);
	if (vchan_issue_pending(&fsl_chan->vchan))
		fsl_qdma_enqueue_desc(fsl_chan);
	spin_unlock(&fsl_chan->vchan.lock);
	spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);
}

static void fsl_qdma_synchronize(struct dma_chan *chan)
{
	struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);

	vchan_synchronize(&fsl_chan->vchan);
}

static int fsl_qdma_terminate_all(struct dma_chan *chan)
{
	LIST_HEAD(head);
	unsigned long flags;
	struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
	return 0;
}

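/*
 * First use of a queue: create the two dma_pools (64-byte command buffers
 * and 32-byte source/destination descriptor buffers) and pre-allocate
 * n_cq + FSL_COMMAND_QUEUE_OVERFLLOW completion descriptors onto the free
 * list, so that descriptor preparation never allocates in the hot path.
 */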
static int fsl_qdma_alloc_chan_resources(struct dma_chan *chan)
{
	int ret;
	struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
	struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma;
	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;

	if (fsl_queue->comp_pool && fsl_queue->desc_pool)
		return fsl_qdma->desc_allocated;

	INIT_LIST_HEAD(&fsl_queue->comp_free);

	/*
	 * The dma pool for queue command buffer
	 */
	fsl_queue->comp_pool =
	dma_pool_create("comp_pool",
			chan->device->dev,
			FSL_QDMA_COMMAND_BUFFER_SIZE,
			64, 0);
	if (!fsl_queue->comp_pool)
		return -ENOMEM;

	/*
	 * The dma pool for Descriptor(SD/DD) buffer
	 */
	fsl_queue->desc_pool =
	dma_pool_create("desc_pool",
			chan->device->dev,
			FSL_QDMA_DESCRIPTOR_BUFFER_SIZE,
			32, 0);
	if (!fsl_queue->desc_pool)
		goto err_desc_pool;

	ret = fsl_qdma_pre_request_enqueue_desc(fsl_queue);
	if (ret) {
		dev_err(chan->device->dev,
			"failed to alloc dma buffer for S/G descriptor\n");
		goto err_mem;
	}

	fsl_qdma->desc_allocated++;
	return fsl_qdma->desc_allocated;

err_mem:
	dma_pool_destroy(fsl_queue->desc_pool);
err_desc_pool:
	dma_pool_destroy(fsl_queue->comp_pool);
	return -ENOMEM;
}

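/*
 * Probe: counts and sizes come from the device tree ("dma-channels",
 * "block-offset", "block-number", "fsl,dma-queues"), the three register
 * regions are mapped, per-block status queues and per-queue command rings
 * are allocated, and each channel is bound round-robin to one of the
 * n_queues * block_number hardware queues before the dmaengine device is
 * registered and the hardware is programmed by fsl_qdma_reg_init().
 */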
static int fsl_qdma_probe(struct platform_device *pdev)
{
	int ret, i;
	int blk_num, blk_off;
	u32 len, chans, queues;
	struct resource *res;
	struct fsl_qdma_chan *fsl_chan;
	struct fsl_qdma_engine *fsl_qdma;
	struct device_node *np = pdev->dev.of_node;

	ret = of_property_read_u32(np, "dma-channels", &chans);
	if (ret) {
		dev_err(&pdev->dev, "Can't get dma-channels.\n");
		return ret;
	}

	ret = of_property_read_u32(np, "block-offset", &blk_off);
	if (ret) {
		dev_err(&pdev->dev, "Can't get block-offset.\n");
		return ret;
	}

	ret = of_property_read_u32(np, "block-number", &blk_num);
	if (ret) {
		dev_err(&pdev->dev, "Can't get block-number.\n");
		return ret;
	}

	blk_num = min_t(int, blk_num, num_online_cpus());

	len = sizeof(*fsl_qdma);
	fsl_qdma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
	if (!fsl_qdma)
		return -ENOMEM;

	len = sizeof(*fsl_chan) * chans;
	fsl_qdma->chans = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
	if (!fsl_qdma->chans)
		return -ENOMEM;

	len = sizeof(struct fsl_qdma_queue *) * blk_num;
	fsl_qdma->status = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
	if (!fsl_qdma->status)
		return -ENOMEM;

	len = sizeof(int) * blk_num;
	fsl_qdma->queue_irq = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
	if (!fsl_qdma->queue_irq)
		return -ENOMEM;

	ret = of_property_read_u32(np, "fsl,dma-queues", &queues);
	if (ret) {
		dev_err(&pdev->dev, "Can't get queues.\n");
		return ret;
	}

	fsl_qdma->desc_allocated = 0;
	fsl_qdma->n_chans = chans;
	fsl_qdma->n_queues = queues;
	fsl_qdma->block_number = blk_num;
	fsl_qdma->block_offset = blk_off;

	mutex_init(&fsl_qdma->fsl_qdma_mutex);

	for (i = 0; i < fsl_qdma->block_number; i++) {
		fsl_qdma->status[i] = fsl_qdma_prep_status_queue(pdev);
		if (!fsl_qdma->status[i])
			return -ENOMEM;
	}
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	fsl_qdma->ctrl_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(fsl_qdma->ctrl_base))
		return PTR_ERR(fsl_qdma->ctrl_base);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	fsl_qdma->status_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(fsl_qdma->status_base))
		return PTR_ERR(fsl_qdma->status_base);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
	fsl_qdma->block_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(fsl_qdma->block_base))
		return PTR_ERR(fsl_qdma->block_base);

	fsl_qdma->queue = fsl_qdma_alloc_queue_resources(pdev, fsl_qdma);
	if (!fsl_qdma->queue)
		return -ENOMEM;

	ret = fsl_qdma_irq_init(pdev, fsl_qdma);
	if (ret)
		return ret;

	fsl_qdma->irq_base = platform_get_irq_byname(pdev, "qdma-queue0");
	if (fsl_qdma->irq_base < 0)
		return fsl_qdma->irq_base;

	fsl_qdma->feature = of_property_read_bool(np, "big-endian");
	INIT_LIST_HEAD(&fsl_qdma->dma_dev.channels);

	for (i = 0; i < fsl_qdma->n_chans; i++) {
		struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];

		fsl_chan->qdma = fsl_qdma;
		fsl_chan->queue = fsl_qdma->queue + i % (fsl_qdma->n_queues *
							fsl_qdma->block_number);
		fsl_chan->vchan.desc_free = fsl_qdma_free_desc;
		vchan_init(&fsl_chan->vchan, &fsl_qdma->dma_dev);
	}

	dma_cap_set(DMA_MEMCPY, fsl_qdma->dma_dev.cap_mask);

	fsl_qdma->dma_dev.dev = &pdev->dev;
	fsl_qdma->dma_dev.device_free_chan_resources =
		fsl_qdma_free_chan_resources;
	fsl_qdma->dma_dev.device_alloc_chan_resources =
		fsl_qdma_alloc_chan_resources;
	fsl_qdma->dma_dev.device_tx_status = dma_cookie_status;
	fsl_qdma->dma_dev.device_prep_dma_memcpy = fsl_qdma_prep_memcpy;
	fsl_qdma->dma_dev.device_issue_pending = fsl_qdma_issue_pending;
	fsl_qdma->dma_dev.device_synchronize = fsl_qdma_synchronize;
	fsl_qdma->dma_dev.device_terminate_all = fsl_qdma_terminate_all;

	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
	if (ret) {
		dev_err(&pdev->dev, "dma_set_mask failure.\n");
		return ret;
	}

	platform_set_drvdata(pdev, fsl_qdma);

	ret = dma_async_device_register(&fsl_qdma->dma_dev);
	if (ret) {
		dev_err(&pdev->dev,
			"Can't register NXP Layerscape qDMA engine.\n");
		return ret;
	}

	ret = fsl_qdma_reg_init(fsl_qdma);
	if (ret) {
		dev_err(&pdev->dev, "Can't initialize the qDMA engine.\n");
		return ret;
	}

	return 0;
}

static void fsl_qdma_cleanup_vchan(struct dma_device *dmadev)
{
	struct fsl_qdma_chan *chan, *_chan;

	list_for_each_entry_safe(chan, _chan,
				 &dmadev->channels, vchan.chan.device_node) {
		list_del(&chan->vchan.chan.device_node);
		tasklet_kill(&chan->vchan.task);
	}
}

static int fsl_qdma_remove(struct platform_device *pdev)
{
	int i;
	struct fsl_qdma_queue *status;
	struct device_node *np = pdev->dev.of_node;
	struct fsl_qdma_engine *fsl_qdma = platform_get_drvdata(pdev);

	fsl_qdma_irq_exit(pdev, fsl_qdma);
	fsl_qdma_cleanup_vchan(&fsl_qdma->dma_dev);
	of_dma_controller_free(np);
	dma_async_device_unregister(&fsl_qdma->dma_dev);

	for (i = 0; i < fsl_qdma->block_number; i++) {
		status = fsl_qdma->status[i];
		dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_format) *
				  status->n_cq, status->cq, status->bus_addr);
	}
	return 0;
}

static const struct of_device_id fsl_qdma_dt_ids[] = {
	{ .compatible = "fsl,ls1021a-qdma", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_qdma_dt_ids);

static struct platform_driver fsl_qdma_driver = {
	.driver = {
		.name = "fsl-qdma",
		.of_match_table = fsl_qdma_dt_ids,
	},
	.probe = fsl_qdma_probe,
	.remove = fsl_qdma_remove,
};

module_platform_driver(fsl_qdma_driver);

MODULE_ALIAS("platform:fsl-qdma");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("NXP Layerscape qDMA engine driver");