// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * DMA driver for STMicroelectronics STi FDMA controller
 *
 * Copyright (C) 2014 STMicroelectronics
 *
 * Author: Ludovic Barre <Ludovic.barre@st.com>
 *	   Peter Griffin <peter.griffin@linaro.org>
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/remoteproc.h>
#include <linux/slab.h>

#include "st_fdma.h"

static inline struct st_fdma_chan *to_st_fdma_chan(struct dma_chan *c)
{
	return container_of(c, struct st_fdma_chan, vchan.chan);
}

static struct st_fdma_desc *to_st_fdma_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct st_fdma_desc, vdesc);
}

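/*
 * Reserve the DMA request line named in fchan->cfg.req_line; returns the
 * line number on success, or -EINVAL if the line is invalid or already in
 * use.
 */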
static int st_fdma_dreq_get(struct st_fdma_chan *fchan)
{
	struct st_fdma_dev *fdev = fchan->fdev;
	u32 req_line_cfg = fchan->cfg.req_line;
	u32 dreq_line;
	int try = 0;

	/*
	 * dreq_mask is shared for n channels of fdma, so all accesses must be
	 * atomic. if the dreq_mask is changed between ffz and set_bit,
	 * we retry
	 */
	do {
		if (fdev->dreq_mask == ~0L) {
			dev_err(fdev->dev, "No req lines available\n");
			return -EINVAL;
		}

		if (try || req_line_cfg >= ST_FDMA_NR_DREQS) {
			dev_err(fdev->dev, "Invalid or used req line\n");
			return -EINVAL;
		} else {
			dreq_line = req_line_cfg;
		}

		try++;
	} while (test_and_set_bit(dreq_line, &fdev->dreq_mask));

	dev_dbg(fdev->dev, "get dreq_line:%d mask:%#lx\n",
		dreq_line, fdev->dreq_mask);

	return dreq_line;
}

static void st_fdma_dreq_put(struct st_fdma_chan *fchan)
{
	struct st_fdma_dev *fdev = fchan->fdev;

	dev_dbg(fdev->dev, "put dreq_line:%#x\n", fchan->dreq_line);
	clear_bit(fchan->dreq_line, &fdev->dreq_mask);
}

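/*
 * Program the first node of the next queued virtual descriptor into the
 * channel registers and kick the channel with a START command.
 */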
static void st_fdma_xfer_desc(struct st_fdma_chan *fchan)
{
	struct virt_dma_desc *vdesc;
	unsigned long nbytes, ch_cmd, cmd;

	vdesc = vchan_next_desc(&fchan->vchan);
	if (!vdesc)
		return;

	fchan->fdesc = to_st_fdma_desc(vdesc);
	nbytes = fchan->fdesc->node[0].desc->nbytes;
	cmd = FDMA_CMD_START(fchan->vchan.chan.chan_id);
	ch_cmd = fchan->fdesc->node[0].pdesc | FDMA_CH_CMD_STA_START;

	/* start the channel for the descriptor */
	fnode_write(fchan, nbytes, FDMA_CNTN_OFST);
	fchan_write(fchan, ch_cmd, FDMA_CH_CMD_OFST);
	writel(cmd,
	       fchan->fdev->slim_rproc->peri + FDMA_CMD_SET_OFST);

	dev_dbg(fchan->fdev->dev, "start chan:%d\n", fchan->vchan.chan.chan_id);
}

static void st_fdma_ch_sta_update(struct st_fdma_chan *fchan,
				  unsigned long int_sta)
{
	unsigned long ch_sta, ch_err;
	int ch_id = fchan->vchan.chan.chan_id;
	struct st_fdma_dev *fdev = fchan->fdev;

	ch_sta = fchan_read(fchan, FDMA_CH_CMD_OFST);
	ch_err = ch_sta & FDMA_CH_CMD_ERR_MASK;
	ch_sta &= FDMA_CH_CMD_STA_MASK;

	if (int_sta & FDMA_INT_STA_ERR) {
		dev_warn(fdev->dev, "chan:%d, error:%ld\n", ch_id, ch_err);
		fchan->status = DMA_ERROR;
		return;
	}

	switch (ch_sta) {
	case FDMA_CH_CMD_STA_PAUSED:
		fchan->status = DMA_PAUSED;
		break;
	case FDMA_CH_CMD_STA_RUNNING:
		fchan->status = DMA_IN_PROGRESS;
		break;
	}
}

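/*
 * The interrupt status register carries two bits per channel (completion
 * and error), so the handler walks the channels while shifting the status
 * word right by two on each iteration.
 */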
static irqreturn_t st_fdma_irq_handler(int irq, void *dev_id)
{
	struct st_fdma_dev *fdev = dev_id;
	irqreturn_t ret = IRQ_NONE;
	struct st_fdma_chan *fchan = &fdev->chans[0];
	unsigned long int_sta, clr;

	int_sta = fdma_read(fdev, FDMA_INT_STA_OFST);
	clr = int_sta;

	for (; int_sta != 0 ; int_sta >>= 2, fchan++) {
		if (!(int_sta & (FDMA_INT_STA_CH | FDMA_INT_STA_ERR)))
			continue;

		spin_lock(&fchan->vchan.lock);
		st_fdma_ch_sta_update(fchan, int_sta);

		if (fchan->fdesc) {
			if (!fchan->fdesc->iscyclic) {
				list_del(&fchan->fdesc->vdesc.node);
				vchan_cookie_complete(&fchan->fdesc->vdesc);
				fchan->fdesc = NULL;
				fchan->status = DMA_COMPLETE;
			} else {
				vchan_cyclic_callback(&fchan->fdesc->vdesc);
			}

			/* Start the next descriptor (if available) */
			if (!fchan->fdesc)
				st_fdma_xfer_desc(fchan);
		}

		spin_unlock(&fchan->vchan.lock);
		ret = IRQ_HANDLED;
	}

	fdma_write(fdev, clr, FDMA_INT_CLR_OFST);

	return ret;
}

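/*
 * Translate a DT dma-spec into a channel. args[0] is the request line,
 * args[1] (optional) the request-control bits and args[2] (optional) the
 * transfer type, e.g. an illustrative client binding:
 *
 *	dmas = <&fdma0 2 0 1>;
 *
 * The SLIM core is booted here and shut down again if no channel can be
 * obtained.
 */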
static struct dma_chan *st_fdma_of_xlate(struct of_phandle_args *dma_spec,
					 struct of_dma *ofdma)
{
	struct st_fdma_dev *fdev = ofdma->of_dma_data;
	struct dma_chan *chan;
	struct st_fdma_chan *fchan;
	int ret;

	if (dma_spec->args_count < 1)
		return ERR_PTR(-EINVAL);

	if (fdev->dma_device.dev->of_node != dma_spec->np)
		return ERR_PTR(-EINVAL);

	ret = rproc_boot(fdev->slim_rproc->rproc);
	if (ret == -ENOENT)
		return ERR_PTR(-EPROBE_DEFER);
	else if (ret)
		return ERR_PTR(ret);

	chan = dma_get_any_slave_channel(&fdev->dma_device);
	if (!chan)
		goto err_chan;

	fchan = to_st_fdma_chan(chan);

	fchan->cfg.of_node = dma_spec->np;
	fchan->cfg.req_line = dma_spec->args[0];
	fchan->cfg.req_ctrl = 0;
	fchan->cfg.type = ST_FDMA_TYPE_FREE_RUN;

	if (dma_spec->args_count > 1)
		fchan->cfg.req_ctrl = dma_spec->args[1]
			& FDMA_REQ_CTRL_CFG_MASK;

	if (dma_spec->args_count > 2)
		fchan->cfg.type = dma_spec->args[2];

	if (fchan->cfg.type == ST_FDMA_TYPE_FREE_RUN) {
		fchan->dreq_line = 0;
	} else {
		fchan->dreq_line = st_fdma_dreq_get(fchan);
		if (IS_ERR_VALUE(fchan->dreq_line)) {
			chan = ERR_PTR(fchan->dreq_line);
			goto err_chan;
		}
	}

	dev_dbg(fdev->dev, "xlate req_line:%d type:%d req_ctrl:%#lx\n",
		fchan->cfg.req_line, fchan->cfg.type, fchan->cfg.req_ctrl);

	return chan;

err_chan:
	rproc_shutdown(fdev->slim_rproc->rproc);
	return chan;
}

static void st_fdma_free_desc(struct virt_dma_desc *vdesc)
{
	struct st_fdma_desc *fdesc;
	int i;

	fdesc = to_st_fdma_desc(vdesc);
	for (i = 0; i < fdesc->n_nodes; i++)
		dma_pool_free(fdesc->fchan->node_pool, fdesc->node[i].desc,
			      fdesc->node[i].pdesc);
	kfree(fdesc);
}

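/*
 * Allocate a descriptor carrying sg_len hardware nodes from the channel's
 * DMA pool; on failure, nodes already obtained are returned to the pool.
 */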
static struct st_fdma_desc *st_fdma_alloc_desc(struct st_fdma_chan *fchan,
					       int sg_len)
{
	struct st_fdma_desc *fdesc;
	int i;

	fdesc = kzalloc(struct_size(fdesc, node, sg_len), GFP_NOWAIT);
	if (!fdesc)
		return NULL;

	fdesc->fchan = fchan;
	fdesc->n_nodes = sg_len;
	for (i = 0; i < sg_len; i++) {
		fdesc->node[i].desc = dma_pool_alloc(fchan->node_pool,
				GFP_NOWAIT, &fdesc->node[i].pdesc);
		if (!fdesc->node[i].desc)
			goto err;
	}
	return fdesc;

err:
	while (--i >= 0)
		dma_pool_free(fchan->node_pool, fdesc->node[i].desc,
			      fdesc->node[i].pdesc);
	kfree(fdesc);
	return NULL;
}

static int st_fdma_alloc_chan_res(struct dma_chan *chan)
{
	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);

	/* Create the dma pool for descriptor allocation */
	fchan->node_pool = dma_pool_create(dev_name(&chan->dev->device),
					   fchan->fdev->dev,
					   sizeof(struct st_fdma_hw_node),
					   __alignof__(struct st_fdma_hw_node),
					   0);

	if (!fchan->node_pool) {
		dev_err(fchan->fdev->dev, "unable to allocate desc pool\n");
		return -ENOMEM;
	}

	dev_dbg(fchan->fdev->dev, "alloc ch_id:%d type:%d\n",
		fchan->vchan.chan.chan_id, fchan->cfg.type);

	return 0;
}

static void st_fdma_free_chan_res(struct dma_chan *chan)
{
	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
	struct rproc *rproc = fchan->fdev->slim_rproc->rproc;
	unsigned long flags;

	dev_dbg(fchan->fdev->dev, "%s: freeing chan:%d\n",
		__func__, fchan->vchan.chan.chan_id);

	if (fchan->cfg.type != ST_FDMA_TYPE_FREE_RUN)
		st_fdma_dreq_put(fchan);

	spin_lock_irqsave(&fchan->vchan.lock, flags);
	fchan->fdesc = NULL;
	spin_unlock_irqrestore(&fchan->vchan.lock, flags);

	dma_pool_destroy(fchan->node_pool);
	fchan->node_pool = NULL;
	memset(&fchan->cfg, 0, sizeof(struct st_fdma_cfg));

	rproc_shutdown(rproc);
}

static struct dma_async_tx_descriptor *st_fdma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
	size_t len, unsigned long flags)
{
	struct st_fdma_chan *fchan;
	struct st_fdma_desc *fdesc;
	struct st_fdma_hw_node *hw_node;

	if (!len)
		return NULL;

	fchan = to_st_fdma_chan(chan);

	/* We only require a single descriptor */
	fdesc = st_fdma_alloc_desc(fchan, 1);
	if (!fdesc) {
		dev_err(fchan->fdev->dev, "no memory for desc\n");
		return NULL;
	}

	hw_node = fdesc->node[0].desc;
	hw_node->next = 0;
	hw_node->control = FDMA_NODE_CTRL_REQ_MAP_FREE_RUN;
	hw_node->control |= FDMA_NODE_CTRL_SRC_INCR;
	hw_node->control |= FDMA_NODE_CTRL_DST_INCR;
	hw_node->control |= FDMA_NODE_CTRL_INT_EON;
	hw_node->nbytes = len;
	hw_node->saddr = src;
	hw_node->daddr = dst;

	hw_node->generic.length = len;
	hw_node->generic.sstride = 0;
	hw_node->generic.dstride = 0;

	return vchan_tx_prep(&fchan->vchan, &fdesc->vdesc, flags);
}

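/*
 * Translate the cached dma_slave_config (direction, bus width, maxburst
 * and device address) into the request-control register value for the
 * channel's dreq line.
 */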
static int config_reqctrl(struct st_fdma_chan *fchan,
			  enum dma_transfer_direction direction)
{
	u32 maxburst = 0, addr = 0;
	enum dma_slave_buswidth width;
	int ch_id = fchan->vchan.chan.chan_id;
	struct st_fdma_dev *fdev = fchan->fdev;

	switch (direction) {
	case DMA_DEV_TO_MEM:
		fchan->cfg.req_ctrl &= ~FDMA_REQ_CTRL_WNR;
		maxburst = fchan->scfg.src_maxburst;
		width = fchan->scfg.src_addr_width;
		addr = fchan->scfg.src_addr;
		break;

	case DMA_MEM_TO_DEV:
		fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_WNR;
		maxburst = fchan->scfg.dst_maxburst;
		width = fchan->scfg.dst_addr_width;
		addr = fchan->scfg.dst_addr;
		break;

	default:
		return -EINVAL;
	}

	fchan->cfg.req_ctrl &= ~FDMA_REQ_CTRL_OPCODE_MASK;

	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_OPCODE_LD_ST1;
		break;

	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_OPCODE_LD_ST2;
		break;

	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_OPCODE_LD_ST4;
		break;

	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_OPCODE_LD_ST8;
		break;

	default:
		return -EINVAL;
	}

	fchan->cfg.req_ctrl &= ~FDMA_REQ_CTRL_NUM_OPS_MASK;
	fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_NUM_OPS(maxburst-1);
	dreq_write(fchan, fchan->cfg.req_ctrl, FDMA_REQ_CTRL_OFST);

	fchan->cfg.dev_addr = addr;
	fchan->cfg.dir = direction;

	dev_dbg(fdev->dev, "chan:%d config_reqctrl:%#x req_ctrl:%#lx\n",
		ch_id, addr, fchan->cfg.req_ctrl);

	return 0;
}

static void fill_hw_node(struct st_fdma_hw_node *hw_node,
			 struct st_fdma_chan *fchan,
			 enum dma_transfer_direction direction)
{
	if (direction == DMA_MEM_TO_DEV) {
		hw_node->control |= FDMA_NODE_CTRL_SRC_INCR;
		hw_node->control |= FDMA_NODE_CTRL_DST_STATIC;
		hw_node->daddr = fchan->cfg.dev_addr;
	} else {
		hw_node->control |= FDMA_NODE_CTRL_SRC_STATIC;
		hw_node->control |= FDMA_NODE_CTRL_DST_INCR;
		hw_node->saddr = fchan->cfg.dev_addr;
	}

	hw_node->generic.sstride = 0;
	hw_node->generic.dstride = 0;
}

static inline struct st_fdma_chan *st_fdma_prep_common(struct dma_chan *chan,
		size_t len, enum dma_transfer_direction direction)
{
	struct st_fdma_chan *fchan;

	if (!chan || !len)
		return NULL;

	fchan = to_st_fdma_chan(chan);

	if (!is_slave_direction(direction)) {
		dev_err(fchan->fdev->dev, "bad direction?\n");
		return NULL;
	}

	return fchan;
}

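/*
 * Build a cyclic transfer: one hardware node per period, with the last
 * node's next pointer looping back to the first and an end-of-node
 * interrupt raised for every period.
 */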
static struct dma_async_tx_descriptor *st_fdma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t buf_addr, size_t len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct st_fdma_chan *fchan;
	struct st_fdma_desc *fdesc;
	int sg_len, i;

	fchan = st_fdma_prep_common(chan, len, direction);
	if (!fchan)
		return NULL;

	if (!period_len)
		return NULL;

	if (config_reqctrl(fchan, direction)) {
		dev_err(fchan->fdev->dev, "bad width or direction\n");
		return NULL;
	}

	/* the buffer length must be a multiple of period_len */
	if (len % period_len != 0) {
		dev_err(fchan->fdev->dev, "len is not multiple of period\n");
		return NULL;
	}

	sg_len = len / period_len;
	fdesc = st_fdma_alloc_desc(fchan, sg_len);
	if (!fdesc) {
		dev_err(fchan->fdev->dev, "no memory for desc\n");
		return NULL;
	}

	fdesc->iscyclic = true;

	for (i = 0; i < sg_len; i++) {
		struct st_fdma_hw_node *hw_node = fdesc->node[i].desc;

		hw_node->next = fdesc->node[(i + 1) % sg_len].pdesc;

		hw_node->control =
			FDMA_NODE_CTRL_REQ_MAP_DREQ(fchan->dreq_line);
		hw_node->control |= FDMA_NODE_CTRL_INT_EON;

		fill_hw_node(hw_node, fchan, direction);

		if (direction == DMA_MEM_TO_DEV)
			hw_node->saddr = buf_addr + (i * period_len);
		else
			hw_node->daddr = buf_addr + (i * period_len);

		hw_node->nbytes = period_len;
		hw_node->generic.length = period_len;
	}

	return vchan_tx_prep(&fchan->vchan, &fdesc->vdesc, flags);
}

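/*
 * Build a slave scatter-gather transfer: one hardware node per scatterlist
 * entry, with only the last node raising an interrupt.
 */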
static struct dma_async_tx_descriptor *st_fdma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct st_fdma_chan *fchan;
	struct st_fdma_desc *fdesc;
	struct st_fdma_hw_node *hw_node;
	struct scatterlist *sg;
	int i;

	fchan = st_fdma_prep_common(chan, sg_len, direction);
	if (!fchan)
		return NULL;

	if (!sgl)
		return NULL;

	fdesc = st_fdma_alloc_desc(fchan, sg_len);
	if (!fdesc) {
		dev_err(fchan->fdev->dev, "no memory for desc\n");
		return NULL;
	}

	fdesc->iscyclic = false;

	for_each_sg(sgl, sg, sg_len, i) {
		hw_node = fdesc->node[i].desc;

		hw_node->next = fdesc->node[(i + 1) % sg_len].pdesc;
		hw_node->control = FDMA_NODE_CTRL_REQ_MAP_DREQ(fchan->dreq_line);

		fill_hw_node(hw_node, fchan, direction);

		if (direction == DMA_MEM_TO_DEV)
			hw_node->saddr = sg_dma_address(sg);
		else
			hw_node->daddr = sg_dma_address(sg);

		hw_node->nbytes = sg_dma_len(sg);
		hw_node->generic.length = sg_dma_len(sg);
	}

	/* interrupt at end of last node */
	hw_node->control |= FDMA_NODE_CTRL_INT_EON;

	return vchan_tx_prep(&fchan->vchan, &fdesc->vdesc, flags);
}

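/*
 * Compute the residue by walking the nodes from last to first: nodes not
 * yet reached contribute their full nbytes, while the node currently in
 * flight contributes the hardware byte counter (FDMA_CNTN).
 */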
static size_t st_fdma_desc_residue(struct st_fdma_chan *fchan,
				   struct virt_dma_desc *vdesc,
				   bool in_progress)
{
	struct st_fdma_desc *fdesc = fchan->fdesc;
	size_t residue = 0;
	dma_addr_t cur_addr = 0;
	int i;

	if (in_progress) {
		cur_addr = fchan_read(fchan, FDMA_CH_CMD_OFST);
		cur_addr &= FDMA_CH_CMD_DATA_MASK;
	}

	for (i = fchan->fdesc->n_nodes - 1 ; i >= 0; i--) {
		if (cur_addr == fdesc->node[i].pdesc) {
			residue += fnode_read(fchan, FDMA_CNTN_OFST);
			break;
		}
		residue += fdesc->node[i].desc->nbytes;
	}

	return residue;
}

static enum dma_status st_fdma_tx_status(struct dma_chan *chan,
					 dma_cookie_t cookie,
					 struct dma_tx_state *txstate)
{
	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&fchan->vchan.lock, flags);
	vd = vchan_find_desc(&fchan->vchan, cookie);
	if (fchan->fdesc && cookie == fchan->fdesc->vdesc.tx.cookie)
		txstate->residue = st_fdma_desc_residue(fchan, vd, true);
	else if (vd)
		txstate->residue = st_fdma_desc_residue(fchan, vd, false);
	else
		txstate->residue = 0;

	spin_unlock_irqrestore(&fchan->vchan.lock, flags);

	return ret;
}

static void st_fdma_issue_pending(struct dma_chan *chan)
{
	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fchan->vchan.lock, flags);

	if (vchan_issue_pending(&fchan->vchan) && !fchan->fdesc)
		st_fdma_xfer_desc(fchan);

	spin_unlock_irqrestore(&fchan->vchan.lock, flags);
}

static int st_fdma_pause(struct dma_chan *chan)
{
	unsigned long flags;
	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
	int ch_id = fchan->vchan.chan.chan_id;
	unsigned long cmd = FDMA_CMD_PAUSE(ch_id);

	dev_dbg(fchan->fdev->dev, "pause chan:%d\n", ch_id);

	spin_lock_irqsave(&fchan->vchan.lock, flags);
	if (fchan->fdesc)
		fdma_write(fchan->fdev, cmd, FDMA_CMD_SET_OFST);
	spin_unlock_irqrestore(&fchan->vchan.lock, flags);

	return 0;
}

static int st_fdma_resume(struct dma_chan *chan)
{
	unsigned long flags;
	unsigned long val;
	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
	int ch_id = fchan->vchan.chan.chan_id;

	dev_dbg(fchan->fdev->dev, "resume chan:%d\n", ch_id);

	spin_lock_irqsave(&fchan->vchan.lock, flags);
	if (fchan->fdesc) {
		val = fchan_read(fchan, FDMA_CH_CMD_OFST);
		val &= FDMA_CH_CMD_DATA_MASK;
		fchan_write(fchan, val, FDMA_CH_CMD_OFST);
	}
	spin_unlock_irqrestore(&fchan->vchan.lock, flags);

	return 0;
}

static int st_fdma_terminate_all(struct dma_chan *chan)
{
	unsigned long flags;
	LIST_HEAD(head);
	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
	int ch_id = fchan->vchan.chan.chan_id;
	unsigned long cmd = FDMA_CMD_PAUSE(ch_id);

	dev_dbg(fchan->fdev->dev, "terminate chan:%d\n", ch_id);

	spin_lock_irqsave(&fchan->vchan.lock, flags);
	fdma_write(fchan->fdev, cmd, FDMA_CMD_SET_OFST);
	fchan->fdesc = NULL;
	vchan_get_all_descriptors(&fchan->vchan, &head);
	spin_unlock_irqrestore(&fchan->vchan.lock, flags);
	vchan_dma_desc_free_list(&fchan->vchan, &head);

	return 0;
}

static int st_fdma_slave_config(struct dma_chan *chan,
				struct dma_slave_config *slave_cfg)
{
	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);

	memcpy(&fchan->scfg, slave_cfg, sizeof(fchan->scfg));
	return 0;
}

static const struct st_fdma_driverdata fdma_mpe31_stih407_11 = {
	.name = "STiH407",
	.id = 0,
};

static const struct st_fdma_driverdata fdma_mpe31_stih407_12 = {
	.name = "STiH407",
	.id = 1,
};

static const struct st_fdma_driverdata fdma_mpe31_stih407_13 = {
	.name = "STiH407",
	.id = 2,
};

static const struct of_device_id st_fdma_match[] = {
	{ .compatible = "st,stih407-fdma-mpe31-11",
	  .data = &fdma_mpe31_stih407_11 },
	{ .compatible = "st,stih407-fdma-mpe31-12",
	  .data = &fdma_mpe31_stih407_12 },
	{ .compatible = "st,stih407-fdma-mpe31-13",
	  .data = &fdma_mpe31_stih407_13 },
	{},
};
MODULE_DEVICE_TABLE(of, st_fdma_match);

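/*
 * Derive the SLIM firmware name from the driver data (e.g.
 * "fdma_STiH407_0.elf") and read the channel count from the
 * "dma-channels" DT property.
 */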
static int st_fdma_parse_dt(struct platform_device *pdev,
			    const struct st_fdma_driverdata *drvdata,
			    struct st_fdma_dev *fdev)
{
	snprintf(fdev->fw_name, FW_NAME_SIZE, "fdma_%s_%d.elf",
		 drvdata->name, drvdata->id);

	return of_property_read_u32(pdev->dev.of_node, "dma-channels",
				    &fdev->nr_channels);
}

#define FDMA_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

static void st_fdma_free(struct st_fdma_dev *fdev)
{
	struct st_fdma_chan *fchan;
	int i;

	for (i = 0; i < fdev->nr_channels; i++) {
		fchan = &fdev->chans[i];
		list_del(&fchan->vchan.chan.device_node);
		tasklet_kill(&fchan->vchan.task);
	}
}

static int st_fdma_probe(struct platform_device *pdev)
{
	struct st_fdma_dev *fdev;
	const struct of_device_id *match;
	struct device_node *np = pdev->dev.of_node;
	const struct st_fdma_driverdata *drvdata;
	int ret, i;

	match = of_match_device(st_fdma_match, &pdev->dev);
	if (!match || !match->data) {
		dev_err(&pdev->dev, "No device match found\n");
		return -ENODEV;
	}

	drvdata = match->data;

	fdev = devm_kzalloc(&pdev->dev, sizeof(*fdev), GFP_KERNEL);
	if (!fdev)
		return -ENOMEM;

	ret = st_fdma_parse_dt(pdev, drvdata, fdev);
	if (ret) {
		dev_err(&pdev->dev, "unable to find platform data\n");
		goto err;
	}

	fdev->chans = devm_kcalloc(&pdev->dev, fdev->nr_channels,
				   sizeof(struct st_fdma_chan), GFP_KERNEL);
	if (!fdev->chans)
		return -ENOMEM;

	fdev->dev = &pdev->dev;
	fdev->drvdata = drvdata;
	platform_set_drvdata(pdev, fdev);

	fdev->irq = platform_get_irq(pdev, 0);
	if (fdev->irq < 0)
		return -EINVAL;

	ret = devm_request_irq(&pdev->dev, fdev->irq, st_fdma_irq_handler, 0,
			       dev_name(&pdev->dev), fdev);
	if (ret) {
		dev_err(&pdev->dev, "Failed to request irq (%d)\n", ret);
		goto err;
	}

	fdev->slim_rproc = st_slim_rproc_alloc(pdev, fdev->fw_name);
	if (IS_ERR(fdev->slim_rproc)) {
		ret = PTR_ERR(fdev->slim_rproc);
		dev_err(&pdev->dev, "slim_rproc_alloc failed (%d)\n", ret);
		goto err;
	}

	/* Initialise list of FDMA channels */
	INIT_LIST_HEAD(&fdev->dma_device.channels);
	for (i = 0; i < fdev->nr_channels; i++) {
		struct st_fdma_chan *fchan = &fdev->chans[i];

		fchan->fdev = fdev;
		fchan->vchan.desc_free = st_fdma_free_desc;
		vchan_init(&fchan->vchan, &fdev->dma_device);
	}

	/* Initialise the FDMA dreq (reserve 0 & 31 for FDMA use) */
	fdev->dreq_mask = BIT(0) | BIT(31);

	dma_cap_set(DMA_SLAVE, fdev->dma_device.cap_mask);
	dma_cap_set(DMA_CYCLIC, fdev->dma_device.cap_mask);
	dma_cap_set(DMA_MEMCPY, fdev->dma_device.cap_mask);

	fdev->dma_device.dev = &pdev->dev;
	fdev->dma_device.device_alloc_chan_resources = st_fdma_alloc_chan_res;
	fdev->dma_device.device_free_chan_resources = st_fdma_free_chan_res;
	fdev->dma_device.device_prep_dma_cyclic = st_fdma_prep_dma_cyclic;
	fdev->dma_device.device_prep_slave_sg = st_fdma_prep_slave_sg;
	fdev->dma_device.device_prep_dma_memcpy = st_fdma_prep_dma_memcpy;
	fdev->dma_device.device_tx_status = st_fdma_tx_status;
	fdev->dma_device.device_issue_pending = st_fdma_issue_pending;
	fdev->dma_device.device_terminate_all = st_fdma_terminate_all;
	fdev->dma_device.device_config = st_fdma_slave_config;
	fdev->dma_device.device_pause = st_fdma_pause;
	fdev->dma_device.device_resume = st_fdma_resume;

	fdev->dma_device.src_addr_widths = FDMA_DMA_BUSWIDTHS;
	fdev->dma_device.dst_addr_widths = FDMA_DMA_BUSWIDTHS;
	fdev->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	fdev->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	ret = dmaenginem_async_device_register(&fdev->dma_device);
	if (ret) {
		dev_err(&pdev->dev,
			"Failed to register DMA device (%d)\n", ret);
		goto err_rproc;
	}

	ret = of_dma_controller_register(np, st_fdma_of_xlate, fdev);
	if (ret) {
		dev_err(&pdev->dev,
			"Failed to register controller (%d)\n", ret);
		goto err_rproc;
	}

	dev_info(&pdev->dev, "ST FDMA engine driver, irq:%d\n", fdev->irq);

	return 0;

err_rproc:
	st_fdma_free(fdev);
	st_slim_rproc_put(fdev->slim_rproc);
err:
	return ret;
}

static int st_fdma_remove(struct platform_device *pdev)
{
	struct st_fdma_dev *fdev = platform_get_drvdata(pdev);

	devm_free_irq(&pdev->dev, fdev->irq, fdev);
	st_slim_rproc_put(fdev->slim_rproc);
	of_dma_controller_free(pdev->dev.of_node);

	return 0;
}

static struct platform_driver st_fdma_platform_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = st_fdma_match,
	},
	.probe = st_fdma_probe,
	.remove = st_fdma_remove,
};
module_platform_driver(st_fdma_platform_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("STMicroelectronics FDMA engine driver");
MODULE_AUTHOR("Ludovic.barre <Ludovic.barre@st.com>");
MODULE_AUTHOR("Peter Griffin <peter.griffin@linaro.org>");
MODULE_ALIAS("platform:" DRIVER_NAME);