dpaa2-qdma.c

// SPDX-License-Identifier: GPL-2.0
// Copyright 2019 NXP

#include <linux/init.h>
#include <linux/module.h>
#include <linux/dmapool.h>
#include <linux/of_irq.h>
#include <linux/iommu.h>
#include <linux/sys_soc.h>
#include <linux/fsl/mc.h>
#include <soc/fsl/dpaa2-io.h>

#include "../virt-dma.h"
#include "dpdmai.h"
#include "dpaa2-qdma.h"

static bool smmu_disable = true;

static struct dpaa2_qdma_chan *to_dpaa2_qdma_chan(struct dma_chan *chan)
{
        return container_of(chan, struct dpaa2_qdma_chan, vchan.chan);
}

static struct dpaa2_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd)
{
        return container_of(vd, struct dpaa2_qdma_comp, vdesc);
}
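
/*
 * Allocate the per-channel DMA pools used for frame descriptors (FD),
 * frame list entries and source/destination descriptors (SDD).
 */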
static int dpaa2_qdma_alloc_chan_resources(struct dma_chan *chan)
{
        struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
        struct dpaa2_qdma_engine *dpaa2_qdma = dpaa2_chan->qdma;
        struct device *dev = &dpaa2_qdma->priv->dpdmai_dev->dev;

        dpaa2_chan->fd_pool = dma_pool_create("fd_pool", dev,
                                              sizeof(struct dpaa2_fd),
                                              sizeof(struct dpaa2_fd), 0);
        if (!dpaa2_chan->fd_pool)
                goto err;

        dpaa2_chan->fl_pool = dma_pool_create("fl_pool", dev,
                                              sizeof(struct dpaa2_fl_entry),
                                              sizeof(struct dpaa2_fl_entry), 0);
        if (!dpaa2_chan->fl_pool)
                goto err_fd;

        dpaa2_chan->sdd_pool =
                dma_pool_create("sdd_pool", dev,
                                sizeof(struct dpaa2_qdma_sd_d),
                                sizeof(struct dpaa2_qdma_sd_d), 0);
        if (!dpaa2_chan->sdd_pool)
                goto err_fl;

        return dpaa2_qdma->desc_allocated++;

err_fl:
        dma_pool_destroy(dpaa2_chan->fl_pool);
err_fd:
        dma_pool_destroy(dpaa2_chan->fd_pool);
err:
        return -ENOMEM;
}
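
/* Tear down a channel: free outstanding descriptors and the DMA pools. */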
static void dpaa2_qdma_free_chan_resources(struct dma_chan *chan)
{
        struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
        struct dpaa2_qdma_engine *dpaa2_qdma = dpaa2_chan->qdma;
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&dpaa2_chan->vchan.lock, flags);
        vchan_get_all_descriptors(&dpaa2_chan->vchan, &head);
        spin_unlock_irqrestore(&dpaa2_chan->vchan.lock, flags);

        vchan_dma_desc_free_list(&dpaa2_chan->vchan, &head);

        dpaa2_dpdmai_free_comp(dpaa2_chan, &dpaa2_chan->comp_used);
        dpaa2_dpdmai_free_comp(dpaa2_chan, &dpaa2_chan->comp_free);

        dma_pool_destroy(dpaa2_chan->fd_pool);
        dma_pool_destroy(dpaa2_chan->fl_pool);
        dma_pool_destroy(dpaa2_chan->sdd_pool);
        dpaa2_qdma->desc_allocated--;
}

/*
 * Request a command descriptor for enqueue.
 */
static struct dpaa2_qdma_comp *
dpaa2_qdma_request_desc(struct dpaa2_qdma_chan *dpaa2_chan)
{
        struct dpaa2_qdma_priv *qdma_priv = dpaa2_chan->qdma->priv;
        struct device *dev = &qdma_priv->dpdmai_dev->dev;
        struct dpaa2_qdma_comp *comp_temp = NULL;
        unsigned long flags;

        spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
        if (list_empty(&dpaa2_chan->comp_free)) {
                spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
                comp_temp = kzalloc(sizeof(*comp_temp), GFP_NOWAIT);
                if (!comp_temp)
                        goto err;

                comp_temp->fd_virt_addr =
                        dma_pool_alloc(dpaa2_chan->fd_pool, GFP_NOWAIT,
                                       &comp_temp->fd_bus_addr);
                if (!comp_temp->fd_virt_addr)
                        goto err_comp;

                comp_temp->fl_virt_addr =
                        dma_pool_alloc(dpaa2_chan->fl_pool, GFP_NOWAIT,
                                       &comp_temp->fl_bus_addr);
                if (!comp_temp->fl_virt_addr)
                        goto err_fd_virt;

                comp_temp->desc_virt_addr =
                        dma_pool_alloc(dpaa2_chan->sdd_pool, GFP_NOWAIT,
                                       &comp_temp->desc_bus_addr);
                if (!comp_temp->desc_virt_addr)
                        goto err_fl_virt;

                comp_temp->qchan = dpaa2_chan;
                return comp_temp;
        }

        comp_temp = list_first_entry(&dpaa2_chan->comp_free,
                                     struct dpaa2_qdma_comp, list);
        list_del(&comp_temp->list);
        spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);

        comp_temp->qchan = dpaa2_chan;
        return comp_temp;

err_fl_virt:
        dma_pool_free(dpaa2_chan->fl_pool,
                      comp_temp->fl_virt_addr,
                      comp_temp->fl_bus_addr);
err_fd_virt:
        dma_pool_free(dpaa2_chan->fd_pool,
                      comp_temp->fd_virt_addr,
                      comp_temp->fd_bus_addr);
err_comp:
        kfree(comp_temp);
err:
        dev_err(dev, "Failed to request descriptor\n");
        return NULL;
}
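
/* Build the frame descriptor that points at the channel's frame list. */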
static void
dpaa2_qdma_populate_fd(u32 format, struct dpaa2_qdma_comp *dpaa2_comp)
{
        struct dpaa2_fd *fd;

        fd = dpaa2_comp->fd_virt_addr;
        memset(fd, 0, sizeof(struct dpaa2_fd));

        /* fd populated */
        dpaa2_fd_set_addr(fd, dpaa2_comp->fl_bus_addr);

        /*
         * Bypass memory translation, frame list format, short length disable;
         * BMT must be disabled when fsl-mc uses IOVA addresses.
         */
        if (smmu_disable)
                dpaa2_fd_set_bpid(fd, QMAN_FD_BMT_ENABLE);
        dpaa2_fd_set_format(fd, QMAN_FD_FMT_ENABLE | QMAN_FD_SL_DISABLE);

        dpaa2_fd_set_frc(fd, format | QDMA_SER_CTX);
}

/* first frame list for descriptor buffer */
static void
dpaa2_qdma_populate_first_framel(struct dpaa2_fl_entry *f_list,
                                 struct dpaa2_qdma_comp *dpaa2_comp,
                                 bool wrt_changed)
{
        struct dpaa2_qdma_sd_d *sdd;

        sdd = dpaa2_comp->desc_virt_addr;
        memset(sdd, 0, 2 * (sizeof(*sdd)));

        /* source descriptor CMD */
        sdd->cmd = cpu_to_le32(QDMA_SD_CMD_RDTTYPE_COHERENT);
        sdd++;

        /* dest descriptor CMD */
        if (wrt_changed)
                sdd->cmd = cpu_to_le32(LX2160_QDMA_DD_CMD_WRTTYPE_COHERENT);
        else
                sdd->cmd = cpu_to_le32(QDMA_DD_CMD_WRTTYPE_COHERENT);

        memset(f_list, 0, sizeof(struct dpaa2_fl_entry));

        /* first frame list to source descriptor */
        dpaa2_fl_set_addr(f_list, dpaa2_comp->desc_bus_addr);
        dpaa2_fl_set_len(f_list, 0x20);
        dpaa2_fl_set_format(f_list, QDMA_FL_FMT_SBF | QDMA_FL_SL_LONG);

        /* bypass memory translation */
        if (smmu_disable)
                f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE);
}

/* source and destination frame list */
static void
dpaa2_qdma_populate_frames(struct dpaa2_fl_entry *f_list,
                           dma_addr_t dst, dma_addr_t src,
                           size_t len, uint8_t fmt)
{
        /* source frame list to source buffer */
        memset(f_list, 0, sizeof(struct dpaa2_fl_entry));
        dpaa2_fl_set_addr(f_list, src);
        dpaa2_fl_set_len(f_list, len);

        /* single buffer frame or scatter gather frame */
        dpaa2_fl_set_format(f_list, (fmt | QDMA_FL_SL_LONG));

        /* bypass memory translation */
        if (smmu_disable)
                f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE);

        f_list++;

        /* destination frame list to destination buffer */
        memset(f_list, 0, sizeof(struct dpaa2_fl_entry));
        dpaa2_fl_set_addr(f_list, dst);
        dpaa2_fl_set_len(f_list, len);
        dpaa2_fl_set_format(f_list, (fmt | QDMA_FL_SL_LONG));

        /* single buffer frame or scatter gather frame */
        dpaa2_fl_set_final(f_list, QDMA_FL_F);

        /* bypass memory translation */
        if (smmu_disable)
                f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE);
}

static struct dma_async_tx_descriptor
*dpaa2_qdma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst,
                        dma_addr_t src, size_t len, ulong flags)
{
        struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
        struct dpaa2_qdma_engine *dpaa2_qdma;
        struct dpaa2_qdma_comp *dpaa2_comp;
        struct dpaa2_fl_entry *f_list;
        bool wrt_changed;

        dpaa2_qdma = dpaa2_chan->qdma;
        dpaa2_comp = dpaa2_qdma_request_desc(dpaa2_chan);
        if (!dpaa2_comp)
                return NULL;

        wrt_changed = (bool)dpaa2_qdma->qdma_wrtype_fixup;

        /* populate Frame descriptor */
        dpaa2_qdma_populate_fd(QDMA_FD_LONG_FORMAT, dpaa2_comp);

        f_list = dpaa2_comp->fl_virt_addr;

        /* first frame list for descriptor buffer (long format) */
        dpaa2_qdma_populate_first_framel(f_list, dpaa2_comp, wrt_changed);

        f_list++;

        dpaa2_qdma_populate_frames(f_list, dst, src, len, QDMA_FL_FMT_SBF);

        return vchan_tx_prep(&dpaa2_chan->vchan, &dpaa2_comp->vdesc, flags);
}
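
/* Enqueue the next pending descriptor to the channel's request FQ via QBMan. */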
static void dpaa2_qdma_issue_pending(struct dma_chan *chan)
{
        struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
        struct dpaa2_qdma_comp *dpaa2_comp;
        struct virt_dma_desc *vdesc;
        struct dpaa2_fd *fd;
        unsigned long flags;
        int err;

        spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
        spin_lock(&dpaa2_chan->vchan.lock);
        if (vchan_issue_pending(&dpaa2_chan->vchan)) {
                vdesc = vchan_next_desc(&dpaa2_chan->vchan);
                if (!vdesc)
                        goto err_enqueue;
                dpaa2_comp = to_fsl_qdma_comp(vdesc);

                fd = dpaa2_comp->fd_virt_addr;

                list_del(&vdesc->node);
                list_add_tail(&dpaa2_comp->list, &dpaa2_chan->comp_used);

                err = dpaa2_io_service_enqueue_fq(NULL, dpaa2_chan->fqid, fd);
                if (err) {
                        list_del(&dpaa2_comp->list);
                        list_add_tail(&dpaa2_comp->list,
                                      &dpaa2_chan->comp_free);
                }
        }
err_enqueue:
        spin_unlock(&dpaa2_chan->vchan.lock);
        spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
}
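
/*
 * Open the DPDMAI object, check its API version and read the rx/tx
 * queue attributes for each priority pair.
 */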
static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
{
        struct dpaa2_qdma_priv_per_prio *ppriv;
        struct device *dev = &ls_dev->dev;
        struct dpaa2_qdma_priv *priv;
        u8 prio_def = DPDMAI_PRIO_NUM;
        int err = -EINVAL;
        int i;

        priv = dev_get_drvdata(dev);

        priv->dev = dev;
        priv->dpqdma_id = ls_dev->obj_desc.id;

        /* Get the handle for the DPDMAI this interface is associated with */
        err = dpdmai_open(priv->mc_io, 0, priv->dpqdma_id, &ls_dev->mc_handle);
        if (err) {
                dev_err(dev, "dpdmai_open() failed\n");
                return err;
        }

        dev_dbg(dev, "Opened dpdmai object successfully\n");

        err = dpdmai_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
                                    &priv->dpdmai_attr);
        if (err) {
                dev_err(dev, "dpdmai_get_attributes() failed\n");
                goto exit;
        }

        if (priv->dpdmai_attr.version.major > DPDMAI_VER_MAJOR) {
                err = -EINVAL;
                dev_err(dev, "DPDMAI major version mismatch\n"
                        "Found %u.%u, supported version is %u.%u\n",
                        priv->dpdmai_attr.version.major,
                        priv->dpdmai_attr.version.minor,
                        DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
                goto exit;
        }

        if (priv->dpdmai_attr.version.minor > DPDMAI_VER_MINOR) {
                err = -EINVAL;
                dev_err(dev, "DPDMAI minor version mismatch\n"
                        "Found %u.%u, supported version is %u.%u\n",
                        priv->dpdmai_attr.version.major,
                        priv->dpdmai_attr.version.minor,
                        DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
                goto exit;
        }

        priv->num_pairs = min(priv->dpdmai_attr.num_of_priorities, prio_def);
        ppriv = kcalloc(priv->num_pairs, sizeof(*ppriv), GFP_KERNEL);
        if (!ppriv) {
                err = -ENOMEM;
                goto exit;
        }
        priv->ppriv = ppriv;

        for (i = 0; i < priv->num_pairs; i++) {
                err = dpdmai_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
                                          i, &priv->rx_queue_attr[i]);
                if (err) {
                        dev_err(dev, "dpdmai_get_rx_queue() failed\n");
                        goto exit;
                }
                ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;

                err = dpdmai_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle,
                                          i, &priv->tx_fqid[i]);
                if (err) {
                        dev_err(dev, "dpdmai_get_tx_queue() failed\n");
                        goto exit;
                }
                ppriv->req_fqid = priv->tx_fqid[i];
                ppriv->prio = i;
                ppriv->priv = priv;
                ppriv++;
        }

        return 0;
exit:
        dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
        return err;
}
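
/*
 * Frame queue data-availability notification callback: pull completed
 * frames from the response FQ, match each one against the in-flight
 * descriptors by FD address, complete the corresponding cookie and
 * re-arm the notification.
 */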
static void dpaa2_qdma_fqdan_cb(struct dpaa2_io_notification_ctx *ctx)
{
        struct dpaa2_qdma_priv_per_prio *ppriv = container_of(ctx,
                        struct dpaa2_qdma_priv_per_prio, nctx);
        struct dpaa2_qdma_comp *dpaa2_comp, *_comp_tmp;
        struct dpaa2_qdma_priv *priv = ppriv->priv;
        u32 n_chans = priv->dpaa2_qdma->n_chans;
        struct dpaa2_qdma_chan *qchan;
        const struct dpaa2_fd *fd_eq;
        const struct dpaa2_fd *fd;
        struct dpaa2_dq *dq;
        int is_last = 0;
        int found;
        u8 status;
        int err;
        int i;

        do {
                err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid,
                                               ppriv->store);
        } while (err);

        while (!is_last) {
                do {
                        dq = dpaa2_io_store_next(ppriv->store, &is_last);
                } while (!is_last && !dq);
                if (!dq) {
                        dev_err(priv->dev, "FQID returned no valid frames!\n");
                        continue;
                }

                /* obtain FD and process the error */
                fd = dpaa2_dq_fd(dq);

                status = dpaa2_fd_get_ctrl(fd) & 0xff;
                if (status)
                        dev_err(priv->dev, "FD error occurred\n");
                found = 0;
                for (i = 0; i < n_chans; i++) {
                        qchan = &priv->dpaa2_qdma->chans[i];
                        spin_lock(&qchan->queue_lock);
                        if (list_empty(&qchan->comp_used)) {
                                spin_unlock(&qchan->queue_lock);
                                continue;
                        }
                        list_for_each_entry_safe(dpaa2_comp, _comp_tmp,
                                                 &qchan->comp_used, list) {
                                fd_eq = dpaa2_comp->fd_virt_addr;

                                if (le64_to_cpu(fd_eq->simple.addr) ==
                                    le64_to_cpu(fd->simple.addr)) {
                                        spin_lock(&qchan->vchan.lock);
                                        vchan_cookie_complete(&dpaa2_comp->vdesc);
                                        spin_unlock(&qchan->vchan.lock);
                                        found = 1;
                                        break;
                                }
                        }
                        spin_unlock(&qchan->queue_lock);
                        if (found)
                                break;
                }
        }

        dpaa2_io_service_rearm(NULL, ctx);
}
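
/* Register a DPIO notification context and dequeue store per priority. */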
static int __cold dpaa2_qdma_dpio_setup(struct dpaa2_qdma_priv *priv)
{
        struct dpaa2_qdma_priv_per_prio *ppriv;
        struct device *dev = priv->dev;
        int err = -EINVAL;
        int i, num;

        num = priv->num_pairs;
        ppriv = priv->ppriv;
        for (i = 0; i < num; i++) {
                ppriv->nctx.is_cdan = 0;
                ppriv->nctx.desired_cpu = DPAA2_IO_ANY_CPU;
                ppriv->nctx.id = ppriv->rsp_fqid;
                ppriv->nctx.cb = dpaa2_qdma_fqdan_cb;
                err = dpaa2_io_service_register(NULL, &ppriv->nctx, dev);
                if (err) {
                        dev_err(dev, "Notification register failed\n");
                        goto err_service;
                }

                ppriv->store =
                        dpaa2_io_store_create(DPAA2_QDMA_STORE_SIZE, dev);
                if (!ppriv->store) {
                        err = -ENOMEM;
                        dev_err(dev, "dpaa2_io_store_create() failed\n");
                        goto err_store;
                }

                ppriv++;
        }
        return 0;

err_store:
        dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
err_service:
        ppriv--;
        while (ppriv >= priv->ppriv) {
                dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
                dpaa2_io_store_destroy(ppriv->store);
                ppriv--;
        }
        return err;
}

static void dpaa2_dpmai_store_free(struct dpaa2_qdma_priv *priv)
{
        struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
        int i;

        for (i = 0; i < priv->num_pairs; i++) {
                dpaa2_io_store_destroy(ppriv->store);
                ppriv++;
        }
}

static void dpaa2_dpdmai_dpio_free(struct dpaa2_qdma_priv *priv)
{
        struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
        struct device *dev = priv->dev;
        int i;

        for (i = 0; i < priv->num_pairs; i++) {
                dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
                ppriv++;
        }
}
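
/* Bind each DPDMAI rx queue to its DPIO so completions raise FQDAN notifications. */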
static int __cold dpaa2_dpdmai_bind(struct dpaa2_qdma_priv *priv)
{
        struct dpdmai_rx_queue_cfg rx_queue_cfg;
        struct dpaa2_qdma_priv_per_prio *ppriv;
        struct device *dev = priv->dev;
        struct fsl_mc_device *ls_dev;
        int i, num;
        int err;

        ls_dev = to_fsl_mc_device(dev);
        num = priv->num_pairs;
        ppriv = priv->ppriv;
        for (i = 0; i < num; i++) {
                rx_queue_cfg.options = DPDMAI_QUEUE_OPT_USER_CTX |
                                       DPDMAI_QUEUE_OPT_DEST;
                rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
                rx_queue_cfg.dest_cfg.dest_type = DPDMAI_DEST_DPIO;
                rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
                rx_queue_cfg.dest_cfg.priority = ppriv->prio;
                err = dpdmai_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
                                          rx_queue_cfg.dest_cfg.priority,
                                          &rx_queue_cfg);
                if (err) {
                        dev_err(dev, "dpdmai_set_rx_queue() failed\n");
                        return err;
                }

                ppriv++;
        }

        return 0;
}

static int __cold dpaa2_dpdmai_dpio_unbind(struct dpaa2_qdma_priv *priv)
{
        struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
        struct device *dev = priv->dev;
        struct fsl_mc_device *ls_dev;
        int err = 0;
        int i;

        ls_dev = to_fsl_mc_device(dev);
        for (i = 0; i < priv->num_pairs; i++) {
                ppriv->nctx.qman64 = 0;
                ppriv->nctx.dpio_id = 0;
                ppriv++;
        }

        err = dpdmai_reset(priv->mc_io, 0, ls_dev->mc_handle);
        if (err)
                dev_err(dev, "dpdmai_reset() failed\n");

        return err;
}
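
/* Release all completion descriptors on the given list back to the pools. */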
static void dpaa2_dpdmai_free_comp(struct dpaa2_qdma_chan *qchan,
                                   struct list_head *head)
{
        struct dpaa2_qdma_comp *comp_tmp, *_comp_tmp;
        unsigned long flags;

        list_for_each_entry_safe(comp_tmp, _comp_tmp,
                                 head, list) {
                spin_lock_irqsave(&qchan->queue_lock, flags);
                list_del(&comp_tmp->list);
                spin_unlock_irqrestore(&qchan->queue_lock, flags);
                dma_pool_free(qchan->fd_pool,
                              comp_tmp->fd_virt_addr,
                              comp_tmp->fd_bus_addr);
                dma_pool_free(qchan->fl_pool,
                              comp_tmp->fl_virt_addr,
                              comp_tmp->fl_bus_addr);
                dma_pool_free(qchan->sdd_pool,
                              comp_tmp->desc_virt_addr,
                              comp_tmp->desc_bus_addr);
                kfree(comp_tmp);
        }
}

static void dpaa2_dpdmai_free_channels(struct dpaa2_qdma_engine *dpaa2_qdma)
{
        struct dpaa2_qdma_chan *qchan;
        int num, i;

        num = dpaa2_qdma->n_chans;
        for (i = 0; i < num; i++) {
                qchan = &dpaa2_qdma->chans[i];
                dpaa2_dpdmai_free_comp(qchan, &qchan->comp_used);
                dpaa2_dpdmai_free_comp(qchan, &qchan->comp_free);
                dma_pool_destroy(qchan->fd_pool);
                dma_pool_destroy(qchan->fl_pool);
                dma_pool_destroy(qchan->sdd_pool);
        }
}
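
/* Recycle a completed descriptor onto the channel's free list. */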
static void dpaa2_qdma_free_desc(struct virt_dma_desc *vdesc)
{
        struct dpaa2_qdma_comp *dpaa2_comp;
        struct dpaa2_qdma_chan *qchan;
        unsigned long flags;

        dpaa2_comp = to_fsl_qdma_comp(vdesc);
        qchan = dpaa2_comp->qchan;
        spin_lock_irqsave(&qchan->queue_lock, flags);
        list_del(&dpaa2_comp->list);
        list_add_tail(&dpaa2_comp->list, &qchan->comp_free);
        spin_unlock_irqrestore(&qchan->queue_lock, flags);
}
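
/* Initialize the virt-dma channels and spread them across the tx FQs. */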
static int dpaa2_dpdmai_init_channels(struct dpaa2_qdma_engine *dpaa2_qdma)
{
        struct dpaa2_qdma_priv *priv = dpaa2_qdma->priv;
        struct dpaa2_qdma_chan *dpaa2_chan;
        int num = priv->num_pairs;
        int i;

        INIT_LIST_HEAD(&dpaa2_qdma->dma_dev.channels);
        for (i = 0; i < dpaa2_qdma->n_chans; i++) {
                dpaa2_chan = &dpaa2_qdma->chans[i];
                dpaa2_chan->qdma = dpaa2_qdma;
                dpaa2_chan->fqid = priv->tx_fqid[i % num];
                dpaa2_chan->vchan.desc_free = dpaa2_qdma_free_desc;
                vchan_init(&dpaa2_chan->vchan, &dpaa2_qdma->dma_dev);
                spin_lock_init(&dpaa2_chan->queue_lock);
                INIT_LIST_HEAD(&dpaa2_chan->comp_used);
                INIT_LIST_HEAD(&dpaa2_chan->comp_free);
        }
        return 0;
}
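
/*
 * Probe: allocate an MC portal, set up the DPDMAI object and DPIO
 * notifications, bind them together, enable the DPDMAI and register
 * the dmaengine device.
 */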
static int dpaa2_qdma_probe(struct fsl_mc_device *dpdmai_dev)
{
        struct device *dev = &dpdmai_dev->dev;
        struct dpaa2_qdma_engine *dpaa2_qdma;
        struct dpaa2_qdma_priv *priv;
        int err;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;
        dev_set_drvdata(dev, priv);
        priv->dpdmai_dev = dpdmai_dev;

        priv->iommu_domain = iommu_get_domain_for_dev(dev);
        if (priv->iommu_domain)
                smmu_disable = false;

        /* obtain a MC portal */
        err = fsl_mc_portal_allocate(dpdmai_dev, 0, &priv->mc_io);
        if (err) {
                if (err == -ENXIO)
                        err = -EPROBE_DEFER;
                else
                        dev_err(dev, "MC portal allocation failed\n");
                goto err_mcportal;
        }

        /* DPDMAI initialization */
        err = dpaa2_qdma_setup(dpdmai_dev);
        if (err) {
                dev_err(dev, "dpaa2_dpdmai_setup() failed\n");
                goto err_dpdmai_setup;
        }

        /* DPIO */
        err = dpaa2_qdma_dpio_setup(priv);
        if (err) {
                dev_err(dev, "dpaa2_dpdmai_dpio_setup() failed\n");
                goto err_dpio_setup;
        }

        /* DPDMAI binding to DPIO */
        err = dpaa2_dpdmai_bind(priv);
        if (err) {
                dev_err(dev, "dpaa2_dpdmai_bind() failed\n");
                goto err_bind;
        }

        /* DPDMAI enable */
        err = dpdmai_enable(priv->mc_io, 0, dpdmai_dev->mc_handle);
        if (err) {
                dev_err(dev, "dpdmai_enable() failed\n");
                goto err_enable;
        }

        dpaa2_qdma = kzalloc(sizeof(*dpaa2_qdma), GFP_KERNEL);
        if (!dpaa2_qdma) {
                err = -ENOMEM;
                goto err_eng;
        }

        priv->dpaa2_qdma = dpaa2_qdma;
        dpaa2_qdma->priv = priv;

        dpaa2_qdma->desc_allocated = 0;
        dpaa2_qdma->n_chans = NUM_CH;

        dpaa2_dpdmai_init_channels(dpaa2_qdma);

        if (soc_device_match(soc_fixup_tuning))
                dpaa2_qdma->qdma_wrtype_fixup = true;
        else
                dpaa2_qdma->qdma_wrtype_fixup = false;

        dma_cap_set(DMA_PRIVATE, dpaa2_qdma->dma_dev.cap_mask);
        dma_cap_set(DMA_SLAVE, dpaa2_qdma->dma_dev.cap_mask);
        dma_cap_set(DMA_MEMCPY, dpaa2_qdma->dma_dev.cap_mask);

        dpaa2_qdma->dma_dev.dev = dev;
        dpaa2_qdma->dma_dev.device_alloc_chan_resources =
                dpaa2_qdma_alloc_chan_resources;
        dpaa2_qdma->dma_dev.device_free_chan_resources =
                dpaa2_qdma_free_chan_resources;
        dpaa2_qdma->dma_dev.device_tx_status = dma_cookie_status;
        dpaa2_qdma->dma_dev.device_prep_dma_memcpy = dpaa2_qdma_prep_memcpy;
        dpaa2_qdma->dma_dev.device_issue_pending = dpaa2_qdma_issue_pending;

        err = dma_async_device_register(&dpaa2_qdma->dma_dev);
        if (err) {
                dev_err(dev, "Can't register NXP QDMA engine.\n");
                goto err_dpaa2_qdma;
        }

        return 0;

err_dpaa2_qdma:
        kfree(dpaa2_qdma);
err_eng:
        dpdmai_disable(priv->mc_io, 0, dpdmai_dev->mc_handle);
err_enable:
        dpaa2_dpdmai_dpio_unbind(priv);
err_bind:
        dpaa2_dpmai_store_free(priv);
        dpaa2_dpdmai_dpio_free(priv);
err_dpio_setup:
        kfree(priv->ppriv);
        dpdmai_close(priv->mc_io, 0, dpdmai_dev->mc_handle);
err_dpdmai_setup:
        fsl_mc_portal_free(priv->mc_io);
err_mcportal:
        kfree(priv);
        dev_set_drvdata(dev, NULL);
        return err;
}

static int dpaa2_qdma_remove(struct fsl_mc_device *ls_dev)
{
        struct dpaa2_qdma_engine *dpaa2_qdma;
        struct dpaa2_qdma_priv *priv;
        struct device *dev;

        dev = &ls_dev->dev;
        priv = dev_get_drvdata(dev);
        dpaa2_qdma = priv->dpaa2_qdma;

        dpdmai_disable(priv->mc_io, 0, ls_dev->mc_handle);
        dpaa2_dpdmai_dpio_unbind(priv);
        dpaa2_dpmai_store_free(priv);
        dpaa2_dpdmai_dpio_free(priv);
        dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
        fsl_mc_portal_free(priv->mc_io);
        dev_set_drvdata(dev, NULL);

        dpaa2_dpdmai_free_channels(dpaa2_qdma);

        dma_async_device_unregister(&dpaa2_qdma->dma_dev);
        kfree(priv);
        kfree(dpaa2_qdma);

        return 0;
}

static void dpaa2_qdma_shutdown(struct fsl_mc_device *ls_dev)
{
        struct dpaa2_qdma_priv *priv;
        struct device *dev;

        dev = &ls_dev->dev;
        priv = dev_get_drvdata(dev);

        dpdmai_disable(priv->mc_io, 0, ls_dev->mc_handle);
        dpaa2_dpdmai_dpio_unbind(priv);
        dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
        dpdmai_destroy(priv->mc_io, 0, ls_dev->mc_handle);
}

static const struct fsl_mc_device_id dpaa2_qdma_id_table[] = {
        {
                .vendor = FSL_MC_VENDOR_FREESCALE,
                .obj_type = "dpdmai",
        },
        { .vendor = 0x0 }
};

static struct fsl_mc_driver dpaa2_qdma_driver = {
        .driver = {
                .name = "dpaa2-qdma",
                .owner = THIS_MODULE,
        },
        .probe = dpaa2_qdma_probe,
        .remove = dpaa2_qdma_remove,
        .shutdown = dpaa2_qdma_shutdown,
        .match_id_table = dpaa2_qdma_id_table
};

static int __init dpaa2_qdma_driver_init(void)
{
        return fsl_mc_driver_register(&(dpaa2_qdma_driver));
}
late_initcall(dpaa2_qdma_driver_init);

static void __exit fsl_qdma_exit(void)
{
        fsl_mc_driver_unregister(&(dpaa2_qdma_driver));
}
module_exit(fsl_qdma_exit);

MODULE_ALIAS("platform:fsl-dpaa2-qdma");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("NXP Layerscape DPAA2 qDMA engine driver");
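
/*
 * A minimal, hypothetical sketch of how a client could drive this engine
 * through the generic dmaengine memcpy API. It is not part of the driver
 * and only illustrates the call flow; the DMA-mapped buffers (dst, src,
 * len) are assumed to already be set up by the caller.
 *
 *        dma_cap_mask_t mask;
 *        struct dma_chan *chan;
 *        struct dma_async_tx_descriptor *tx;
 *        dma_cookie_t cookie;
 *
 *        dma_cap_zero(mask);
 *        dma_cap_set(DMA_MEMCPY, mask);
 *        chan = dma_request_channel(mask, NULL, NULL);
 *        if (!chan)
 *                return -ENODEV;
 *
 *        tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
 *        cookie = dmaengine_submit(tx);
 *        dma_async_issue_pending(chan);
 *        dma_sync_wait(chan, cookie);
 *        dma_release_channel(chan);
 */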