k3-udma-glue.c

// SPDX-License-Identifier: GPL-2.0
/*
 * K3 NAVSS DMA glue interface
 *
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
 *
 */

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/soc/ti/k3-ringacc.h>
#include <linux/dma/ti-cppi5.h>
#include <linux/dma/k3-udma-glue.h>

#include "k3-udma.h"
#include "k3-psil-priv.h"

struct k3_udma_glue_common {
	struct device *dev;
	struct udma_dev *udmax;
	const struct udma_tisci_rm *tisci_rm;
	struct k3_ringacc *ringacc;
	u32 src_thread;
	u32 dst_thread;

	u32 hdesc_size;
	bool epib;
	u32 psdata_size;
	u32 swdata_size;
	u32 atype;
};

struct k3_udma_glue_tx_channel {
	struct k3_udma_glue_common common;

	struct udma_tchan *udma_tchanx;
	int udma_tchan_id;

	struct k3_ring *ringtx;
	struct k3_ring *ringtxcq;

	bool psil_paired;

	int virq;

	atomic_t free_pkts;
	bool tx_pause_on_err;
	bool tx_filt_einfo;
	bool tx_filt_pswords;
	bool tx_supr_tdpkt;
};

struct k3_udma_glue_rx_flow {
	struct udma_rflow *udma_rflow;
	int udma_rflow_id;
	struct k3_ring *ringrx;
	struct k3_ring *ringrxfdq;

	int virq;
};

struct k3_udma_glue_rx_channel {
	struct k3_udma_glue_common common;

	struct udma_rchan *udma_rchanx;
	int udma_rchan_id;
	bool remote;

	bool psil_paired;

	u32 swdata_size;
	int flow_id_base;

	struct k3_udma_glue_rx_flow *flows;
	u32 flow_num;
	u32 flows_ready;
};

#define K3_UDMAX_TDOWN_TIMEOUT_US 1000

static int of_k3_udma_glue_parse(struct device_node *udmax_np,
				 struct k3_udma_glue_common *common)
{
	common->ringacc = of_k3_ringacc_get_by_phandle(udmax_np,
						       "ti,ringacc");
	if (IS_ERR(common->ringacc))
		return PTR_ERR(common->ringacc);

	common->udmax = of_xudma_dev_get(udmax_np, NULL);
	if (IS_ERR(common->udmax))
		return PTR_ERR(common->udmax);

	common->tisci_rm = xudma_dev_get_tisci_rm(common->udmax);

	return 0;
}
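
/*
 * Resolve the "dmas"/"dma-names" entry for @name, validate the PSI-L
 * thread direction and the optional atype cell, and look up the PSI-L
 * endpoint configuration (EPIB, psdata size) for the thread. The
 * results are cached in @common.
 */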
static int of_k3_udma_glue_parse_chn(struct device_node *chn_np,
		const char *name, struct k3_udma_glue_common *common,
		bool tx_chn)
{
	struct psil_endpoint_config *ep_config;
	struct of_phandle_args dma_spec;
	u32 thread_id;
	int ret = 0;
	int index;

	if (unlikely(!name))
		return -EINVAL;

	index = of_property_match_string(chn_np, "dma-names", name);
	if (index < 0)
		return index;

	if (of_parse_phandle_with_args(chn_np, "dmas", "#dma-cells", index,
				       &dma_spec))
		return -ENOENT;

	thread_id = dma_spec.args[0];
	if (dma_spec.args_count == 2) {
		if (dma_spec.args[1] > 2) {
			dev_err(common->dev, "Invalid channel atype: %u\n",
				dma_spec.args[1]);
			ret = -EINVAL;
			goto out_put_spec;
		}
		common->atype = dma_spec.args[1];
	}

	if (tx_chn && !(thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) {
		ret = -EINVAL;
		goto out_put_spec;
	}

	if (!tx_chn && (thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) {
		ret = -EINVAL;
		goto out_put_spec;
	}

	/* get psil endpoint config */
	ep_config = psil_get_ep_config(thread_id);
	if (IS_ERR(ep_config)) {
		dev_err(common->dev,
			"No configuration for psi-l thread 0x%04x\n",
			thread_id);
		ret = PTR_ERR(ep_config);
		goto out_put_spec;
	}

	common->epib = ep_config->needs_epib;
	common->psdata_size = ep_config->psd_size;

	if (tx_chn)
		common->dst_thread = thread_id;
	else
		common->src_thread = thread_id;

	ret = of_k3_udma_glue_parse(dma_spec.np, common);

out_put_spec:
	of_node_put(dma_spec.np);
	return ret;
}

static void k3_udma_glue_dump_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
	struct device *dev = tx_chn->common.dev;

	dev_dbg(dev, "dump_tx_chn:\n"
		"udma_tchan_id: %d\n"
		"src_thread: %08x\n"
		"dst_thread: %08x\n",
		tx_chn->udma_tchan_id,
		tx_chn->common.src_thread,
		tx_chn->common.dst_thread);
}

static void k3_udma_glue_dump_tx_rt_chn(struct k3_udma_glue_tx_channel *chn,
					char *mark)
{
	struct device *dev = chn->common.dev;

	dev_dbg(dev, "=== dump ===> %s\n", mark);
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_CTL_REG,
		xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PEER_RT_EN_REG,
		xudma_tchanrt_read(chn->udma_tchanx,
				   UDMA_CHAN_RT_PEER_RT_EN_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PCNT_REG,
		xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_PCNT_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_BCNT_REG,
		xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_BCNT_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_SBCNT_REG,
		xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_SBCNT_REG));
}

static int k3_udma_glue_cfg_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
	const struct udma_tisci_rm *tisci_rm = tx_chn->common.tisci_rm;
	struct ti_sci_msg_rm_udmap_tx_ch_cfg req;

	memset(&req, 0, sizeof(req));

	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.index = tx_chn->udma_tchan_id;
	if (tx_chn->tx_pause_on_err)
		req.tx_pause_on_err = 1;
	if (tx_chn->tx_filt_einfo)
		req.tx_filt_einfo = 1;
	if (tx_chn->tx_filt_pswords)
		req.tx_filt_pswords = 1;
	req.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
	if (tx_chn->tx_supr_tdpkt)
		req.tx_supr_tdpkt = 1;
	req.tx_fetch_size = tx_chn->common.hdesc_size >> 2;
	req.txcq_qnum = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
	req.tx_atype = tx_chn->common.atype;

	return tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
}
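
/*
 * Allocate and set up a TX channel: parse the DT binding, reserve a
 * UDMAP tchan, request and configure the TX/TXCQ ring pair, configure
 * the channel through TISCI and pair the PSI-L threads. On failure the
 * partially initialized channel is released.
 */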
struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
		const char *name, struct k3_udma_glue_tx_channel_cfg *cfg)
{
	struct k3_udma_glue_tx_channel *tx_chn;
	int ret;

	tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL);
	if (!tx_chn)
		return ERR_PTR(-ENOMEM);

	tx_chn->common.dev = dev;
	tx_chn->common.swdata_size = cfg->swdata_size;
	tx_chn->tx_pause_on_err = cfg->tx_pause_on_err;
	tx_chn->tx_filt_einfo = cfg->tx_filt_einfo;
	tx_chn->tx_filt_pswords = cfg->tx_filt_pswords;
	tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt;

	/* parse the udmap channel */
	ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
					&tx_chn->common, true);
	if (ret)
		goto err;

	tx_chn->common.hdesc_size = cppi5_hdesc_calc_size(tx_chn->common.epib,
						tx_chn->common.psdata_size,
						tx_chn->common.swdata_size);

	/* request and cfg UDMAP TX channel */
	tx_chn->udma_tchanx = xudma_tchan_get(tx_chn->common.udmax, -1);
	if (IS_ERR(tx_chn->udma_tchanx)) {
		ret = PTR_ERR(tx_chn->udma_tchanx);
		dev_err(dev, "UDMAX tchanx get err %d\n", ret);
		goto err;
	}
	tx_chn->udma_tchan_id = xudma_tchan_get_id(tx_chn->udma_tchanx);

	atomic_set(&tx_chn->free_pkts, cfg->txcq_cfg.size);

	/* request and cfg rings */
	ret = k3_ringacc_request_rings_pair(tx_chn->common.ringacc,
					    tx_chn->udma_tchan_id, -1,
					    &tx_chn->ringtx,
					    &tx_chn->ringtxcq);
	if (ret) {
		dev_err(dev, "Failed to get TX/TXCQ rings %d\n", ret);
		goto err;
	}

	ret = k3_ringacc_ring_cfg(tx_chn->ringtx, &cfg->tx_cfg);
	if (ret) {
		dev_err(dev, "Failed to cfg ringtx %d\n", ret);
		goto err;
	}

	ret = k3_ringacc_ring_cfg(tx_chn->ringtxcq, &cfg->txcq_cfg);
	if (ret) {
		dev_err(dev, "Failed to cfg ringtxcq %d\n", ret);
		goto err;
	}

	/* request and cfg psi-l */
	tx_chn->common.src_thread =
			xudma_dev_get_psil_base(tx_chn->common.udmax) +
			tx_chn->udma_tchan_id;

	ret = k3_udma_glue_cfg_tx_chn(tx_chn);
	if (ret) {
		dev_err(dev, "Failed to cfg tchan %d\n", ret);
		goto err;
	}

	ret = xudma_navss_psil_pair(tx_chn->common.udmax,
				    tx_chn->common.src_thread,
				    tx_chn->common.dst_thread);
	if (ret) {
		dev_err(dev, "PSI-L request err %d\n", ret);
		goto err;
	}

	tx_chn->psil_paired = true;

	/* reset TX RT registers */
	k3_udma_glue_disable_tx_chn(tx_chn);

	k3_udma_glue_dump_tx_chn(tx_chn);

	return tx_chn;

err:
	k3_udma_glue_release_tx_chn(tx_chn);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_request_tx_chn);

void k3_udma_glue_release_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
	if (tx_chn->psil_paired) {
		xudma_navss_psil_unpair(tx_chn->common.udmax,
					tx_chn->common.src_thread,
					tx_chn->common.dst_thread);
		tx_chn->psil_paired = false;
	}

	if (!IS_ERR_OR_NULL(tx_chn->udma_tchanx))
		xudma_tchan_put(tx_chn->common.udmax,
				tx_chn->udma_tchanx);

	if (tx_chn->ringtxcq)
		k3_ringacc_ring_free(tx_chn->ringtxcq);

	if (tx_chn->ringtx)
		k3_ringacc_ring_free(tx_chn->ringtx);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_release_tx_chn);
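
/*
 * free_pkts tracks TXCQ capacity: it is decremented on push and
 * incremented back when a completed descriptor is popped, so the
 * completion ring can never overflow.
 */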
int k3_udma_glue_push_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
			     struct cppi5_host_desc_t *desc_tx,
			     dma_addr_t desc_dma)
{
	u32 ringtxcq_id;

	if (!atomic_add_unless(&tx_chn->free_pkts, -1, 0))
		return -ENOMEM;

	ringtxcq_id = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
	cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, ringtxcq_id);

	return k3_ringacc_ring_push(tx_chn->ringtx, &desc_dma);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_push_tx_chn);

int k3_udma_glue_pop_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
			    dma_addr_t *desc_dma)
{
	int ret;

	ret = k3_ringacc_ring_pop(tx_chn->ringtxcq, desc_dma);
	if (!ret)
		atomic_inc(&tx_chn->free_pkts);

	return ret;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_pop_tx_chn);

int k3_udma_glue_enable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
			    UDMA_PEER_RT_EN_ENABLE);

	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG,
			    UDMA_CHAN_RT_CTL_EN);

	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn en");
	return 0;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_enable_tx_chn);

void k3_udma_glue_disable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis1");

	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG, 0);

	xudma_tchanrt_write(tx_chn->udma_tchanx,
			    UDMA_CHAN_RT_PEER_RT_EN_REG, 0);

	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis2");
}
EXPORT_SYMBOL_GPL(k3_udma_glue_disable_tx_chn);
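
/*
 * Initiate TX channel teardown. With @sync set, poll the RT control
 * register until the channel disables itself or
 * K3_UDMAX_TDOWN_TIMEOUT_US expires.
 */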
void k3_udma_glue_tdown_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
			       bool sync)
{
	int i = 0;
	u32 val;

	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown1");

	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG,
			    UDMA_CHAN_RT_CTL_EN | UDMA_CHAN_RT_CTL_TDOWN);

	val = xudma_tchanrt_read(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG);

	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
		val = xudma_tchanrt_read(tx_chn->udma_tchanx,
					 UDMA_CHAN_RT_CTL_REG);
		udelay(1);
		if (i > K3_UDMAX_TDOWN_TIMEOUT_US) {
			dev_err(tx_chn->common.dev, "TX tdown timeout\n");
			break;
		}
		i++;
	}

	val = xudma_tchanrt_read(tx_chn->udma_tchanx,
				 UDMA_CHAN_RT_PEER_RT_EN_REG);
	if (sync && (val & UDMA_PEER_RT_EN_ENABLE))
		dev_err(tx_chn->common.dev, "TX tdown peer not stopped\n");
	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown2");
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_tx_chn);

void k3_udma_glue_reset_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
			       void *data,
			       void (*cleanup)(void *data, dma_addr_t desc_dma))
{
	dma_addr_t desc_dma;
	int occ_tx, i, ret;

	/* reset TXCQ as it is not an input to UDMA - expected to be empty */
	if (tx_chn->ringtxcq)
		k3_ringacc_ring_reset(tx_chn->ringtxcq);

	/*
	 * The TXQ has to be reset in a special way as it is an input to
	 * UDMA and its state is cached by UDMA, so:
	 * 1) save the TXQ occupancy
	 * 2) clean up the TXQ and call the .cleanup() callback for each desc
	 * 3) reset the TXQ in a special way
	 */
	occ_tx = k3_ringacc_ring_get_occ(tx_chn->ringtx);
	dev_dbg(tx_chn->common.dev, "TX reset occ_tx %u\n", occ_tx);

	for (i = 0; i < occ_tx; i++) {
		ret = k3_ringacc_ring_pop(tx_chn->ringtx, &desc_dma);
		if (ret) {
			dev_err(tx_chn->common.dev, "TX reset pop %d\n", ret);
			break;
		}
		cleanup(data, desc_dma);
	}

	k3_ringacc_ring_reset_dma(tx_chn->ringtx, occ_tx);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_reset_tx_chn);

u32 k3_udma_glue_tx_get_hdesc_size(struct k3_udma_glue_tx_channel *tx_chn)
{
	return tx_chn->common.hdesc_size;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_hdesc_size);

u32 k3_udma_glue_tx_get_txcq_id(struct k3_udma_glue_tx_channel *tx_chn)
{
	return k3_ringacc_get_ring_id(tx_chn->ringtxcq);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_txcq_id);

int k3_udma_glue_tx_get_irq(struct k3_udma_glue_tx_channel *tx_chn)
{
	tx_chn->virq = k3_ringacc_get_ring_irq_num(tx_chn->ringtxcq);

	return tx_chn->virq;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_irq);
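
/*
 * Configure the UDMAP RX channel (fetch size, flow id range, atype)
 * through TISCI.
 */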
static int k3_udma_glue_cfg_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
	struct ti_sci_msg_rm_udmap_rx_ch_cfg req;
	int ret;

	memset(&req, 0, sizeof(req));

	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID;

	req.nav_id = tisci_rm->tisci_dev_id;
	req.index = rx_chn->udma_rchan_id;
	req.rx_fetch_size = rx_chn->common.hdesc_size >> 2;
	/*
	 * TODO: we can't support the rxcq_qnum/RCHAN[a]_RCQ cfg with the
	 * current sysfw and udmax implementation, so just configure it to
	 * an invalid value.
	 * req.rxcq_qnum = k3_ringacc_get_ring_id(rx_chn->flows[0].ringrx);
	 */
	req.rxcq_qnum = 0xFFFF;
	if (rx_chn->flow_num && rx_chn->flow_id_base != rx_chn->udma_rchan_id) {
		/* Default flow + extra ones */
		req.flowid_start = rx_chn->flow_id_base;
		req.flowid_cnt = rx_chn->flow_num;
	}
	req.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
	req.rx_atype = rx_chn->common.atype;

	ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req);
	if (ret)
		dev_err(rx_chn->common.dev, "rchan%d cfg failed %d\n",
			rx_chn->udma_rchan_id, ret);

	return ret;
}

static void k3_udma_glue_release_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
					 u32 flow_num)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];

	if (IS_ERR_OR_NULL(flow->udma_rflow))
		return;

	if (flow->ringrxfdq)
		k3_ringacc_ring_free(flow->ringrxfdq);

	if (flow->ringrx)
		k3_ringacc_ring_free(flow->ringrx);

	xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
	flow->udma_rflow = NULL;
	rx_chn->flows_ready--;
}
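
/*
 * Set up one RX flow: reserve the rflow, request and configure the
 * RX/RXFDQ ring pair and program the flow through TISCI. For remote
 * channels the destination ring IDs are left as TI_SCI_RESOURCE_NULL.
 */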
static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
				    u32 flow_idx,
				    struct k3_udma_glue_rx_flow_cfg *flow_cfg)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
	struct device *dev = rx_chn->common.dev;
	struct ti_sci_msg_rm_udmap_flow_cfg req;
	int rx_ring_id;
	int rx_ringfdq_id;
	int ret = 0;

	flow->udma_rflow = xudma_rflow_get(rx_chn->common.udmax,
					   flow->udma_rflow_id);
	if (IS_ERR(flow->udma_rflow)) {
		ret = PTR_ERR(flow->udma_rflow);
		dev_err(dev, "UDMAX rflow get err %d\n", ret);
		return ret;
	}

	if (flow->udma_rflow_id != xudma_rflow_get_id(flow->udma_rflow)) {
		ret = -ENODEV;
		goto err_rflow_put;
	}

	/* request and cfg rings */
	ret = k3_ringacc_request_rings_pair(rx_chn->common.ringacc,
					    flow_cfg->ring_rxfdq0_id,
					    flow_cfg->ring_rxq_id,
					    &flow->ringrxfdq,
					    &flow->ringrx);
	if (ret) {
		dev_err(dev, "Failed to get RX/RXFDQ rings %d\n", ret);
		goto err_rflow_put;
	}

	ret = k3_ringacc_ring_cfg(flow->ringrx, &flow_cfg->rx_cfg);
	if (ret) {
		dev_err(dev, "Failed to cfg ringrx %d\n", ret);
		goto err_ringrxfdq_free;
	}

	ret = k3_ringacc_ring_cfg(flow->ringrxfdq, &flow_cfg->rxfdq_cfg);
	if (ret) {
		dev_err(dev, "Failed to cfg ringrxfdq %d\n", ret);
		goto err_ringrxfdq_free;
	}

	if (rx_chn->remote) {
		rx_ring_id = TI_SCI_RESOURCE_NULL;
		rx_ringfdq_id = TI_SCI_RESOURCE_NULL;
	} else {
		rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx);
		rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq);
	}

	memset(&req, 0, sizeof(req));

	req.valid_params =
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.flow_index = flow->udma_rflow_id;
	if (rx_chn->common.epib)
		req.rx_einfo_present = 1;
	if (rx_chn->common.psdata_size)
		req.rx_psinfo_present = 1;
	if (flow_cfg->rx_error_handling)
		req.rx_error_handling = 1;
	req.rx_desc_type = 0;
	req.rx_dest_qnum = rx_ring_id;
	req.rx_src_tag_hi_sel = 0;
	req.rx_src_tag_lo_sel = flow_cfg->src_tag_lo_sel;
	req.rx_dest_tag_hi_sel = 0;
	req.rx_dest_tag_lo_sel = 0;
	req.rx_fdq0_sz0_qnum = rx_ringfdq_id;
	req.rx_fdq1_qnum = rx_ringfdq_id;
	req.rx_fdq2_qnum = rx_ringfdq_id;
	req.rx_fdq3_qnum = rx_ringfdq_id;

	ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
	if (ret) {
		dev_err(dev, "flow%d config failed: %d\n", flow->udma_rflow_id,
			ret);
		goto err_ringrxfdq_free;
	}

	rx_chn->flows_ready++;
	dev_dbg(dev, "flow%d config done. ready:%d\n",
		flow->udma_rflow_id, rx_chn->flows_ready);

	return 0;

err_ringrxfdq_free:
	k3_ringacc_ring_free(flow->ringrxfdq);
	k3_ringacc_ring_free(flow->ringrx);

err_rflow_put:
	xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
	flow->udma_rflow = NULL;

	return ret;
}

static void k3_udma_glue_dump_rx_chn(struct k3_udma_glue_rx_channel *chn)
{
	struct device *dev = chn->common.dev;

	dev_dbg(dev, "dump_rx_chn:\n"
		"udma_rchan_id: %d\n"
		"src_thread: %08x\n"
		"dst_thread: %08x\n"
		"epib: %d\n"
		"hdesc_size: %u\n"
		"psdata_size: %u\n"
		"swdata_size: %u\n"
		"flow_id_base: %d\n"
		"flow_num: %d\n",
		chn->udma_rchan_id,
		chn->common.src_thread,
		chn->common.dst_thread,
		chn->common.epib,
		chn->common.hdesc_size,
		chn->common.psdata_size,
		chn->common.swdata_size,
		chn->flow_id_base,
		chn->flow_num);
}

static void k3_udma_glue_dump_rx_rt_chn(struct k3_udma_glue_rx_channel *chn,
					char *mark)
{
	struct device *dev = chn->common.dev;

	dev_dbg(dev, "=== dump ===> %s\n", mark);
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_CTL_REG,
		xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PEER_RT_EN_REG,
		xudma_rchanrt_read(chn->udma_rchanx,
				   UDMA_CHAN_RT_PEER_RT_EN_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PCNT_REG,
		xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_PCNT_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_BCNT_REG,
		xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_BCNT_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_SBCNT_REG,
		xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_SBCNT_REG));
}

static int
k3_udma_glue_allocate_rx_flows(struct k3_udma_glue_rx_channel *rx_chn,
			       struct k3_udma_glue_rx_channel_cfg *cfg)
{
	int ret;

	/* default rflow */
	if (cfg->flow_id_use_rxchan_id)
		return 0;

	/* not GP rflows */
	if (rx_chn->flow_id_base != -1 &&
	    !xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base))
		return 0;

	/* Allocate range of GP rflows */
	ret = xudma_alloc_gp_rflow_range(rx_chn->common.udmax,
					 rx_chn->flow_id_base,
					 rx_chn->flow_num);
	if (ret < 0) {
		dev_err(rx_chn->common.dev, "UDMAX reserve_rflow %d cnt:%d err: %d\n",
			rx_chn->flow_id_base, rx_chn->flow_num, ret);
		return ret;
	}
	rx_chn->flow_id_base = ret;

	return 0;
}
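
/*
 * Request a host-owned RX channel: reserve a UDMAP rchan, allocate the
 * RX flow range, configure the channel (and the default flow, if given)
 * through TISCI and pair the PSI-L threads.
 */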
static struct k3_udma_glue_rx_channel *
k3_udma_glue_request_rx_chn_priv(struct device *dev, const char *name,
				 struct k3_udma_glue_rx_channel_cfg *cfg)
{
	struct k3_udma_glue_rx_channel *rx_chn;
	int ret, i;

	if (cfg->flow_id_num <= 0)
		return ERR_PTR(-EINVAL);

	if (cfg->flow_id_num != 1 &&
	    (cfg->def_flow_cfg || cfg->flow_id_use_rxchan_id))
		return ERR_PTR(-EINVAL);

	rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
	if (!rx_chn)
		return ERR_PTR(-ENOMEM);

	rx_chn->common.dev = dev;
	rx_chn->common.swdata_size = cfg->swdata_size;
	rx_chn->remote = false;

	/* parse the udmap channel */
	ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
					&rx_chn->common, false);
	if (ret)
		goto err;

	rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
						rx_chn->common.psdata_size,
						rx_chn->common.swdata_size);

	/* request and cfg UDMAP RX channel */
	rx_chn->udma_rchanx = xudma_rchan_get(rx_chn->common.udmax, -1);
	if (IS_ERR(rx_chn->udma_rchanx)) {
		ret = PTR_ERR(rx_chn->udma_rchanx);
		dev_err(dev, "UDMAX rchanx get err %d\n", ret);
		goto err;
	}
	rx_chn->udma_rchan_id = xudma_rchan_get_id(rx_chn->udma_rchanx);

	rx_chn->flow_num = cfg->flow_id_num;
	rx_chn->flow_id_base = cfg->flow_id_base;

	/* Use RX channel id as flow id: target dev can't generate flow_id */
	if (cfg->flow_id_use_rxchan_id)
		rx_chn->flow_id_base = rx_chn->udma_rchan_id;

	rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
				     sizeof(*rx_chn->flows), GFP_KERNEL);
	if (!rx_chn->flows) {
		ret = -ENOMEM;
		goto err;
	}

	ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
	if (ret)
		goto err;

	for (i = 0; i < rx_chn->flow_num; i++)
		rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;

	/* request and cfg psi-l */
	rx_chn->common.dst_thread =
			xudma_dev_get_psil_base(rx_chn->common.udmax) +
			rx_chn->udma_rchan_id;

	ret = k3_udma_glue_cfg_rx_chn(rx_chn);
	if (ret) {
		dev_err(dev, "Failed to cfg rchan %d\n", ret);
		goto err;
	}

	/* init default RX flow only if flow_num = 1 */
	if (cfg->def_flow_cfg) {
		ret = k3_udma_glue_cfg_rx_flow(rx_chn, 0, cfg->def_flow_cfg);
		if (ret)
			goto err;
	}

	ret = xudma_navss_psil_pair(rx_chn->common.udmax,
				    rx_chn->common.src_thread,
				    rx_chn->common.dst_thread);
	if (ret) {
		dev_err(dev, "PSI-L request err %d\n", ret);
		goto err;
	}

	rx_chn->psil_paired = true;

	/* reset RX RT registers */
	k3_udma_glue_disable_rx_chn(rx_chn);

	k3_udma_glue_dump_rx_chn(rx_chn);

	return rx_chn;

err:
	k3_udma_glue_release_rx_chn(rx_chn);
	return ERR_PTR(ret);
}

static struct k3_udma_glue_rx_channel *
k3_udma_glue_request_remote_rx_chn(struct device *dev, const char *name,
				   struct k3_udma_glue_rx_channel_cfg *cfg)
{
	struct k3_udma_glue_rx_channel *rx_chn;
	int ret, i;

	if (cfg->flow_id_num <= 0 ||
	    cfg->flow_id_use_rxchan_id ||
	    cfg->def_flow_cfg ||
	    cfg->flow_id_base < 0)
		return ERR_PTR(-EINVAL);

	/*
	 * A remote RX channel is under the control of a remote CPU core,
	 * so Linux can only request it and manipulate it through its
	 * dedicated RX flows.
	 */
	rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
	if (!rx_chn)
		return ERR_PTR(-ENOMEM);

	rx_chn->common.dev = dev;
	rx_chn->common.swdata_size = cfg->swdata_size;
	rx_chn->remote = true;
	rx_chn->udma_rchan_id = -1;
	rx_chn->flow_num = cfg->flow_id_num;
	rx_chn->flow_id_base = cfg->flow_id_base;
	rx_chn->psil_paired = false;

	/* parse the udmap channel */
	ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
					&rx_chn->common, false);
	if (ret)
		goto err;

	rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
						rx_chn->common.psdata_size,
						rx_chn->common.swdata_size);

	rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
				     sizeof(*rx_chn->flows), GFP_KERNEL);
	if (!rx_chn->flows) {
		ret = -ENOMEM;
		goto err;
	}

	ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
	if (ret)
		goto err;

	for (i = 0; i < rx_chn->flow_num; i++)
		rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;

	k3_udma_glue_dump_rx_chn(rx_chn);

	return rx_chn;

err:
	k3_udma_glue_release_rx_chn(rx_chn);
	return ERR_PTR(ret);
}

struct k3_udma_glue_rx_channel *
k3_udma_glue_request_rx_chn(struct device *dev, const char *name,
			    struct k3_udma_glue_rx_channel_cfg *cfg)
{
	if (cfg->remote)
		return k3_udma_glue_request_remote_rx_chn(dev, name, cfg);
	else
		return k3_udma_glue_request_rx_chn_priv(dev, name, cfg);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_request_rx_chn);

void k3_udma_glue_release_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
	int i;

	if (IS_ERR_OR_NULL(rx_chn->common.udmax))
		return;

	if (rx_chn->psil_paired) {
		xudma_navss_psil_unpair(rx_chn->common.udmax,
					rx_chn->common.src_thread,
					rx_chn->common.dst_thread);
		rx_chn->psil_paired = false;
	}

	for (i = 0; i < rx_chn->flow_num; i++)
		k3_udma_glue_release_rx_flow(rx_chn, i);

	if (xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base))
		xudma_free_gp_rflow_range(rx_chn->common.udmax,
					  rx_chn->flow_id_base,
					  rx_chn->flow_num);

	if (!IS_ERR_OR_NULL(rx_chn->udma_rchanx))
		xudma_rchan_put(rx_chn->common.udmax,
				rx_chn->udma_rchanx);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_release_rx_chn);

int k3_udma_glue_rx_flow_init(struct k3_udma_glue_rx_channel *rx_chn,
			      u32 flow_idx,
			      struct k3_udma_glue_rx_flow_cfg *flow_cfg)
{
	if (flow_idx >= rx_chn->flow_num)
		return -EINVAL;

	return k3_udma_glue_cfg_rx_flow(rx_chn, flow_idx, flow_cfg);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_init);

u32 k3_udma_glue_rx_flow_get_fdq_id(struct k3_udma_glue_rx_channel *rx_chn,
				    u32 flow_idx)
{
	struct k3_udma_glue_rx_flow *flow;

	if (flow_idx >= rx_chn->flow_num)
		return -EINVAL;

	flow = &rx_chn->flows[flow_idx];

	return k3_ringacc_get_ring_id(flow->ringrxfdq);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_get_fdq_id);

u32 k3_udma_glue_rx_get_flow_id_base(struct k3_udma_glue_rx_channel *rx_chn)
{
	return rx_chn->flow_id_base;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_flow_id_base);

int k3_udma_glue_rx_flow_enable(struct k3_udma_glue_rx_channel *rx_chn,
				u32 flow_idx)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
	struct device *dev = rx_chn->common.dev;
	struct ti_sci_msg_rm_udmap_flow_cfg req;
	int rx_ring_id;
	int rx_ringfdq_id;
	int ret = 0;

	if (!rx_chn->remote)
		return -EINVAL;

	rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx);
	rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq);

	memset(&req, 0, sizeof(req));

	req.valid_params =
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.flow_index = flow->udma_rflow_id;
	req.rx_dest_qnum = rx_ring_id;
	req.rx_fdq0_sz0_qnum = rx_ringfdq_id;
	req.rx_fdq1_qnum = rx_ringfdq_id;
	req.rx_fdq2_qnum = rx_ringfdq_id;
	req.rx_fdq3_qnum = rx_ringfdq_id;

	ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
	if (ret) {
		dev_err(dev, "flow%d enable failed: %d\n", flow->udma_rflow_id,
			ret);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_enable);

int k3_udma_glue_rx_flow_disable(struct k3_udma_glue_rx_channel *rx_chn,
				 u32 flow_idx)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
	struct device *dev = rx_chn->common.dev;
	struct ti_sci_msg_rm_udmap_flow_cfg req;
	int ret = 0;

	if (!rx_chn->remote)
		return -EINVAL;

	memset(&req, 0, sizeof(req));
	req.valid_params =
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.flow_index = flow->udma_rflow_id;
	req.rx_dest_qnum = TI_SCI_RESOURCE_NULL;
	req.rx_fdq0_sz0_qnum = TI_SCI_RESOURCE_NULL;
	req.rx_fdq1_qnum = TI_SCI_RESOURCE_NULL;
	req.rx_fdq2_qnum = TI_SCI_RESOURCE_NULL;
	req.rx_fdq3_qnum = TI_SCI_RESOURCE_NULL;

	ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
	if (ret) {
		dev_err(dev, "flow%d disable failed: %d\n", flow->udma_rflow_id,
			ret);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_disable);

int k3_udma_glue_enable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
	if (rx_chn->remote)
		return -EINVAL;

	if (rx_chn->flows_ready < rx_chn->flow_num)
		return -EINVAL;

	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG,
			    UDMA_CHAN_RT_CTL_EN);

	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
			    UDMA_PEER_RT_EN_ENABLE);

	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt en");
	return 0;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_enable_rx_chn);

void k3_udma_glue_disable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis1");

	xudma_rchanrt_write(rx_chn->udma_rchanx,
			    UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG, 0);

	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis2");
}
EXPORT_SYMBOL_GPL(k3_udma_glue_disable_rx_chn);
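
/*
 * Initiate RX teardown through the peer. With @sync set, poll the RT
 * control register until the channel disables itself or the timeout
 * expires. Remote channels are not owned by Linux, so nothing is done
 * for them.
 */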
void k3_udma_glue_tdown_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
			       bool sync)
{
	int i = 0;
	u32 val;

	if (rx_chn->remote)
		return;

	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown1");

	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
			    UDMA_PEER_RT_EN_ENABLE | UDMA_PEER_RT_EN_TEARDOWN);

	val = xudma_rchanrt_read(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG);

	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
		val = xudma_rchanrt_read(rx_chn->udma_rchanx,
					 UDMA_CHAN_RT_CTL_REG);
		udelay(1);
		if (i > K3_UDMAX_TDOWN_TIMEOUT_US) {
			dev_err(rx_chn->common.dev, "RX tdown timeout\n");
			break;
		}
		i++;
	}

	val = xudma_rchanrt_read(rx_chn->udma_rchanx,
				 UDMA_CHAN_RT_PEER_RT_EN_REG);
	if (sync && (val & UDMA_PEER_RT_EN_ENABLE))
		dev_err(rx_chn->common.dev, "RX tdown peer not stopped\n");
	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown2");
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_rx_chn);
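
/*
 * Drain and reset the rings of one RX flow: the RX ring is simply
 * reset, while every descriptor still sitting in the RX FDQ is popped
 * and passed to @cleanup before its occupancy is reset, unless
 * @skip_fdq is set.
 */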
void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
		u32 flow_num, void *data,
		void (*cleanup)(void *data, dma_addr_t desc_dma), bool skip_fdq)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
	struct device *dev = rx_chn->common.dev;
	dma_addr_t desc_dma;
	int occ_rx, i, ret;

	/* reset RXCQ as it is not an input to UDMA - expected to be empty */
	occ_rx = k3_ringacc_ring_get_occ(flow->ringrx);
	dev_dbg(dev, "RX reset flow %u occ_rx %u\n", flow_num, occ_rx);
	if (flow->ringrx)
		k3_ringacc_ring_reset(flow->ringrx);

	/* Skip RX FDQ in case one FDQ is used for the set of flows */
	if (skip_fdq)
		return;

	/*
	 * The RX FDQ has to be reset in a special way as it is an input to
	 * UDMA and its state is cached by UDMA, so:
	 * 1) save the RX FDQ occupancy
	 * 2) clean up the RX FDQ and call the .cleanup() callback for each desc
	 * 3) reset the RX FDQ in a special way
	 */
	occ_rx = k3_ringacc_ring_get_occ(flow->ringrxfdq);
	dev_dbg(dev, "RX reset flow %u occ_rx_fdq %u\n", flow_num, occ_rx);
	for (i = 0; i < occ_rx; i++) {
		ret = k3_ringacc_ring_pop(flow->ringrxfdq, &desc_dma);
		if (ret) {
			dev_err(dev, "RX reset pop %d\n", ret);
			break;
		}
		cleanup(data, desc_dma);
	}

	k3_ringacc_ring_reset_dma(flow->ringrxfdq, occ_rx);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_reset_rx_chn);

int k3_udma_glue_push_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
			     u32 flow_num, struct cppi5_host_desc_t *desc_rx,
			     dma_addr_t desc_dma)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];

	return k3_ringacc_ring_push(flow->ringrxfdq, &desc_dma);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_push_rx_chn);

int k3_udma_glue_pop_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
			    u32 flow_num, dma_addr_t *desc_dma)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];

	return k3_ringacc_ring_pop(flow->ringrx, desc_dma);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_pop_rx_chn);

int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn,
			    u32 flow_num)
{
	struct k3_udma_glue_rx_flow *flow;

	flow = &rx_chn->flows[flow_num];

	flow->virq = k3_ringacc_get_ring_irq_num(flow->ringrx);

	return flow->virq;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_irq);