// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com
 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
 */
#define pr_fmt(fmt) "udma: " fmt

#include <common.h>
#include <cpu_func.h>
#include <asm/io.h>
#include <asm/bitops.h>
#include <malloc.h>
#include <linux/dma-mapping.h>
#include <dm.h>
#include <dm/device_compat.h>
#include <dm/devres.h>
#include <dm/read.h>
#include <dm/of_access.h>
#include <dma.h>
#include <dma-uclass.h>
#include <linux/delay.h>
#include <dt-bindings/dma/k3-udma.h>
#include <linux/bitmap.h>
#include <linux/err.h>
#include <linux/soc/ti/k3-navss-ringacc.h>
#include <linux/soc/ti/cppi5.h>
#include <linux/soc/ti/ti-udma.h>
#include <linux/soc/ti/ti_sci_protocol.h>

#include "k3-udma-hwdef.h"

#if BITS_PER_LONG == 64
#define RINGACC_RING_USE_PROXY	(0)
#else
#define RINGACC_RING_USE_PROXY	(1)
#endif

#define K3_UDMA_MAX_RFLOWS 1024

struct udma_chan;

enum udma_mmr {
	MMR_GCFG = 0,
	MMR_RCHANRT,
	MMR_TCHANRT,
	MMR_LAST,
};

static const char * const mmr_names[] = {
	"gcfg", "rchanrt", "tchanrt"
};

struct udma_tchan {
	void __iomem *reg_rt;

	int id;
	struct k3_nav_ring *t_ring; /* Transmit ring */
	struct k3_nav_ring *tc_ring; /* Transmit Completion ring */
};

struct udma_rchan {
	void __iomem *reg_rt;

	int id;
	struct k3_nav_ring *fd_ring; /* Free Descriptor ring */
	struct k3_nav_ring *r_ring; /* Receive ring */
};

struct udma_rflow {
	int id;
};

enum udma_rm_range {
	RM_RANGE_TCHAN = 0,
	RM_RANGE_RCHAN,
	RM_RANGE_RFLOW,
	RM_RANGE_LAST,
};

struct udma_tisci_rm {
	const struct ti_sci_handle *tisci;
	const struct ti_sci_rm_udmap_ops *tisci_udmap_ops;
	u32 tisci_dev_id;

	/* tisci information for PSI-L thread pairing/unpairing */
	const struct ti_sci_rm_psil_ops *tisci_psil_ops;
	u32 tisci_navss_dev_id;

	struct ti_sci_resource *rm_ranges[RM_RANGE_LAST];
};

struct udma_dev {
	struct udevice *dev;
	void __iomem *mmrs[MMR_LAST];

	struct udma_tisci_rm tisci_rm;
	struct k3_nav_ringacc *ringacc;

	u32 features;

	int tchan_cnt;
	int echan_cnt;
	int rchan_cnt;
	int rflow_cnt;
	unsigned long *tchan_map;
	unsigned long *rchan_map;
	unsigned long *rflow_map;
	unsigned long *rflow_map_reserved;

	struct udma_tchan *tchans;
	struct udma_rchan *rchans;
	struct udma_rflow *rflows;

	struct udma_chan *channels;
	u32 psil_base;

	u32 ch_count;
};

struct udma_chan {
	struct udma_dev *ud;
	char name[20];

	struct udma_tchan *tchan;
	struct udma_rchan *rchan;
	struct udma_rflow *rflow;

	struct ti_udma_drv_chan_cfg_data cfg_data;

	u32 bcnt; /* number of bytes completed since the start of the channel */

	bool pkt_mode; /* TR or packet */
	bool needs_epib; /* EPIB is needed for the communication or not */
	u32 psd_size; /* size of Protocol Specific Data */
	u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */
	int slave_thread_id;
	u32 src_thread;
	u32 dst_thread;
	u32 static_tr_type;

	u32 id;
	enum dma_direction dir;

	struct cppi5_host_desc_t *desc_tx;
	u32 hdesc_size;
	bool in_use;
	void *desc_rx;
	u32 num_rx_bufs;
	u32 desc_rx_cur;
};

#define UDMA_CH_1000(ch)	(ch * 0x1000)
#define UDMA_CH_100(ch)		(ch * 0x100)
#define UDMA_CH_40(ch)		(ch * 0x40)

#ifdef PKTBUFSRX
#define UDMA_RX_DESC_NUM PKTBUFSRX
#else
#define UDMA_RX_DESC_NUM 4
#endif

/* Generic register access functions */
static inline u32 udma_read(void __iomem *base, int reg)
{
	u32 v;

	v = __raw_readl(base + reg);
	pr_debug("READL(32): v(%08X)<--reg(%p)\n", v, base + reg);
	return v;
}

static inline void udma_write(void __iomem *base, int reg, u32 val)
{
	pr_debug("WRITEL(32): v(%08X)-->reg(%p)\n", val, base + reg);
	__raw_writel(val, base + reg);
}

static inline void udma_update_bits(void __iomem *base, int reg,
				    u32 mask, u32 val)
{
	u32 tmp, orig;

	orig = udma_read(base, reg);
	tmp = orig & ~mask;
	tmp |= (val & mask);

	if (tmp != orig)
		udma_write(base, reg, tmp);
}

/* TCHANRT */
static inline u32 udma_tchanrt_read(struct udma_tchan *tchan, int reg)
{
	if (!tchan)
		return 0;
	return udma_read(tchan->reg_rt, reg);
}

static inline void udma_tchanrt_write(struct udma_tchan *tchan,
				      int reg, u32 val)
{
	if (!tchan)
		return;
	udma_write(tchan->reg_rt, reg, val);
}

/* RCHANRT */
static inline u32 udma_rchanrt_read(struct udma_rchan *rchan, int reg)
{
	if (!rchan)
		return 0;
	return udma_read(rchan->reg_rt, reg);
}

static inline void udma_rchanrt_write(struct udma_rchan *rchan,
				      int reg, u32 val)
{
	if (!rchan)
		return;
	udma_write(rchan->reg_rt, reg, val);
}
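/*
 * PSI-L is the packet streaming fabric that links UDMA channels to
 * peripheral endpoints; each end of a link is addressed by a thread ID.
 * Source (output) threads are derived from psil_base + channel index,
 * while destination (input) threads carry an extra offset (bit 15, see
 * UDMA_PSIL_DST_THREAD_ID_OFFSET and the 0x8000 OR-ing in
 * udma_alloc_chan_resources()), e.g. for MEM_TO_MEM:
 *
 *	src = ud->psil_base + uc->tchan->id;
 *	dst = (ud->psil_base + uc->rchan->id) | 0x8000;
 *
 * The pairing itself is performed on our behalf by System Firmware via
 * the TISCI PSI-L ops used below.
 */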
static inline int udma_navss_psil_pair(struct udma_dev *ud, u32 src_thread,
				       u32 dst_thread)
{
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;

	dst_thread |= UDMA_PSIL_DST_THREAD_ID_OFFSET;

	return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci,
					      tisci_rm->tisci_navss_dev_id,
					      src_thread, dst_thread);
}

static inline int udma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
					 u32 dst_thread)
{
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;

	dst_thread |= UDMA_PSIL_DST_THREAD_ID_OFFSET;

	return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci,
						tisci_rm->tisci_navss_dev_id,
						src_thread, dst_thread);
}

static inline char *udma_get_dir_text(enum dma_direction dir)
{
	switch (dir) {
	case DMA_DEV_TO_MEM:
		return "DEV_TO_MEM";
	case DMA_MEM_TO_DEV:
		return "MEM_TO_DEV";
	case DMA_MEM_TO_MEM:
		return "MEM_TO_MEM";
	case DMA_DEV_TO_DEV:
		return "DEV_TO_DEV";
	default:
		break;
	}

	return "invalid";
}

static inline bool udma_is_chan_running(struct udma_chan *uc)
{
	u32 trt_ctl = 0;
	u32 rrt_ctl = 0;

	switch (uc->dir) {
	case DMA_DEV_TO_MEM:
		rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
		pr_debug("%s: rrt_ctl: 0x%08x (peer: 0x%08x)\n",
			 __func__, rrt_ctl,
			 udma_rchanrt_read(uc->rchan,
					   UDMA_RCHAN_RT_PEER_RT_EN_REG));
		break;
	case DMA_MEM_TO_DEV:
		trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
		pr_debug("%s: trt_ctl: 0x%08x (peer: 0x%08x)\n",
			 __func__, trt_ctl,
			 udma_tchanrt_read(uc->tchan,
					   UDMA_TCHAN_RT_PEER_RT_EN_REG));
		break;
	case DMA_MEM_TO_MEM:
		trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
		rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
		break;
	default:
		break;
	}

	if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN)
		return true;

	return false;
}

static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
{
	struct k3_nav_ring *ring = NULL;
	int ret = -ENOENT;

	switch (uc->dir) {
	case DMA_DEV_TO_MEM:
		ring = uc->rchan->r_ring;
		break;
	case DMA_MEM_TO_DEV:
		ring = uc->tchan->tc_ring;
		break;
	case DMA_MEM_TO_MEM:
		ring = uc->tchan->tc_ring;
		break;
	default:
		break;
	}

	if (ring && k3_nav_ringacc_ring_get_occ(ring))
		ret = k3_nav_ringacc_ring_pop(ring, addr);

	return ret;
}

static void udma_reset_rings(struct udma_chan *uc)
{
	struct k3_nav_ring *ring1 = NULL;
	struct k3_nav_ring *ring2 = NULL;

	switch (uc->dir) {
	case DMA_DEV_TO_MEM:
		ring1 = uc->rchan->fd_ring;
		ring2 = uc->rchan->r_ring;
		break;
	case DMA_MEM_TO_DEV:
		ring1 = uc->tchan->t_ring;
		ring2 = uc->tchan->tc_ring;
		break;
	case DMA_MEM_TO_MEM:
		ring1 = uc->tchan->t_ring;
		ring2 = uc->tchan->tc_ring;
		break;
	default:
		break;
	}

	if (ring1)
		k3_nav_ringacc_ring_reset_dma(ring1, 0);
	if (ring2)
		k3_nav_ringacc_ring_reset(ring2);
}
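/*
 * Note on the read-then-write-back pattern below: a write to an RT
 * counter register decrements the counter by the value written, so
 * writing back the value just read zeroes it. That is why
 * udma_reset_counters() looks like a no-op at first glance.
 */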
static void udma_reset_counters(struct udma_chan *uc)
{
	u32 val;

	if (uc->tchan) {
		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_BCNT_REG, val);

		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG, val);

		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PCNT_REG);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PCNT_REG, val);

		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG, val);
	}

	if (uc->rchan) {
		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_BCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_BCNT_REG, val);

		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG, val);

		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PCNT_REG, val);

		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG, val);
	}

	uc->bcnt = 0;
}

static inline int udma_stop_hard(struct udma_chan *uc)
{
	pr_debug("%s: ENTER (chan%d)\n", __func__, uc->id);

	switch (uc->dir) {
	case DMA_DEV_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG, 0);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
		break;
	case DMA_MEM_TO_DEV:
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG, 0);
		break;
	case DMA_MEM_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int udma_start(struct udma_chan *uc)
{
	/* Channel is already running, no need to proceed further */
	if (udma_is_chan_running(uc))
		goto out;

	pr_debug("%s: chan:%d dir:%s (static_tr_type: %d)\n",
		 __func__, uc->id, udma_get_dir_text(uc->dir),
		 uc->static_tr_type);

	/* Make sure that we clear the teardown bit, if it is set */
	udma_stop_hard(uc);

	/* Reset all counters */
	udma_reset_counters(uc);

	switch (uc->dir) {
	case DMA_DEV_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		/* Enable remote */
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
				   UDMA_PEER_RT_EN_ENABLE);

		pr_debug("%s(rx): RT_CTL:0x%08x PEER RT_ENABLE:0x%08x\n",
			 __func__,
			 udma_rchanrt_read(uc->rchan,
					   UDMA_RCHAN_RT_CTL_REG),
			 udma_rchanrt_read(uc->rchan,
					   UDMA_RCHAN_RT_PEER_RT_EN_REG));
		break;
	case DMA_MEM_TO_DEV:
		/* Enable remote */
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG,
				   UDMA_PEER_RT_EN_ENABLE);

		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		pr_debug("%s(tx): RT_CTL:0x%08x PEER RT_ENABLE:0x%08x\n",
			 __func__,
			 udma_tchanrt_read(uc->tchan,
					   UDMA_TCHAN_RT_CTL_REG),
			 udma_tchanrt_read(uc->tchan,
					   UDMA_TCHAN_RT_PEER_RT_EN_REG));
		break;
	case DMA_MEM_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);
		break;
	default:
		return -EINVAL;
	}

	pr_debug("%s: DONE chan:%d\n", __func__, uc->id);
out:
	return 0;
}

static inline void udma_stop_mem2dev(struct udma_chan *uc, bool sync)
{
	int i = 0;
	u32 val;

	udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
			   UDMA_CHAN_RT_CTL_EN |
			   UDMA_CHAN_RT_CTL_TDOWN);

	val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);

	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
		udelay(1);
		if (i > 1000) {
			printf(" %s TIMEOUT !\n", __func__);
			break;
		}
		i++;
	}

	val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG);
	if (val & UDMA_PEER_RT_EN_ENABLE)
		printf("%s: peer not stopped TIMEOUT !\n", __func__);
}

static inline void udma_stop_dev2mem(struct udma_chan *uc, bool sync)
{
	int i = 0;
	u32 val;

	udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
			   UDMA_PEER_RT_EN_ENABLE |
			   UDMA_PEER_RT_EN_TEARDOWN);

	val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);

	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
		udelay(1);
		if (i > 1000) {
			printf("%s TIMEOUT !\n", __func__);
			break;
		}
		i++;
	}

	val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG);
	if (val & UDMA_PEER_RT_EN_ENABLE)
		printf("%s: peer not stopped TIMEOUT !\n", __func__);
}

static inline int udma_stop(struct udma_chan *uc)
{
	pr_debug("%s: chan:%d dir:%s\n",
		 __func__, uc->id, udma_get_dir_text(uc->dir));

	udma_reset_counters(uc);

	switch (uc->dir) {
	case DMA_DEV_TO_MEM:
		udma_stop_dev2mem(uc, true);
		break;
	case DMA_MEM_TO_DEV:
		udma_stop_mem2dev(uc, true);
		break;
	case DMA_MEM_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void udma_poll_completion(struct udma_chan *uc, dma_addr_t *paddr)
{
	int i = 1;

	while (udma_pop_from_ring(uc, paddr)) {
		udelay(1);
		if (!(i % 1000000))
			printf(".");
		i++;
	}
}
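/*
 * RX flow allocation: flow IDs below rchan_cnt mirror the RX channel IDs
 * and serve as default flows for remotes that cannot generate a flow ID
 * of their own, so they may only be reserved explicitly by ID (see the
 * matching comment in udma_setup_resources()). A request with id < 0
 * therefore searches the bitmap starting at rchan_cnt, skipping both
 * in-use and reserved flows.
 */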
static struct udma_rflow *__udma_reserve_rflow(struct udma_dev *ud, int id)
{
	DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);

	if (id >= 0) {
		if (test_bit(id, ud->rflow_map)) {
			dev_err(ud->dev, "rflow%d is in use\n", id);
			return ERR_PTR(-ENOENT);
		}
	} else {
		bitmap_or(tmp, ud->rflow_map, ud->rflow_map_reserved,
			  ud->rflow_cnt);

		id = find_next_zero_bit(tmp, ud->rflow_cnt, ud->rchan_cnt);
		if (id >= ud->rflow_cnt)
			return ERR_PTR(-ENOENT);
	}

	__set_bit(id, ud->rflow_map);
	return &ud->rflows[id];
}

#define UDMA_RESERVE_RESOURCE(res)					\
static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud,	\
					       int id)			\
{									\
	if (id >= 0) {							\
		if (test_bit(id, ud->res##_map)) {			\
			dev_err(ud->dev, "res##%d is in use\n", id);	\
			return ERR_PTR(-ENOENT);			\
		}							\
	} else {							\
		id = find_first_zero_bit(ud->res##_map, ud->res##_cnt); \
		if (id == ud->res##_cnt) {				\
			return ERR_PTR(-ENOENT);			\
		}							\
	}								\
									\
	__set_bit(id, ud->res##_map);					\
	return &ud->res##s[id];						\
}
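/*
 * For reference, a sketch of what UDMA_RESERVE_RESOURCE(tchan) expands
 * to (the rchan variant is identical modulo the name):
 *
 *	static struct udma_tchan *__udma_reserve_tchan(struct udma_dev *ud,
 *						       int id)
 *	{
 *		if (id >= 0) {
 *			if (test_bit(id, ud->tchan_map)) { ... }
 *		} else {
 *			id = find_first_zero_bit(ud->tchan_map,
 *						 ud->tchan_cnt);
 *			...
 *		}
 *		__set_bit(id, ud->tchan_map);
 *		return &ud->tchans[id];
 *	}
 *
 * Note that "res##%d" inside the string literal is not expanded (token
 * pasting does not happen inside strings), so the error log prints the
 * literal text "res##" rather than the resource name.
 */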
UDMA_RESERVE_RESOURCE(tchan);
UDMA_RESERVE_RESOURCE(rchan);

static int udma_get_tchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->tchan) {
		dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
			uc->id, uc->tchan->id);
		return 0;
	}

	uc->tchan = __udma_reserve_tchan(ud, -1);
	if (IS_ERR(uc->tchan))
		return PTR_ERR(uc->tchan);

	pr_debug("chan%d: got tchan%d\n", uc->id, uc->tchan->id);

	return 0;
}

static int udma_get_rchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rchan) {
		dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
			uc->id, uc->rchan->id);
		return 0;
	}

	uc->rchan = __udma_reserve_rchan(ud, -1);
	if (IS_ERR(uc->rchan))
		return PTR_ERR(uc->rchan);

	pr_debug("chan%d: got rchan%d\n", uc->id, uc->rchan->id);

	return 0;
}

static int udma_get_chan_pair(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int chan_id, end;

	if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
		dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
			 uc->id, uc->tchan->id);
		return 0;
	}

	if (uc->tchan) {
		dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
			uc->id, uc->tchan->id);
		return -EBUSY;
	} else if (uc->rchan) {
		dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
			uc->id, uc->rchan->id);
		return -EBUSY;
	}

	/* Can be optimized, but let's have it like this for now */
	end = min(ud->tchan_cnt, ud->rchan_cnt);
	for (chan_id = 0; chan_id < end; chan_id++) {
		if (!test_bit(chan_id, ud->tchan_map) &&
		    !test_bit(chan_id, ud->rchan_map))
			break;
	}

	if (chan_id == end)
		return -ENOENT;

	__set_bit(chan_id, ud->tchan_map);
	__set_bit(chan_id, ud->rchan_map);
	uc->tchan = &ud->tchans[chan_id];
	uc->rchan = &ud->rchans[chan_id];

	pr_debug("chan%d: got t/rchan%d pair\n", uc->id, chan_id);

	return 0;
}

static int udma_get_rflow(struct udma_chan *uc, int flow_id)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rflow) {
		dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n",
			uc->id, uc->rflow->id);
		return 0;
	}

	if (!uc->rchan)
		dev_warn(ud->dev, "chan%d: does not have rchan??\n", uc->id);

	uc->rflow = __udma_reserve_rflow(ud, flow_id);
	if (IS_ERR(uc->rflow))
		return PTR_ERR(uc->rflow);

	pr_debug("chan%d: got rflow%d\n", uc->id, uc->rflow->id);

	return 0;
}

static void udma_put_rchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rchan) {
		dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id,
			uc->rchan->id);
		__clear_bit(uc->rchan->id, ud->rchan_map);
		uc->rchan = NULL;
	}
}

static void udma_put_tchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->tchan) {
		dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
			uc->tchan->id);
		__clear_bit(uc->tchan->id, ud->tchan_map);
		uc->tchan = NULL;
	}
}

static void udma_put_rflow(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rflow) {
		dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
			uc->rflow->id);
		__clear_bit(uc->rflow->id, ud->rflow_map);
		uc->rflow = NULL;
	}
}

static void udma_free_tx_resources(struct udma_chan *uc)
{
	if (!uc->tchan)
		return;

	k3_nav_ringacc_ring_free(uc->tchan->t_ring);
	k3_nav_ringacc_ring_free(uc->tchan->tc_ring);
	uc->tchan->t_ring = NULL;
	uc->tchan->tc_ring = NULL;

	udma_put_tchan(uc);
}

static int udma_alloc_tx_resources(struct udma_chan *uc)
{
	struct k3_nav_ring_cfg ring_cfg;
	struct udma_dev *ud = uc->ud;
	int ret;

	ret = udma_get_tchan(uc);
	if (ret)
		return ret;

	uc->tchan->t_ring = k3_nav_ringacc_request_ring(
				ud->ringacc, uc->tchan->id,
				RINGACC_RING_USE_PROXY);
	if (!uc->tchan->t_ring) {
		ret = -EBUSY;
		goto err_tx_ring;
	}

	uc->tchan->tc_ring = k3_nav_ringacc_request_ring(
				ud->ringacc, -1, RINGACC_RING_USE_PROXY);
	if (!uc->tchan->tc_ring) {
		ret = -EBUSY;
		goto err_txc_ring;
	}

	memset(&ring_cfg, 0, sizeof(ring_cfg));
	ring_cfg.size = 16;
	ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
	ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_RING;

	ret = k3_nav_ringacc_ring_cfg(uc->tchan->t_ring, &ring_cfg);
	ret |= k3_nav_ringacc_ring_cfg(uc->tchan->tc_ring, &ring_cfg);

	if (ret)
		goto err_ringcfg;

	return 0;

err_ringcfg:
	k3_nav_ringacc_ring_free(uc->tchan->tc_ring);
	uc->tchan->tc_ring = NULL;
err_txc_ring:
	k3_nav_ringacc_ring_free(uc->tchan->t_ring);
	uc->tchan->t_ring = NULL;
err_tx_ring:
	udma_put_tchan(uc);

	return ret;
}

static void udma_free_rx_resources(struct udma_chan *uc)
{
	if (!uc->rchan)
		return;

	k3_nav_ringacc_ring_free(uc->rchan->fd_ring);
	k3_nav_ringacc_ring_free(uc->rchan->r_ring);
	uc->rchan->fd_ring = NULL;
	uc->rchan->r_ring = NULL;

	udma_put_rflow(uc);
	udma_put_rchan(uc);
}

static int udma_alloc_rx_resources(struct udma_chan *uc)
{
	struct k3_nav_ring_cfg ring_cfg;
	struct udma_dev *ud = uc->ud;
	int fd_ring_id;
	int ret;

	ret = udma_get_rchan(uc);
	if (ret)
		return ret;

	/* For MEM_TO_MEM we don't need rflow or rings */
	if (uc->dir == DMA_MEM_TO_MEM)
		return 0;

	ret = udma_get_rflow(uc, uc->rchan->id);
	if (ret) {
		ret = -EBUSY;
		goto err_rflow;
	}

	fd_ring_id = ud->tchan_cnt + ud->echan_cnt + uc->rchan->id;

	uc->rchan->fd_ring = k3_nav_ringacc_request_ring(
				ud->ringacc, fd_ring_id,
				RINGACC_RING_USE_PROXY);
	if (!uc->rchan->fd_ring) {
		ret = -EBUSY;
		goto err_rx_ring;
	}

	uc->rchan->r_ring = k3_nav_ringacc_request_ring(
				ud->ringacc, -1, RINGACC_RING_USE_PROXY);
	if (!uc->rchan->r_ring) {
		ret = -EBUSY;
		goto err_rxc_ring;
	}

	memset(&ring_cfg, 0, sizeof(ring_cfg));
	ring_cfg.size = 16;
	ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
	ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_RING;

	ret = k3_nav_ringacc_ring_cfg(uc->rchan->fd_ring, &ring_cfg);
	ret |= k3_nav_ringacc_ring_cfg(uc->rchan->r_ring, &ring_cfg);

	if (ret)
		goto err_ringcfg;

	return 0;

err_ringcfg:
	k3_nav_ringacc_ring_free(uc->rchan->r_ring);
	uc->rchan->r_ring = NULL;
err_rxc_ring:
	k3_nav_ringacc_ring_free(uc->rchan->fd_ring);
	uc->rchan->fd_ring = NULL;
err_rx_ring:
	udma_put_rflow(uc);
err_rflow:
	udma_put_rchan(uc);

	return ret;
}
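/*
 * The channel hardware itself is configured through System Firmware
 * (TISCI). Two details worth calling out in the requests built below:
 *  - tx/rx_fetch_size is expressed in 32-bit words, hence the >> 2 on
 *    the byte sizes returned by cppi5_hdesc_calc_size()/sizeof();
 *  - the channel type selects packet mode (PKT_PBRR) for peripheral
 *    transfers with host descriptors, or third-party block copy
 *    (3RDP_BCOPY_PBRR) for TR-mode memcpy.
 */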
static int udma_alloc_tchan_sci_req(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int tc_ring = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
	struct ti_sci_msg_rm_udmap_tx_ch_cfg req;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	u32 mode;
	int ret;

	if (uc->pkt_mode)
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
	else
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;

	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.index = uc->tchan->id;
	req.tx_chan_type = mode;
	if (uc->dir == DMA_MEM_TO_MEM)
		req.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
	else
		req.tx_fetch_size = cppi5_hdesc_calc_size(uc->needs_epib,
							  uc->psd_size,
							  0) >> 2;
	req.txcq_qnum = tc_ring;

	ret = tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
	if (ret)
		dev_err(ud->dev, "tisci tx alloc failed %d\n", ret);

	return ret;
}

static int udma_alloc_rchan_sci_req(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int fd_ring = k3_nav_ringacc_get_ring_id(uc->rchan->fd_ring);
	int rx_ring = k3_nav_ringacc_get_ring_id(uc->rchan->r_ring);
	int tc_ring = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
	struct ti_sci_msg_rm_udmap_rx_ch_cfg req = { 0 };
	struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	u32 mode;
	int ret;

	if (uc->pkt_mode)
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
	else
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;

	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.index = uc->rchan->id;
	req.rx_chan_type = mode;
	if (uc->dir == DMA_MEM_TO_MEM) {
		req.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
		req.rxcq_qnum = tc_ring;
	} else {
		req.rx_fetch_size = cppi5_hdesc_calc_size(uc->needs_epib,
							  uc->psd_size,
							  0) >> 2;
		req.rxcq_qnum = rx_ring;
	}
	if (uc->rflow->id != uc->rchan->id && uc->dir != DMA_MEM_TO_MEM) {
		req.flowid_start = uc->rflow->id;
		req.flowid_cnt = 1;
		req.valid_params |=
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID;
	}

	ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req);
	if (ret) {
		dev_err(ud->dev, "tisci rx %u cfg failed %d\n",
			uc->rchan->id, ret);
		return ret;
	}
	if (uc->dir == DMA_MEM_TO_MEM)
		return ret;

	flow_req.valid_params =
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PS_LOCATION_VALID;

	flow_req.nav_id = tisci_rm->tisci_dev_id;
	flow_req.flow_index = uc->rflow->id;

	if (uc->needs_epib)
		flow_req.rx_einfo_present = 1;
	else
		flow_req.rx_einfo_present = 0;

	if (uc->psd_size)
		flow_req.rx_psinfo_present = 1;
	else
		flow_req.rx_psinfo_present = 0;

	flow_req.rx_error_handling = 0;
	flow_req.rx_desc_type = 0;
	flow_req.rx_dest_qnum = rx_ring;
	flow_req.rx_src_tag_hi_sel = 2;
	flow_req.rx_src_tag_lo_sel = 4;
	flow_req.rx_dest_tag_hi_sel = 5;
	flow_req.rx_dest_tag_lo_sel = 4;
	flow_req.rx_fdq0_sz0_qnum = fd_ring;
	flow_req.rx_fdq1_qnum = fd_ring;
	flow_req.rx_fdq2_qnum = fd_ring;
	flow_req.rx_fdq3_qnum = fd_ring;
	flow_req.rx_ps_location = 0;

	ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci,
						     &flow_req);
	if (ret)
		dev_err(ud->dev, "tisci rx %u flow %u cfg failed %d\n",
			uc->rchan->id, uc->rflow->id, ret);

	return ret;
}
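/*
 * Putting the pieces together, bringing a channel up always follows the
 * same sequence (a sketch; error paths unwind via udma_free_*_resources):
 *
 *	udma_alloc_tx/rx_resources()	- reserve channel(s) and rings
 *	udma_alloc_t/rchan_sci_req()	- configure them through TISCI
 *	udma_stop() if still running	- e.g. left over from a previous
 *					  boot stage
 *	udma_navss_psil_pair()		- link source and dest threads
 */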
static int udma_alloc_chan_resources(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int ret;

	pr_debug("%s: chan:%d as %s\n",
		 __func__, uc->id, udma_get_dir_text(uc->dir));

	switch (uc->dir) {
	case DMA_MEM_TO_MEM:
		/* Non synchronized - mem to mem type of transfer */
		ret = udma_get_chan_pair(uc);
		if (ret)
			return ret;

		ret = udma_alloc_tx_resources(uc);
		if (ret)
			goto err_free_res;

		ret = udma_alloc_rx_resources(uc);
		if (ret)
			goto err_free_res;

		uc->src_thread = ud->psil_base + uc->tchan->id;
		uc->dst_thread = (ud->psil_base + uc->rchan->id) | 0x8000;
		break;
	case DMA_MEM_TO_DEV:
		/* Slave transfer synchronized - mem to dev (TX) transfer */
		ret = udma_alloc_tx_resources(uc);
		if (ret)
			goto err_free_res;

		uc->src_thread = ud->psil_base + uc->tchan->id;
		uc->dst_thread = uc->slave_thread_id;
		if (!(uc->dst_thread & 0x8000))
			uc->dst_thread |= 0x8000;

		break;
	case DMA_DEV_TO_MEM:
		/* Slave transfer synchronized - dev to mem (RX) transfer */
		ret = udma_alloc_rx_resources(uc);
		if (ret)
			goto err_free_res;

		uc->src_thread = uc->slave_thread_id;
		uc->dst_thread = (ud->psil_base + uc->rchan->id) | 0x8000;

		break;
	default:
		/* Can not happen */
		pr_debug("%s: chan:%d invalid direction (%u)\n",
			 __func__, uc->id, uc->dir);
		return -EINVAL;
	}

	/* We have channel indexes and rings */
	if (uc->dir == DMA_MEM_TO_MEM) {
		ret = udma_alloc_tchan_sci_req(uc);
		if (ret)
			goto err_free_res;

		ret = udma_alloc_rchan_sci_req(uc);
		if (ret)
			goto err_free_res;
	} else {
		/* Slave transfer */
		if (uc->dir == DMA_MEM_TO_DEV) {
			ret = udma_alloc_tchan_sci_req(uc);
			if (ret)
				goto err_free_res;
		} else {
			ret = udma_alloc_rchan_sci_req(uc);
			if (ret)
				goto err_free_res;
		}
	}

	if (udma_is_chan_running(uc)) {
		dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
		udma_stop(uc);
		if (udma_is_chan_running(uc)) {
			dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
			goto err_free_res;
		}
	}

	/* PSI-L pairing */
	ret = udma_navss_psil_pair(ud, uc->src_thread, uc->dst_thread);
	if (ret) {
		dev_err(ud->dev, "k3_nav_psil_request_link fail\n");
		goto err_free_res;
	}

	return 0;

err_free_res:
	udma_free_tx_resources(uc);
	udma_free_rx_resources(uc);
	uc->slave_thread_id = -1;
	return ret;
}
static void udma_free_chan_resources(struct udma_chan *uc)
{
	/* Some configuration to UDMA-P channel: disable, reset, whatever */

	/* Release PSI-L pairing */
	udma_navss_psil_unpair(uc->ud, uc->src_thread, uc->dst_thread);

	/* Reset the rings for a new start */
	udma_reset_rings(uc);
	udma_free_tx_resources(uc);
	udma_free_rx_resources(uc);

	uc->slave_thread_id = -1;
	uc->dir = DMA_MEM_TO_MEM;
}

static int udma_get_mmrs(struct udevice *dev)
{
	struct udma_dev *ud = dev_get_priv(dev);
	int i;

	for (i = 0; i < MMR_LAST; i++) {
		ud->mmrs[i] = (uint32_t *)devfdt_get_addr_name(dev,
							       mmr_names[i]);
		if (!ud->mmrs[i])
			return -EINVAL;
	}

	return 0;
}

static int udma_setup_resources(struct udma_dev *ud)
{
	struct udevice *dev = ud->dev;
	int ch_count, i;
	u32 cap2, cap3;
	struct ti_sci_resource_desc *rm_desc;
	struct ti_sci_resource *rm_res;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	static const char * const range_names[] = { "ti,sci-rm-range-tchan",
						    "ti,sci-rm-range-rchan",
						    "ti,sci-rm-range-rflow" };

	cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28);
	cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);

	ud->rflow_cnt = cap3 & 0x3fff;
	ud->tchan_cnt = cap2 & 0x1ff;
	ud->echan_cnt = (cap2 >> 9) & 0x1ff;
	ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
	ch_count = ud->tchan_cnt + ud->rchan_cnt;

	ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
				  GFP_KERNEL);
	ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
				  GFP_KERNEL);
	ud->rflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->rflow_map_reserved = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
					      sizeof(unsigned long),
					      GFP_KERNEL);
	ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
				  GFP_KERNEL);

	if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_map ||
	    !ud->rflow_map_reserved || !ud->tchans || !ud->rchans ||
	    !ud->rflows)
		return -ENOMEM;

	/*
	 * RX flows with the same Ids as RX channels are reserved to be used
	 * as default flows if remote HW can't generate flow_ids. Those
	 * RX flows can be requested only explicitly by id.
	 */
	bitmap_set(ud->rflow_map_reserved, 0, ud->rchan_cnt);

	/* Get resource ranges from tisci */
	for (i = 0; i < RM_RANGE_LAST; i++)
		tisci_rm->rm_ranges[i] =
			devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
						    tisci_rm->tisci_dev_id,
						    (char *)range_names[i]);

	/* tchan ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->tchan_map, ud->tchan_cnt);
	} else {
		bitmap_fill(ud->tchan_map, ud->tchan_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->tchan_map, rm_desc->start,
				     rm_desc->num);
		}
	}

	/* rchan and matching default flow ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->rchan_map, ud->rchan_cnt);
		bitmap_zero(ud->rflow_map, ud->rchan_cnt);
	} else {
		bitmap_fill(ud->rchan_map, ud->rchan_cnt);
		bitmap_fill(ud->rflow_map, ud->rchan_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->rchan_map, rm_desc->start,
				     rm_desc->num);
			bitmap_clear(ud->rflow_map, rm_desc->start,
				     rm_desc->num);
		}
	}

	/* GP rflow ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
	if (IS_ERR(rm_res)) {
		bitmap_clear(ud->rflow_map, ud->rchan_cnt,
			     ud->rflow_cnt - ud->rchan_cnt);
	} else {
		bitmap_set(ud->rflow_map, ud->rchan_cnt,
			   ud->rflow_cnt - ud->rchan_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->rflow_map, rm_desc->start,
				     rm_desc->num);
		}
	}

	ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
	ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
	if (!ch_count)
		return -ENODEV;

	ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels),
				    GFP_KERNEL);
	if (!ud->channels)
		return -ENOMEM;

	dev_info(dev,
		 "Channels: %d (tchan: %u, echan: %u, rchan: %u, rflow: %u)\n",
		 ch_count, ud->tchan_cnt, ud->echan_cnt, ud->rchan_cnt,
		 ud->rflow_cnt);

	return ch_count;
}

static int udma_probe(struct udevice *dev)
{
	struct dma_dev_priv *uc_priv = dev_get_uclass_priv(dev);
	struct udma_dev *ud = dev_get_priv(dev);
	int i, ret;
	struct udevice *tmp;
	struct udevice *tisci_dev = NULL;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	ofnode navss_ofnode = ofnode_get_parent(dev_ofnode(dev));

	ret = udma_get_mmrs(dev);
	if (ret)
		return ret;

	ret = uclass_get_device_by_phandle(UCLASS_MISC, dev,
					   "ti,ringacc", &tmp);
	ud->ringacc = dev_get_priv(tmp);
	if (IS_ERR(ud->ringacc))
		return PTR_ERR(ud->ringacc);

	ud->psil_base = dev_read_u32_default(dev, "ti,psil-base", 0);
	if (!ud->psil_base) {
		dev_info(dev,
			 "Missing ti,psil-base property, using %d.\n", ret);
		return -EINVAL;
	}

	ret = uclass_get_device_by_phandle(UCLASS_FIRMWARE, dev,
					   "ti,sci", &tisci_dev);
	if (ret) {
		debug("Failed to get TISCI phandle (%d)\n", ret);
		tisci_rm->tisci = NULL;
		return -EINVAL;
	}
	tisci_rm->tisci = (struct ti_sci_handle *)
			  (ti_sci_get_handle_from_sysfw(tisci_dev));

	tisci_rm->tisci_dev_id = -1;
	ret = dev_read_u32(dev, "ti,sci-dev-id", &tisci_rm->tisci_dev_id);
	if (ret) {
		dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
		return ret;
	}

	tisci_rm->tisci_navss_dev_id = -1;
	ret = ofnode_read_u32(navss_ofnode, "ti,sci-dev-id",
			      &tisci_rm->tisci_navss_dev_id);
	if (ret) {
		dev_err(dev, "navss sci-dev-id read failure %d\n", ret);
		return ret;
	}

	tisci_rm->tisci_udmap_ops = &tisci_rm->tisci->ops.rm_udmap_ops;
	tisci_rm->tisci_psil_ops = &tisci_rm->tisci->ops.rm_psil_ops;

	ud->dev = dev;
	ud->ch_count = udma_setup_resources(ud);
	if (ud->ch_count <= 0)
		return ud->ch_count;

	dev_info(dev,
		 "Number of channels: %u (tchan: %u, echan: %u, rchan: %u dev-id %u)\n",
		 ud->ch_count, ud->tchan_cnt, ud->echan_cnt, ud->rchan_cnt,
		 tisci_rm->tisci_dev_id);
	dev_info(dev, "Number of rflows: %u\n", ud->rflow_cnt);

	for (i = 0; i < ud->tchan_cnt; i++) {
		struct udma_tchan *tchan = &ud->tchans[i];

		tchan->id = i;
		tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + UDMA_CH_1000(i);
	}

	for (i = 0; i < ud->rchan_cnt; i++) {
		struct udma_rchan *rchan = &ud->rchans[i];

		rchan->id = i;
		rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + UDMA_CH_1000(i);
	}

	for (i = 0; i < ud->rflow_cnt; i++) {
		struct udma_rflow *rflow = &ud->rflows[i];

		rflow->id = i;
	}

	for (i = 0; i < ud->ch_count; i++) {
		struct udma_chan *uc = &ud->channels[i];

		uc->ud = ud;
		uc->id = i;
		uc->slave_thread_id = -1;
		uc->tchan = NULL;
		uc->rchan = NULL;
		uc->dir = DMA_MEM_TO_MEM;
		sprintf(uc->name, "UDMA chan%d\n", i);
		if (!i)
			uc->in_use = true;
	}

	pr_debug("UDMA(rev: 0x%08x) CAP0-3: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
		 udma_read(ud->mmrs[MMR_GCFG], 0),
		 udma_read(ud->mmrs[MMR_GCFG], 0x20),
		 udma_read(ud->mmrs[MMR_GCFG], 0x24),
		 udma_read(ud->mmrs[MMR_GCFG], 0x28),
		 udma_read(ud->mmrs[MMR_GCFG], 0x2c));

	uc_priv->supported = DMA_SUPPORTS_MEM_TO_MEM | DMA_SUPPORTS_MEM_TO_DEV;

	return ret;
}

static int udma_push_to_ring(struct k3_nav_ring *ring, void *elem)
{
	u64 addr = 0;

	memcpy(&addr, &elem, sizeof(elem));
	return k3_nav_ringacc_ring_push(ring, &addr);
}
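/*
 * TR-mode memcpy below uses a single type 15 TR for transfers under 64K.
 * Larger transfers are split in two: tr0 moves tr0_cnt1 blocks of
 * tr0_cnt0 bytes (64K minus the largest common alignment of src/dest,
 * capped at 8 bytes), and tr1 moves the tr1_cnt0 remainder. For example,
 * copying len = 200000 bytes with 8-byte aligned buffers gives:
 *
 *	tr0_cnt0 = SZ_64K - BIT(3) = 65528
 *	tr0_cnt1 = 200000 / 65528  = 3
 *	tr1_cnt0 = 200000 % 65528  = 3416	(3 * 65528 + 3416 = 200000)
 */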
static int *udma_prep_dma_memcpy(struct udma_chan *uc, dma_addr_t dest,
				 dma_addr_t src, size_t len)
{
	u32 tc_ring_id = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
	struct cppi5_tr_type15_t *tr_req;
	int num_tr;
	size_t tr_size = sizeof(struct cppi5_tr_type15_t);
	u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
	unsigned long dummy;
	void *tr_desc;
	size_t desc_size;

	if (len < SZ_64K) {
		num_tr = 1;
		tr0_cnt0 = len;
		tr0_cnt1 = 1;
	} else {
		unsigned long align_to = __ffs(src | dest);

		if (align_to > 3)
			align_to = 3;
		/*
		 * Keep simple: tr0: SZ_64K-alignment blocks,
		 * tr1: the remaining
		 */
		num_tr = 2;
		tr0_cnt0 = (SZ_64K - BIT(align_to));
		if (len / tr0_cnt0 >= SZ_64K) {
			dev_err(uc->ud->dev, "size %zu is not supported\n",
				len);
			return NULL;
		}

		tr0_cnt1 = len / tr0_cnt0;
		tr1_cnt0 = len % tr0_cnt0;
	}

	desc_size = cppi5_trdesc_calc_size(num_tr, tr_size);
	tr_desc = dma_alloc_coherent(desc_size, &dummy);
	if (!tr_desc)
		return NULL;
	memset(tr_desc, 0, desc_size);

	cppi5_trdesc_init(tr_desc, num_tr, tr_size, 0, 0);
	cppi5_desc_set_pktids(tr_desc, uc->id, 0x3fff);
	cppi5_desc_set_retpolicy(tr_desc, 0, tc_ring_id);

	tr_req = tr_desc + tr_size;

	cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
		      CPPI5_TR_EVENT_SIZE_COMPLETION, 1);
	cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT);

	tr_req[0].addr = src;
	tr_req[0].icnt0 = tr0_cnt0;
	tr_req[0].icnt1 = tr0_cnt1;
	tr_req[0].icnt2 = 1;
	tr_req[0].icnt3 = 1;
	tr_req[0].dim1 = tr0_cnt0;

	tr_req[0].daddr = dest;
	tr_req[0].dicnt0 = tr0_cnt0;
	tr_req[0].dicnt1 = tr0_cnt1;
	tr_req[0].dicnt2 = 1;
	tr_req[0].dicnt3 = 1;
	tr_req[0].ddim1 = tr0_cnt0;

	if (num_tr == 2) {
		cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
			      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
		cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT);

		tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0;
		tr_req[1].icnt0 = tr1_cnt0;
		tr_req[1].icnt1 = 1;
		tr_req[1].icnt2 = 1;
		tr_req[1].icnt3 = 1;

		tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0;
		tr_req[1].dicnt0 = tr1_cnt0;
		tr_req[1].dicnt1 = 1;
		tr_req[1].dicnt2 = 1;
		tr_req[1].dicnt3 = 1;
	}

	cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, CPPI5_TR_CSF_EOP);

	flush_dcache_range((unsigned long)tr_desc,
			   ALIGN((unsigned long)tr_desc + desc_size,
				 ARCH_DMA_MINALIGN));

	udma_push_to_ring(uc->tchan->t_ring, tr_desc);

	return 0;
}

static int udma_transfer(struct udevice *dev, int direction,
			 void *dst, void *src, size_t len)
{
	struct udma_dev *ud = dev_get_priv(dev);
	/* Channel0 is reserved for memcpy */
	struct udma_chan *uc = &ud->channels[0];
	dma_addr_t paddr = 0;
	int ret;

	ret = udma_alloc_chan_resources(uc);
	if (ret)
		return ret;

	udma_prep_dma_memcpy(uc, (dma_addr_t)dst, (dma_addr_t)src, len);
	udma_start(uc);
	udma_poll_completion(uc, &paddr);
	udma_stop(uc);

	udma_free_chan_resources(uc);
	return 0;
}
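/*
 * udma_transfer() above backs the DMA uclass .transfer op. Consumers
 * normally go through the uclass helpers rather than this driver
 * directly; a minimal sketch, assuming this U-Boot tree provides the
 * dma-uclass dma_memcpy() helper (dst/src are illustrative buffers, not
 * part of this driver):
 *
 *	u8 dst[256], src[256];
 *
 *	if (dma_memcpy(dst, src, sizeof(src)) < 0)
 *		printf("DMA memcpy failed\n");
 */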
static int udma_request(struct dma *dma)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc;
	unsigned long dummy;
	int ret;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}

	uc = &ud->channels[dma->id];
	ret = udma_alloc_chan_resources(uc);
	if (ret) {
		dev_err(dma->dev, "alloc dma res failed %d\n", ret);
		return -EINVAL;
	}

	uc->hdesc_size = cppi5_hdesc_calc_size(uc->needs_epib,
					       uc->psd_size, 0);
	uc->hdesc_size = ALIGN(uc->hdesc_size, ARCH_DMA_MINALIGN);

	if (uc->dir == DMA_MEM_TO_DEV) {
		uc->desc_tx = dma_alloc_coherent(uc->hdesc_size, &dummy);
		memset(uc->desc_tx, 0, uc->hdesc_size);
	} else {
		uc->desc_rx = dma_alloc_coherent(
				uc->hdesc_size * UDMA_RX_DESC_NUM, &dummy);
		memset(uc->desc_rx, 0, uc->hdesc_size * UDMA_RX_DESC_NUM);
	}

	uc->in_use = true;
	uc->desc_rx_cur = 0;
	uc->num_rx_bufs = 0;

	if (uc->dir == DMA_DEV_TO_MEM) {
		uc->cfg_data.flow_id_base = uc->rflow->id;
		uc->cfg_data.flow_id_cnt = 1;
	}

	return 0;
}

static int udma_rfree(struct dma *dma)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (udma_is_chan_running(uc))
		udma_stop(uc);
	udma_free_chan_resources(uc);

	uc->in_use = false;

	return 0;
}

static int udma_enable(struct dma *dma)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc;
	int ret;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	ret = udma_start(uc);

	return ret;
}

static int udma_disable(struct dma *dma)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc;
	int ret = 0;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (udma_is_chan_running(uc))
		ret = udma_stop(uc);
	else
		dev_err(dma->dev, "%s not running\n", __func__);

	return ret;
}

static int udma_send(struct dma *dma, void *src, size_t len, void *metadata)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct cppi5_host_desc_t *desc_tx;
	dma_addr_t dma_src = (dma_addr_t)src;
	struct ti_udma_drv_packet_data packet_data = { 0 };
	dma_addr_t paddr;
	struct udma_chan *uc;
	u32 tc_ring_id;
	int ret;

	if (metadata)
		packet_data = *((struct ti_udma_drv_packet_data *)metadata);

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (uc->dir != DMA_MEM_TO_DEV)
		return -EINVAL;

	tc_ring_id = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);

	desc_tx = uc->desc_tx;

	cppi5_hdesc_reset_hbdesc(desc_tx);

	cppi5_hdesc_init(desc_tx,
			 uc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_PRESENT : 0,
			 uc->psd_size);
	cppi5_hdesc_set_pktlen(desc_tx, len);
	cppi5_hdesc_attach_buf(desc_tx, dma_src, len, dma_src, len);
	cppi5_desc_set_pktids(&desc_tx->hdr, uc->id, 0x3fff);
	cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, tc_ring_id);
	/* pass below information from caller */
	cppi5_hdesc_set_pkttype(desc_tx, packet_data.pkt_type);
	cppi5_desc_set_tags_ids(&desc_tx->hdr, 0, packet_data.dest_tag);

	flush_dcache_range((unsigned long)dma_src,
			   ALIGN((unsigned long)dma_src + len,
				 ARCH_DMA_MINALIGN));
	flush_dcache_range((unsigned long)desc_tx,
			   ALIGN((unsigned long)desc_tx + uc->hdesc_size,
				 ARCH_DMA_MINALIGN));

	ret = udma_push_to_ring(uc->tchan->t_ring, uc->desc_tx);
	if (ret) {
		dev_err(dma->dev, "TX dma push fail ch_id %lu %d\n",
			dma->id, ret);
		return ret;
	}

	udma_poll_completion(uc, &paddr);

	return 0;
}

static int udma_receive(struct dma *dma, void **dst, void *metadata)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct cppi5_host_desc_t *desc_rx;
	dma_addr_t buf_dma;
	struct udma_chan *uc;
	u32 buf_dma_len, pkt_len;
	u32 port_id = 0;
	int ret;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (uc->dir != DMA_DEV_TO_MEM)
		return -EINVAL;
	if (!uc->num_rx_bufs)
		return -EINVAL;

	ret = k3_nav_ringacc_ring_pop(uc->rchan->r_ring, &desc_rx);
	if (ret && ret != -ENODATA) {
		dev_err(dma->dev, "rx dma fail ch_id:%lu %d\n", dma->id, ret);
		return ret;
	} else if (ret == -ENODATA) {
		return 0;
	}

	/* invalidate cache data */
	invalidate_dcache_range((ulong)desc_rx,
				(ulong)(desc_rx + uc->hdesc_size));

	cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
	pkt_len = cppi5_hdesc_get_pktlen(desc_rx);

	/* invalidate cache data */
	invalidate_dcache_range((ulong)buf_dma,
				(ulong)(buf_dma + buf_dma_len));

	cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);

	*dst = (void *)buf_dma;
	uc->num_rx_bufs--;

	return pkt_len;
}

static int udma_of_xlate(struct dma *dma, struct ofnode_phandle_args *args)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc = &ud->channels[0];
	ofnode chconf_node, slave_node;
	char prop[50];
	u32 val;

	for (val = 0; val < ud->ch_count; val++) {
		uc = &ud->channels[val];
		if (!uc->in_use)
			break;
	}

	if (val == ud->ch_count)
		return -EBUSY;

	uc->dir = DMA_DEV_TO_MEM;
	if (args->args[2] == UDMA_DIR_TX)
		uc->dir = DMA_MEM_TO_DEV;

	slave_node = ofnode_get_by_phandle(args->args[0]);
	if (!ofnode_valid(slave_node)) {
		dev_err(ud->dev, "slave node is missing\n");
		return -EINVAL;
	}

	snprintf(prop, sizeof(prop), "ti,psil-config%u", args->args[1]);
	chconf_node = ofnode_find_subnode(slave_node, prop);
	if (!ofnode_valid(chconf_node)) {
		dev_err(ud->dev, "Channel configuration node is missing\n");
		return -EINVAL;
	}

	if (!ofnode_read_u32(chconf_node, "linux,udma-mode", &val)) {
		if (val == UDMA_PKT_MODE)
			uc->pkt_mode = true;
	}

	if (!ofnode_read_u32(chconf_node, "statictr-type", &val))
		uc->static_tr_type = val;

	uc->needs_epib = ofnode_read_bool(chconf_node, "ti,needs-epib");
	if (!ofnode_read_u32(chconf_node, "ti,psd-size", &val))
		uc->psd_size = val;
	uc->metadata_size = (uc->needs_epib ? 16 : 0) + uc->psd_size;

	if (ofnode_read_u32(slave_node, "ti,psil-base", &val)) {
		dev_err(ud->dev, "ti,psil-base is missing\n");
		return -EINVAL;
	}

	uc->slave_thread_id = val + args->args[1];

	dma->id = uc->id;
	pr_debug("Allocated dma chn:%lu epib:%d psdata:%u meta:%u thread_id:%x\n",
		 dma->id, uc->needs_epib,
		 uc->psd_size, uc->metadata_size,
		 uc->slave_thread_id);

	return 0;
}
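/*
 * udma_of_xlate() above decodes a three-cell DMA specifier:
 *   args[0] - phandle of the slave (peripheral) node,
 *   args[1] - PSI-L thread offset within that slave,
 *   args[2] - direction (UDMA_DIR_TX or UDMA_DIR_RX).
 * A hypothetical consumer node sketch matching what the code reads (the
 * per-thread details live in the slave's ti,psil-config* subnodes; node
 * labels are illustrative):
 *
 *	dmas = <&main_udmap &slave_node 0 UDMA_DIR_TX>,
 *	       <&main_udmap &slave_node 0 UDMA_DIR_RX>;
 *	dma-names = "tx", "rx";
 */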
int udma_prepare_rcv_buf(struct dma *dma, void *dst, size_t size)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct cppi5_host_desc_t *desc_rx;
	dma_addr_t dma_dst;
	struct udma_chan *uc;
	u32 desc_num;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (uc->dir != DMA_DEV_TO_MEM)
		return -EINVAL;

	if (uc->num_rx_bufs >= UDMA_RX_DESC_NUM)
		return -EINVAL;

	desc_num = uc->desc_rx_cur % UDMA_RX_DESC_NUM;
	desc_rx = uc->desc_rx + (desc_num * uc->hdesc_size);

	dma_dst = (dma_addr_t)dst;

	cppi5_hdesc_reset_hbdesc(desc_rx);

	cppi5_hdesc_init(desc_rx,
			 uc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_PRESENT : 0,
			 uc->psd_size);
	cppi5_hdesc_set_pktlen(desc_rx, size);
	cppi5_hdesc_attach_buf(desc_rx, dma_dst, size, dma_dst, size);

	flush_dcache_range((unsigned long)desc_rx,
			   ALIGN((unsigned long)desc_rx + uc->hdesc_size,
				 ARCH_DMA_MINALIGN));

	udma_push_to_ring(uc->rchan->fd_ring, desc_rx);

	uc->num_rx_bufs++;
	uc->desc_rx_cur++;

	return 0;
}
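/*
 * RX flow for a consumer, sketched under the same uclass-API assumption
 * as the memcpy example above: prime up to UDMA_RX_DESC_NUM free
 * descriptors, then pop completed packets and immediately re-arm each
 * buffer (net_rx_packets/PKTSIZE_ALIGNED are the usual U-Boot net
 * buffers, used here only for illustration):
 *
 *	for (i = 0; i < UDMA_RX_DESC_NUM; i++)
 *		dma_prepare_rcv_buf(&dma_rx, net_rx_packets[i],
 *				    PKTSIZE_ALIGNED);
 *	dma_enable(&dma_rx);
 *	...
 *	len = dma_receive(&dma_rx, (void **)&pkt, NULL);
 *	if (len > 0)
 *		dma_prepare_rcv_buf(&dma_rx, pkt, PKTSIZE_ALIGNED);
 */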
static int udma_get_cfg(struct dma *dma, u32 id, void **data)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}

	switch (id) {
	case TI_UDMA_CHAN_PRIV_INFO:
		uc = &ud->channels[dma->id];
		*data = &uc->cfg_data;
		return 0;
	}

	return -EINVAL;
}

static const struct dma_ops udma_ops = {
	.transfer	= udma_transfer,
	.of_xlate	= udma_of_xlate,
	.request	= udma_request,
	.rfree		= udma_rfree,
	.enable		= udma_enable,
	.disable	= udma_disable,
	.send		= udma_send,
	.receive	= udma_receive,
	.prepare_rcv_buf = udma_prepare_rcv_buf,
	.get_cfg	= udma_get_cfg,
};

static const struct udevice_id udma_ids[] = {
	{ .compatible = "ti,k3-navss-udmap" },
	{ .compatible = "ti,j721e-navss-mcu-udmap" },
	{ }
};

U_BOOT_DRIVER(ti_edma3) = {
	.name	= "ti-udma",
	.id	= UCLASS_DMA,
	.of_match = udma_ids,
	.ops	= &udma_ops,
	.probe	= udma_probe,
	.priv_auto_alloc_size = sizeof(struct udma_dev),
};