k3-udma.c

  1. // SPDX-License-Identifier: GPL-2.0+
  2. /*
  3. * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com
  4. * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
  5. */
  6. #define pr_fmt(fmt) "udma: " fmt
  7. #include <common.h>
  8. #include <cpu_func.h>
  9. #include <log.h>
  10. #include <asm/cache.h>
  11. #include <asm/io.h>
  12. #include <asm/bitops.h>
  13. #include <malloc.h>
  14. #include <linux/bitops.h>
  15. #include <linux/dma-mapping.h>
  16. #include <dm.h>
  17. #include <dm/device_compat.h>
  18. #include <dm/devres.h>
  19. #include <dm/read.h>
  20. #include <dm/of_access.h>
  21. #include <dma.h>
  22. #include <dma-uclass.h>
  23. #include <linux/delay.h>
  24. #include <linux/bitmap.h>
  25. #include <linux/err.h>
  26. #include <linux/soc/ti/k3-navss-ringacc.h>
  27. #include <linux/soc/ti/cppi5.h>
  28. #include <linux/soc/ti/ti-udma.h>
  29. #include <linux/soc/ti/ti_sci_protocol.h>
  30. #include "k3-udma-hwdef.h"
  31. #include "k3-psil-priv.h"
  32. #define K3_UDMA_MAX_RFLOWS 1024
  33. struct udma_chan;
  34. enum udma_mmr {
  35. MMR_GCFG = 0,
  36. MMR_RCHANRT,
  37. MMR_TCHANRT,
  38. MMR_LAST,
  39. };
  40. static const char * const mmr_names[] = {
  41. "gcfg", "rchanrt", "tchanrt"
  42. };
  43. struct udma_tchan {
  44. void __iomem *reg_rt;
  45. int id;
  46. struct k3_nav_ring *t_ring; /* Transmit ring */
  47. struct k3_nav_ring *tc_ring; /* Transmit Completion ring */
  48. };
  49. struct udma_rchan {
  50. void __iomem *reg_rt;
  51. int id;
  52. };
  53. #define UDMA_FLAG_PDMA_ACC32 BIT(0)
  54. #define UDMA_FLAG_PDMA_BURST BIT(1)
  55. #define UDMA_FLAG_TDTYPE BIT(2)
  56. struct udma_match_data {
  57. u32 psil_base;
  58. bool enable_memcpy_support;
  59. u32 flags;
  60. u32 statictr_z_mask;
  61. u32 rchan_oes_offset;
  62. u8 tpl_levels;
  63. u32 level_start_idx[];
  64. };
  65. struct udma_rflow {
  66. int id;
  67. struct k3_nav_ring *fd_ring; /* Free Descriptor ring */
  68. struct k3_nav_ring *r_ring; /* Receive ring */
  69. };
  70. enum udma_rm_range {
  71. RM_RANGE_TCHAN = 0,
  72. RM_RANGE_RCHAN,
  73. RM_RANGE_RFLOW,
  74. RM_RANGE_LAST,
  75. };
  76. struct udma_tisci_rm {
  77. const struct ti_sci_handle *tisci;
  78. const struct ti_sci_rm_udmap_ops *tisci_udmap_ops;
  79. u32 tisci_dev_id;
  80. /* tisci information for PSI-L thread pairing/unpairing */
  81. const struct ti_sci_rm_psil_ops *tisci_psil_ops;
  82. u32 tisci_navss_dev_id;
  83. struct ti_sci_resource *rm_ranges[RM_RANGE_LAST];
  84. };
  85. struct udma_dev {
  86. struct udevice *dev;
  87. void __iomem *mmrs[MMR_LAST];
  88. struct udma_tisci_rm tisci_rm;
  89. struct k3_nav_ringacc *ringacc;
  90. u32 features;
  91. int tchan_cnt;
  92. int echan_cnt;
  93. int rchan_cnt;
  94. int rflow_cnt;
  95. unsigned long *tchan_map;
  96. unsigned long *rchan_map;
  97. unsigned long *rflow_map;
  98. unsigned long *rflow_map_reserved;
  99. struct udma_tchan *tchans;
  100. struct udma_rchan *rchans;
  101. struct udma_rflow *rflows;
  102. struct udma_match_data *match_data;
  103. struct udma_chan *channels;
  104. u32 psil_base;
  105. u32 ch_count;
  106. };
  107. struct udma_chan_config {
  108. u32 psd_size; /* size of Protocol Specific Data */
  109. u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */
  110. u32 hdesc_size; /* Size of a packet descriptor in packet mode */
  111. int remote_thread_id;
  112. u32 atype;
  113. u32 src_thread;
  114. u32 dst_thread;
  115. enum psil_endpoint_type ep_type;
  116. enum udma_tp_level channel_tpl; /* Channel Throughput Level */
  117. enum dma_direction dir;
  118. unsigned int pkt_mode:1; /* TR or packet */
  119. unsigned int needs_epib:1; /* EPIB is needed for the communication or not */
  120. unsigned int enable_acc32:1;
  121. unsigned int enable_burst:1;
  122. unsigned int notdpkt:1; /* Suppress sending TDC packet */
  123. };
  124. struct udma_chan {
  125. struct udma_dev *ud;
  126. char name[20];
  127. struct udma_tchan *tchan;
  128. struct udma_rchan *rchan;
  129. struct udma_rflow *rflow;
  130. struct ti_udma_drv_chan_cfg_data cfg_data;
  131. u32 bcnt; /* number of bytes completed since the start of the channel */
  132. struct udma_chan_config config;
  133. u32 id;
  134. struct cppi5_host_desc_t *desc_tx;
  135. bool in_use;
  136. void *desc_rx;
  137. u32 num_rx_bufs;
  138. u32 desc_rx_cur;
  139. };
  140. #define UDMA_CH_1000(ch) (ch * 0x1000)
  141. #define UDMA_CH_100(ch) (ch * 0x100)
  142. #define UDMA_CH_40(ch) (ch * 0x40)
  143. #ifdef PKTBUFSRX
  144. #define UDMA_RX_DESC_NUM PKTBUFSRX
  145. #else
  146. #define UDMA_RX_DESC_NUM 4
  147. #endif
  148. /* Generic register access functions */
  149. static inline u32 udma_read(void __iomem *base, int reg)
  150. {
  151. u32 v;
  152. v = __raw_readl(base + reg);
  153. pr_debug("READL(32): v(%08X)<--reg(%p)\n", v, base + reg);
  154. return v;
  155. }
  156. static inline void udma_write(void __iomem *base, int reg, u32 val)
  157. {
  158. pr_debug("WRITEL(32): v(%08X)-->reg(%p)\n", val, base + reg);
  159. __raw_writel(val, base + reg);
  160. }
  161. static inline void udma_update_bits(void __iomem *base, int reg,
  162. u32 mask, u32 val)
  163. {
  164. u32 tmp, orig;
  165. orig = udma_read(base, reg);
  166. tmp = orig & ~mask;
  167. tmp |= (val & mask);
  168. if (tmp != orig)
  169. udma_write(base, reg, tmp);
  170. }
  171. /* TCHANRT */
  172. static inline u32 udma_tchanrt_read(struct udma_tchan *tchan, int reg)
  173. {
  174. if (!tchan)
  175. return 0;
  176. return udma_read(tchan->reg_rt, reg);
  177. }
  178. static inline void udma_tchanrt_write(struct udma_tchan *tchan,
  179. int reg, u32 val)
  180. {
  181. if (!tchan)
  182. return;
  183. udma_write(tchan->reg_rt, reg, val);
  184. }
  185. /* RCHANRT */
  186. static inline u32 udma_rchanrt_read(struct udma_rchan *rchan, int reg)
  187. {
  188. if (!rchan)
  189. return 0;
  190. return udma_read(rchan->reg_rt, reg);
  191. }
  192. static inline void udma_rchanrt_write(struct udma_rchan *rchan,
  193. int reg, u32 val)
  194. {
  195. if (!rchan)
  196. return;
  197. udma_write(rchan->reg_rt, reg, val);
  198. }
  199. static inline int udma_navss_psil_pair(struct udma_dev *ud, u32 src_thread,
  200. u32 dst_thread)
  201. {
  202. struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
  203. dst_thread |= UDMA_PSIL_DST_THREAD_ID_OFFSET;
  204. return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci,
  205. tisci_rm->tisci_navss_dev_id,
  206. src_thread, dst_thread);
  207. }
  208. static inline int udma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
  209. u32 dst_thread)
  210. {
  211. struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
  212. dst_thread |= UDMA_PSIL_DST_THREAD_ID_OFFSET;
  213. return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci,
  214. tisci_rm->tisci_navss_dev_id,
  215. src_thread, dst_thread);
  216. }
  217. static inline char *udma_get_dir_text(enum dma_direction dir)
  218. {
  219. switch (dir) {
  220. case DMA_DEV_TO_MEM:
  221. return "DEV_TO_MEM";
  222. case DMA_MEM_TO_DEV:
  223. return "MEM_TO_DEV";
  224. case DMA_MEM_TO_MEM:
  225. return "MEM_TO_MEM";
  226. case DMA_DEV_TO_DEV:
  227. return "DEV_TO_DEV";
  228. default:
  229. break;
  230. }
  231. return "invalid";
  232. }
  233. static inline bool udma_is_chan_running(struct udma_chan *uc)
  234. {
  235. u32 trt_ctl = 0;
  236. u32 rrt_ctl = 0;
  237. switch (uc->config.dir) {
  238. case DMA_DEV_TO_MEM:
  239. rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
  240. pr_debug("%s: rrt_ctl: 0x%08x (peer: 0x%08x)\n",
  241. __func__, rrt_ctl,
  242. udma_rchanrt_read(uc->rchan,
  243. UDMA_RCHAN_RT_PEER_RT_EN_REG));
  244. break;
  245. case DMA_MEM_TO_DEV:
  246. trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
  247. pr_debug("%s: trt_ctl: 0x%08x (peer: 0x%08x)\n",
  248. __func__, trt_ctl,
  249. udma_tchanrt_read(uc->tchan,
  250. UDMA_TCHAN_RT_PEER_RT_EN_REG));
  251. break;
  252. case DMA_MEM_TO_MEM:
  253. trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
  254. rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
  255. break;
  256. default:
  257. break;
  258. }
  259. if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN)
  260. return true;
  261. return false;
  262. }
  263. static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
  264. {
  265. struct k3_nav_ring *ring = NULL;
  266. int ret = -ENOENT;
  267. switch (uc->config.dir) {
  268. case DMA_DEV_TO_MEM:
  269. ring = uc->rflow->r_ring;
  270. break;
  271. case DMA_MEM_TO_DEV:
  272. ring = uc->tchan->tc_ring;
  273. break;
  274. case DMA_MEM_TO_MEM:
  275. ring = uc->tchan->tc_ring;
  276. break;
  277. default:
  278. break;
  279. }
  280. if (ring && k3_nav_ringacc_ring_get_occ(ring))
  281. ret = k3_nav_ringacc_ring_pop(ring, addr);
  282. return ret;
  283. }
  284. static void udma_reset_rings(struct udma_chan *uc)
  285. {
  286. struct k3_nav_ring *ring1 = NULL;
  287. struct k3_nav_ring *ring2 = NULL;
  288. switch (uc->config.dir) {
  289. case DMA_DEV_TO_MEM:
  290. ring1 = uc->rflow->fd_ring;
  291. ring2 = uc->rflow->r_ring;
  292. break;
  293. case DMA_MEM_TO_DEV:
  294. ring1 = uc->tchan->t_ring;
  295. ring2 = uc->tchan->tc_ring;
  296. break;
  297. case DMA_MEM_TO_MEM:
  298. ring1 = uc->tchan->t_ring;
  299. ring2 = uc->tchan->tc_ring;
  300. break;
  301. default:
  302. break;
  303. }
  304. if (ring1)
  305. k3_nav_ringacc_ring_reset_dma(ring1, 0);
  306. if (ring2)
  307. k3_nav_ringacc_ring_reset(ring2);
  308. }
  309. static void udma_reset_counters(struct udma_chan *uc)
  310. {
  311. u32 val;
  312. if (uc->tchan) {
  313. val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG);
  314. udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_BCNT_REG, val);
  315. val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG);
  316. udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG, val);
  317. val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PCNT_REG);
  318. udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PCNT_REG, val);
  319. val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG);
  320. udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG, val);
  321. }
  322. if (uc->rchan) {
  323. val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_BCNT_REG);
  324. udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_BCNT_REG, val);
  325. val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG);
  326. udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG, val);
  327. val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PCNT_REG);
  328. udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PCNT_REG, val);
  329. val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG);
  330. udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG, val);
  331. }
  332. uc->bcnt = 0;
  333. }
  334. static inline int udma_stop_hard(struct udma_chan *uc)
  335. {
  336. pr_debug("%s: ENTER (chan%d)\n", __func__, uc->id);
  337. switch (uc->config.dir) {
  338. case DMA_DEV_TO_MEM:
  339. udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG, 0);
  340. udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
  341. break;
  342. case DMA_MEM_TO_DEV:
  343. udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
  344. udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG, 0);
  345. break;
  346. case DMA_MEM_TO_MEM:
  347. udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
  348. udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
  349. break;
  350. default:
  351. return -EINVAL;
  352. }
  353. return 0;
  354. }
  355. static int udma_start(struct udma_chan *uc)
  356. {
  357. /* Channel is already running, no need to proceed further */
  358. if (udma_is_chan_running(uc))
  359. goto out;
  360. pr_debug("%s: chan:%d dir:%s\n",
  361. __func__, uc->id, udma_get_dir_text(uc->config.dir));
  362. /* Make sure that we clear the teardown bit, if it is set */
  363. udma_stop_hard(uc);
  364. /* Reset all counters */
  365. udma_reset_counters(uc);
  366. switch (uc->config.dir) {
  367. case DMA_DEV_TO_MEM:
  368. udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
  369. UDMA_CHAN_RT_CTL_EN);
  370. /* Enable remote */
  371. udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
  372. UDMA_PEER_RT_EN_ENABLE);
  373. pr_debug("%s(rx): RT_CTL:0x%08x PEER RT_ENABLE:0x%08x\n",
  374. __func__,
  375. udma_rchanrt_read(uc->rchan,
  376. UDMA_RCHAN_RT_CTL_REG),
  377. udma_rchanrt_read(uc->rchan,
  378. UDMA_RCHAN_RT_PEER_RT_EN_REG));
  379. break;
  380. case DMA_MEM_TO_DEV:
  381. /* Enable remote */
  382. udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG,
  383. UDMA_PEER_RT_EN_ENABLE);
  384. udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
  385. UDMA_CHAN_RT_CTL_EN);
  386. pr_debug("%s(tx): RT_CTL:0x%08x PEER RT_ENABLE:0x%08x\n",
  387. __func__,
  388. udma_tchanrt_read(uc->tchan,
  389. UDMA_TCHAN_RT_CTL_REG),
  390. udma_tchanrt_read(uc->tchan,
  391. UDMA_TCHAN_RT_PEER_RT_EN_REG));
  392. break;
  393. case DMA_MEM_TO_MEM:
  394. udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
  395. UDMA_CHAN_RT_CTL_EN);
  396. udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
  397. UDMA_CHAN_RT_CTL_EN);
  398. break;
  399. default:
  400. return -EINVAL;
  401. }
  402. pr_debug("%s: DONE chan:%d\n", __func__, uc->id);
  403. out:
  404. return 0;
  405. }
  406. static inline void udma_stop_mem2dev(struct udma_chan *uc, bool sync)
  407. {
  408. int i = 0;
  409. u32 val;
  410. udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
  411. UDMA_CHAN_RT_CTL_EN |
  412. UDMA_CHAN_RT_CTL_TDOWN);
  413. val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
  414. while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
  415. val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
  416. udelay(1);
  417. if (i > 1000) {
  418. printf(" %s TIMEOUT !\n", __func__);
  419. break;
  420. }
  421. i++;
  422. }
  423. val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG);
  424. if (val & UDMA_PEER_RT_EN_ENABLE)
  425. printf("%s: peer not stopped TIMEOUT !\n", __func__);
  426. }
  427. static inline void udma_stop_dev2mem(struct udma_chan *uc, bool sync)
  428. {
  429. int i = 0;
  430. u32 val;
  431. udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
  432. UDMA_PEER_RT_EN_ENABLE |
  433. UDMA_PEER_RT_EN_TEARDOWN);
  434. val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
  435. while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
  436. val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
  437. udelay(1);
  438. if (i > 1000) {
  439. printf("%s TIMEOUT !\n", __func__);
  440. break;
  441. }
  442. i++;
  443. }
  444. val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG);
  445. if (val & UDMA_PEER_RT_EN_ENABLE)
  446. printf("%s: peer not stopped TIMEOUT !\n", __func__);
  447. }
  448. static inline int udma_stop(struct udma_chan *uc)
  449. {
  450. pr_debug("%s: chan:%d dir:%s\n",
  451. __func__, uc->id, udma_get_dir_text(uc->config.dir));
  452. udma_reset_counters(uc);
  453. switch (uc->config.dir) {
  454. case DMA_DEV_TO_MEM:
  455. udma_stop_dev2mem(uc, true);
  456. break;
  457. case DMA_MEM_TO_DEV:
  458. udma_stop_mem2dev(uc, true);
  459. break;
  460. case DMA_MEM_TO_MEM:
  461. udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
  462. udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
  463. break;
  464. default:
  465. return -EINVAL;
  466. }
  467. return 0;
  468. }
  469. static void udma_poll_completion(struct udma_chan *uc, dma_addr_t *paddr)
  470. {
  471. int i = 1;
  472. while (udma_pop_from_ring(uc, paddr)) {
  473. udelay(1);
  474. if (!(i % 1000000))
  475. printf(".");
  476. i++;
  477. }
  478. }
  479. static struct udma_rflow *__udma_reserve_rflow(struct udma_dev *ud, int id)
  480. {
  481. DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);
  482. if (id >= 0) {
  483. if (test_bit(id, ud->rflow_map)) {
  484. dev_err(ud->dev, "rflow%d is in use\n", id);
  485. return ERR_PTR(-ENOENT);
  486. }
  487. } else {
  488. bitmap_or(tmp, ud->rflow_map, ud->rflow_map_reserved,
  489. ud->rflow_cnt);
  490. id = find_next_zero_bit(tmp, ud->rflow_cnt, ud->rchan_cnt);
  491. if (id >= ud->rflow_cnt)
  492. return ERR_PTR(-ENOENT);
  493. }
  494. __set_bit(id, ud->rflow_map);
  495. return &ud->rflows[id];
  496. }
  497. #define UDMA_RESERVE_RESOURCE(res) \
  498. static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud, \
  499. int id) \
  500. { \
  501. if (id >= 0) { \
  502. if (test_bit(id, ud->res##_map)) { \
  503. dev_err(ud->dev, "res##%d is in use\n", id); \
  504. return ERR_PTR(-ENOENT); \
  505. } \
  506. } else { \
  507. id = find_first_zero_bit(ud->res##_map, ud->res##_cnt); \
  508. if (id == ud->res##_cnt) { \
  509. return ERR_PTR(-ENOENT); \
  510. } \
  511. } \
  512. \
  513. __set_bit(id, ud->res##_map); \
  514. return &ud->res##s[id]; \
  515. }
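/*
 * For readability, UDMA_RESERVE_RESOURCE(tchan) above expands roughly to the
 * function below (error print trimmed); the rchan variant is identical with
 * tchan replaced by rchan. Sketch only, not part of the build.
 *
 * static struct udma_tchan *__udma_reserve_tchan(struct udma_dev *ud, int id)
 * {
 *	if (id >= 0) {
 *		if (test_bit(id, ud->tchan_map))
 *			return ERR_PTR(-ENOENT);
 *	} else {
 *		id = find_first_zero_bit(ud->tchan_map, ud->tchan_cnt);
 *		if (id == ud->tchan_cnt)
 *			return ERR_PTR(-ENOENT);
 *	}
 *	__set_bit(id, ud->tchan_map);
 *	return &ud->tchans[id];
 * }
 */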
  516. UDMA_RESERVE_RESOURCE(tchan);
  517. UDMA_RESERVE_RESOURCE(rchan);
  518. static int udma_get_tchan(struct udma_chan *uc)
  519. {
  520. struct udma_dev *ud = uc->ud;
  521. if (uc->tchan) {
  522. dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
  523. uc->id, uc->tchan->id);
  524. return 0;
  525. }
  526. uc->tchan = __udma_reserve_tchan(ud, -1);
  527. if (IS_ERR(uc->tchan))
  528. return PTR_ERR(uc->tchan);
  529. pr_debug("chan%d: got tchan%d\n", uc->id, uc->tchan->id);
  530. return 0;
  531. }
  532. static int udma_get_rchan(struct udma_chan *uc)
  533. {
  534. struct udma_dev *ud = uc->ud;
  535. if (uc->rchan) {
  536. dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
  537. uc->id, uc->rchan->id);
  538. return 0;
  539. }
  540. uc->rchan = __udma_reserve_rchan(ud, -1);
  541. if (IS_ERR(uc->rchan))
  542. return PTR_ERR(uc->rchan);
  543. pr_debug("chan%d: got rchan%d\n", uc->id, uc->rchan->id);
  544. return 0;
  545. }
  546. static int udma_get_chan_pair(struct udma_chan *uc)
  547. {
  548. struct udma_dev *ud = uc->ud;
  549. int chan_id, end;
  550. if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
  551. dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
  552. uc->id, uc->tchan->id);
  553. return 0;
  554. }
  555. if (uc->tchan) {
  556. dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
  557. uc->id, uc->tchan->id);
  558. return -EBUSY;
  559. } else if (uc->rchan) {
  560. dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
  561. uc->id, uc->rchan->id);
  562. return -EBUSY;
  563. }
  564. /* Can be optimized, but let's have it like this for now */
  565. end = min(ud->tchan_cnt, ud->rchan_cnt);
  566. for (chan_id = 0; chan_id < end; chan_id++) {
  567. if (!test_bit(chan_id, ud->tchan_map) &&
  568. !test_bit(chan_id, ud->rchan_map))
  569. break;
  570. }
  571. if (chan_id == end)
  572. return -ENOENT;
  573. __set_bit(chan_id, ud->tchan_map);
  574. __set_bit(chan_id, ud->rchan_map);
  575. uc->tchan = &ud->tchans[chan_id];
  576. uc->rchan = &ud->rchans[chan_id];
  577. pr_debug("chan%d: got t/rchan%d pair\n", uc->id, chan_id);
  578. return 0;
  579. }
  580. static int udma_get_rflow(struct udma_chan *uc, int flow_id)
  581. {
  582. struct udma_dev *ud = uc->ud;
  583. if (uc->rflow) {
  584. dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n",
  585. uc->id, uc->rflow->id);
  586. return 0;
  587. }
  588. if (!uc->rchan)
  589. dev_warn(ud->dev, "chan%d: does not have rchan??\n", uc->id);
  590. uc->rflow = __udma_reserve_rflow(ud, flow_id);
  591. if (IS_ERR(uc->rflow))
  592. return PTR_ERR(uc->rflow);
  593. pr_debug("chan%d: got rflow%d\n", uc->id, uc->rflow->id);
  594. return 0;
  595. }
  596. static void udma_put_rchan(struct udma_chan *uc)
  597. {
  598. struct udma_dev *ud = uc->ud;
  599. if (uc->rchan) {
  600. dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id,
  601. uc->rchan->id);
  602. __clear_bit(uc->rchan->id, ud->rchan_map);
  603. uc->rchan = NULL;
  604. }
  605. }
  606. static void udma_put_tchan(struct udma_chan *uc)
  607. {
  608. struct udma_dev *ud = uc->ud;
  609. if (uc->tchan) {
  610. dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
  611. uc->tchan->id);
  612. __clear_bit(uc->tchan->id, ud->tchan_map);
  613. uc->tchan = NULL;
  614. }
  615. }
  616. static void udma_put_rflow(struct udma_chan *uc)
  617. {
  618. struct udma_dev *ud = uc->ud;
  619. if (uc->rflow) {
  620. dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
  621. uc->rflow->id);
  622. __clear_bit(uc->rflow->id, ud->rflow_map);
  623. uc->rflow = NULL;
  624. }
  625. }
  626. static void udma_free_tx_resources(struct udma_chan *uc)
  627. {
  628. if (!uc->tchan)
  629. return;
  630. k3_nav_ringacc_ring_free(uc->tchan->t_ring);
  631. k3_nav_ringacc_ring_free(uc->tchan->tc_ring);
  632. uc->tchan->t_ring = NULL;
  633. uc->tchan->tc_ring = NULL;
  634. udma_put_tchan(uc);
  635. }
  636. static int udma_alloc_tx_resources(struct udma_chan *uc)
  637. {
  638. struct k3_nav_ring_cfg ring_cfg;
  639. struct udma_dev *ud = uc->ud;
  640. int ret;
  641. ret = udma_get_tchan(uc);
  642. if (ret)
  643. return ret;
  644. ret = k3_nav_ringacc_request_rings_pair(ud->ringacc, uc->tchan->id, -1,
  645. &uc->tchan->t_ring,
  646. &uc->tchan->tc_ring);
  647. if (ret) {
  648. ret = -EBUSY;
  649. goto err_tx_ring;
  650. }
  651. memset(&ring_cfg, 0, sizeof(ring_cfg));
  652. ring_cfg.size = 16;
  653. ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
  654. ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_RING;
  655. ret = k3_nav_ringacc_ring_cfg(uc->tchan->t_ring, &ring_cfg);
  656. ret |= k3_nav_ringacc_ring_cfg(uc->tchan->tc_ring, &ring_cfg);
  657. if (ret)
  658. goto err_ringcfg;
  659. return 0;
  660. err_ringcfg:
  661. k3_nav_ringacc_ring_free(uc->tchan->tc_ring);
  662. uc->tchan->tc_ring = NULL;
  663. k3_nav_ringacc_ring_free(uc->tchan->t_ring);
  664. uc->tchan->t_ring = NULL;
  665. err_tx_ring:
  666. udma_put_tchan(uc);
  667. return ret;
  668. }
  669. static void udma_free_rx_resources(struct udma_chan *uc)
  670. {
  671. if (!uc->rchan)
  672. return;
  673. if (uc->rflow) {
  674. k3_nav_ringacc_ring_free(uc->rflow->fd_ring);
  675. k3_nav_ringacc_ring_free(uc->rflow->r_ring);
  676. uc->rflow->fd_ring = NULL;
  677. uc->rflow->r_ring = NULL;
  678. udma_put_rflow(uc);
  679. }
  680. udma_put_rchan(uc);
  681. }
  682. static int udma_alloc_rx_resources(struct udma_chan *uc)
  683. {
  684. struct k3_nav_ring_cfg ring_cfg;
  685. struct udma_dev *ud = uc->ud;
  686. struct udma_rflow *rflow;
  687. int fd_ring_id;
  688. int ret;
  689. ret = udma_get_rchan(uc);
  690. if (ret)
  691. return ret;
  692. /* For MEM_TO_MEM we don't need rflow or rings */
  693. if (uc->config.dir == DMA_MEM_TO_MEM)
  694. return 0;
  695. ret = udma_get_rflow(uc, uc->rchan->id);
  696. if (ret) {
  697. ret = -EBUSY;
  698. goto err_rflow;
  699. }
  700. fd_ring_id = ud->tchan_cnt + ud->echan_cnt + uc->rchan->id;
  701. rflow = uc->rflow;
  702. ret = k3_nav_ringacc_request_rings_pair(ud->ringacc, fd_ring_id, -1,
  703. &rflow->fd_ring, &rflow->r_ring);
  704. if (ret) {
  705. ret = -EBUSY;
  706. goto err_rx_ring;
  707. }
  708. memset(&ring_cfg, 0, sizeof(ring_cfg));
  709. ring_cfg.size = 16;
  710. ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
  711. ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_RING;
  712. ret = k3_nav_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg);
  713. ret |= k3_nav_ringacc_ring_cfg(rflow->r_ring, &ring_cfg);
  714. if (ret)
  715. goto err_ringcfg;
  716. return 0;
  717. err_ringcfg:
  718. k3_nav_ringacc_ring_free(rflow->r_ring);
  719. rflow->r_ring = NULL;
  720. k3_nav_ringacc_ring_free(rflow->fd_ring);
  721. rflow->fd_ring = NULL;
  722. err_rx_ring:
  723. udma_put_rflow(uc);
  724. err_rflow:
  725. udma_put_rchan(uc);
  726. return ret;
  727. }
  728. static int udma_alloc_tchan_sci_req(struct udma_chan *uc)
  729. {
  730. struct udma_dev *ud = uc->ud;
  731. int tc_ring = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
  732. struct ti_sci_msg_rm_udmap_tx_ch_cfg req;
  733. struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
  734. u32 mode;
  735. int ret;
  736. if (uc->config.pkt_mode)
  737. mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
  738. else
  739. mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
  740. req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
  741. TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
  742. TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID;
  743. req.nav_id = tisci_rm->tisci_dev_id;
  744. req.index = uc->tchan->id;
  745. req.tx_chan_type = mode;
  746. if (uc->config.dir == DMA_MEM_TO_MEM)
  747. req.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
  748. else
  749. req.tx_fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
  750. uc->config.psd_size,
  751. 0) >> 2;
  752. req.txcq_qnum = tc_ring;
  753. ret = tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
  754. if (ret)
  755. dev_err(ud->dev, "tisci tx alloc failed %d\n", ret);
  756. return ret;
  757. }
  758. static int udma_alloc_rchan_sci_req(struct udma_chan *uc)
  759. {
  760. struct udma_dev *ud = uc->ud;
  761. int fd_ring = k3_nav_ringacc_get_ring_id(uc->rflow->fd_ring);
  762. int rx_ring = k3_nav_ringacc_get_ring_id(uc->rflow->r_ring);
  763. int tc_ring = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
  764. struct ti_sci_msg_rm_udmap_rx_ch_cfg req = { 0 };
  765. struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
  766. struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
  767. u32 mode;
  768. int ret;
  769. if (uc->config.pkt_mode)
  770. mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
  771. else
  772. mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
  773. req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
  774. TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
  775. TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
  776. TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
  777. TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID;
  778. req.nav_id = tisci_rm->tisci_dev_id;
  779. req.index = uc->rchan->id;
  780. req.rx_chan_type = mode;
  781. if (uc->config.dir == DMA_MEM_TO_MEM) {
  782. req.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
  783. req.rxcq_qnum = tc_ring;
  784. } else {
  785. req.rx_fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
  786. uc->config.psd_size,
  787. 0) >> 2;
  788. req.rxcq_qnum = rx_ring;
  789. }
  790. if (uc->rflow->id != uc->rchan->id && uc->config.dir != DMA_MEM_TO_MEM) {
  791. req.flowid_start = uc->rflow->id;
  792. req.flowid_cnt = 1;
  793. }
  794. ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req);
  795. if (ret) {
  796. dev_err(ud->dev, "tisci rx %u cfg failed %d\n",
  797. uc->rchan->id, ret);
  798. return ret;
  799. }
  800. if (uc->config.dir == DMA_MEM_TO_MEM)
  801. return ret;
  802. flow_req.valid_params =
  803. TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
  804. TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
  805. TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
  806. TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
  807. TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
  808. TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
  809. TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
  810. TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
  811. TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
  812. TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
  813. TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
  814. TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
  815. TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID |
  816. TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PS_LOCATION_VALID;
  817. flow_req.nav_id = tisci_rm->tisci_dev_id;
  818. flow_req.flow_index = uc->rflow->id;
  819. if (uc->config.needs_epib)
  820. flow_req.rx_einfo_present = 1;
  821. else
  822. flow_req.rx_einfo_present = 0;
  823. if (uc->config.psd_size)
  824. flow_req.rx_psinfo_present = 1;
  825. else
  826. flow_req.rx_psinfo_present = 0;
  827. flow_req.rx_error_handling = 0;
  828. flow_req.rx_desc_type = 0;
  829. flow_req.rx_dest_qnum = rx_ring;
  830. flow_req.rx_src_tag_hi_sel = 2;
  831. flow_req.rx_src_tag_lo_sel = 4;
  832. flow_req.rx_dest_tag_hi_sel = 5;
  833. flow_req.rx_dest_tag_lo_sel = 4;
  834. flow_req.rx_fdq0_sz0_qnum = fd_ring;
  835. flow_req.rx_fdq1_qnum = fd_ring;
  836. flow_req.rx_fdq2_qnum = fd_ring;
  837. flow_req.rx_fdq3_qnum = fd_ring;
  838. flow_req.rx_ps_location = 0;
  839. ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci,
  840. &flow_req);
  841. if (ret)
  842. dev_err(ud->dev, "tisci rx %u flow %u cfg failed %d\n",
  843. uc->rchan->id, uc->rflow->id, ret);
  844. return ret;
  845. }
  846. static int udma_alloc_chan_resources(struct udma_chan *uc)
  847. {
  848. struct udma_dev *ud = uc->ud;
  849. int ret;
  850. pr_debug("%s: chan:%d as %s\n",
  851. __func__, uc->id, udma_get_dir_text(uc->config.dir));
  852. switch (uc->config.dir) {
  853. case DMA_MEM_TO_MEM:
  854. /* Non synchronized - mem to mem type of transfer */
  855. uc->config.pkt_mode = false;
  856. ret = udma_get_chan_pair(uc);
  857. if (ret)
  858. return ret;
  859. ret = udma_alloc_tx_resources(uc);
  860. if (ret)
  861. goto err_free_res;
  862. ret = udma_alloc_rx_resources(uc);
  863. if (ret)
  864. goto err_free_res;
  865. uc->config.src_thread = ud->psil_base + uc->tchan->id;
  866. uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | 0x8000;
  867. break;
  868. case DMA_MEM_TO_DEV:
  869. /* Slave transfer synchronized - mem to dev (TX) transfer */
  870. ret = udma_alloc_tx_resources(uc);
  871. if (ret)
  872. goto err_free_res;
  873. uc->config.src_thread = ud->psil_base + uc->tchan->id;
  874. uc->config.dst_thread = uc->config.remote_thread_id;
  875. uc->config.dst_thread |= 0x8000;
  876. break;
  877. case DMA_DEV_TO_MEM:
  878. /* Slave transfer synchronized - dev to mem (RX) transfer */
  879. ret = udma_alloc_rx_resources(uc);
  880. if (ret)
  881. goto err_free_res;
  882. uc->config.src_thread = uc->config.remote_thread_id;
  883. uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | 0x8000;
  884. break;
  885. default:
  886. /* Can not happen */
  887. pr_debug("%s: chan:%d invalid direction (%u)\n",
  888. __func__, uc->id, uc->config.dir);
  889. return -EINVAL;
  890. }
  891. /* We have channel indexes and rings */
  892. if (uc->config.dir == DMA_MEM_TO_MEM) {
  893. ret = udma_alloc_tchan_sci_req(uc);
  894. if (ret)
  895. goto err_free_res;
  896. ret = udma_alloc_rchan_sci_req(uc);
  897. if (ret)
  898. goto err_free_res;
  899. } else {
  900. /* Slave transfer */
  901. if (uc->config.dir == DMA_MEM_TO_DEV) {
  902. ret = udma_alloc_tchan_sci_req(uc);
  903. if (ret)
  904. goto err_free_res;
  905. } else {
  906. ret = udma_alloc_rchan_sci_req(uc);
  907. if (ret)
  908. goto err_free_res;
  909. }
  910. }
  911. if (udma_is_chan_running(uc)) {
  912. dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
  913. udma_stop(uc);
  914. if (udma_is_chan_running(uc)) {
  915. dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
  916. goto err_free_res;
  917. }
  918. }
  919. /* PSI-L pairing */
  920. ret = udma_navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
  921. if (ret) {
  922. dev_err(ud->dev, "k3_nav_psil_request_link fail\n");
  923. goto err_free_res;
  924. }
  925. return 0;
  926. err_free_res:
  927. udma_free_tx_resources(uc);
  928. udma_free_rx_resources(uc);
  929. uc->config.remote_thread_id = -1;
  930. return ret;
  931. }
  932. static void udma_free_chan_resources(struct udma_chan *uc)
  933. {
  934. /* Some configuration to UDMA-P channel: disable, reset, whatever */
  935. /* Release PSI-L pairing */
  936. udma_navss_psil_unpair(uc->ud, uc->config.src_thread, uc->config.dst_thread);
  937. /* Reset the rings for a new start */
  938. udma_reset_rings(uc);
  939. udma_free_tx_resources(uc);
  940. udma_free_rx_resources(uc);
  941. uc->config.remote_thread_id = -1;
  942. uc->config.dir = DMA_MEM_TO_MEM;
  943. }
  944. static int udma_get_mmrs(struct udevice *dev)
  945. {
  946. struct udma_dev *ud = dev_get_priv(dev);
  947. int i;
  948. for (i = 0; i < MMR_LAST; i++) {
  949. ud->mmrs[i] = (uint32_t *)devfdt_get_addr_name(dev,
  950. mmr_names[i]);
  951. if (!ud->mmrs[i])
  952. return -EINVAL;
  953. }
  954. return 0;
  955. }
  956. static int udma_setup_resources(struct udma_dev *ud)
  957. {
  958. struct udevice *dev = ud->dev;
  959. int ch_count, i;
  960. u32 cap2, cap3;
  961. struct ti_sci_resource_desc *rm_desc;
  962. struct ti_sci_resource *rm_res;
  963. struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
  964. static const char * const range_names[] = { "ti,sci-rm-range-tchan",
  965. "ti,sci-rm-range-rchan",
  966. "ti,sci-rm-range-rflow" };
  967. cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28);
  968. cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
  969. ud->rflow_cnt = cap3 & 0x3fff;
  970. ud->tchan_cnt = cap2 & 0x1ff;
  971. ud->echan_cnt = (cap2 >> 9) & 0x1ff;
  972. ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
  973. ch_count = ud->tchan_cnt + ud->rchan_cnt;
  974. ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
  975. sizeof(unsigned long), GFP_KERNEL);
  976. ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
  977. GFP_KERNEL);
  978. ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
  979. sizeof(unsigned long), GFP_KERNEL);
  980. ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
  981. GFP_KERNEL);
  982. ud->rflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt),
  983. sizeof(unsigned long), GFP_KERNEL);
  984. ud->rflow_map_reserved = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
  985. sizeof(unsigned long),
  986. GFP_KERNEL);
  987. ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
  988. GFP_KERNEL);
  989. if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_map ||
  990. !ud->rflow_map_reserved || !ud->tchans || !ud->rchans ||
  991. !ud->rflows)
  992. return -ENOMEM;
  993. /*
  994. * RX flows with the same Ids as RX channels are reserved to be used
  995. * as default flows if remote HW can't generate flow_ids. Those
  996. * RX flows can be requested only explicitly by id.
  997. */
  998. bitmap_set(ud->rflow_map_reserved, 0, ud->rchan_cnt);
  999. /* Get resource ranges from tisci */
  1000. for (i = 0; i < RM_RANGE_LAST; i++)
  1001. tisci_rm->rm_ranges[i] =
  1002. devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
  1003. tisci_rm->tisci_dev_id,
  1004. (char *)range_names[i]);
  1005. /* tchan ranges */
  1006. rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
  1007. if (IS_ERR(rm_res)) {
  1008. bitmap_zero(ud->tchan_map, ud->tchan_cnt);
  1009. } else {
  1010. bitmap_fill(ud->tchan_map, ud->tchan_cnt);
  1011. for (i = 0; i < rm_res->sets; i++) {
  1012. rm_desc = &rm_res->desc[i];
  1013. bitmap_clear(ud->tchan_map, rm_desc->start,
  1014. rm_desc->num);
  1015. }
  1016. }
  1017. /* rchan and matching default flow ranges */
  1018. rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
  1019. if (IS_ERR(rm_res)) {
  1020. bitmap_zero(ud->rchan_map, ud->rchan_cnt);
  1021. bitmap_zero(ud->rflow_map, ud->rchan_cnt);
  1022. } else {
  1023. bitmap_fill(ud->rchan_map, ud->rchan_cnt);
  1024. bitmap_fill(ud->rflow_map, ud->rchan_cnt);
  1025. for (i = 0; i < rm_res->sets; i++) {
  1026. rm_desc = &rm_res->desc[i];
  1027. bitmap_clear(ud->rchan_map, rm_desc->start,
  1028. rm_desc->num);
  1029. bitmap_clear(ud->rflow_map, rm_desc->start,
  1030. rm_desc->num);
  1031. }
  1032. }
  1033. /* GP rflow ranges */
  1034. rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
  1035. if (IS_ERR(rm_res)) {
  1036. bitmap_clear(ud->rflow_map, ud->rchan_cnt,
  1037. ud->rflow_cnt - ud->rchan_cnt);
  1038. } else {
  1039. bitmap_set(ud->rflow_map, ud->rchan_cnt,
  1040. ud->rflow_cnt - ud->rchan_cnt);
  1041. for (i = 0; i < rm_res->sets; i++) {
  1042. rm_desc = &rm_res->desc[i];
  1043. bitmap_clear(ud->rflow_map, rm_desc->start,
  1044. rm_desc->num);
  1045. }
  1046. }
  1047. ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
  1048. ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
  1049. if (!ch_count)
  1050. return -ENODEV;
  1051. ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels),
  1052. GFP_KERNEL);
  1053. if (!ud->channels)
  1054. return -ENOMEM;
  1055. dev_info(dev,
  1056. "Channels: %d (tchan: %u, echan: %u, rchan: %u, rflow: %u)\n",
  1057. ch_count, ud->tchan_cnt, ud->echan_cnt, ud->rchan_cnt,
  1058. ud->rflow_cnt);
  1059. return ch_count;
  1060. }
  1061. static int udma_probe(struct udevice *dev)
  1062. {
  1063. struct dma_dev_priv *uc_priv = dev_get_uclass_priv(dev);
  1064. struct udma_dev *ud = dev_get_priv(dev);
  1065. int i, ret;
  1066. struct udevice *tmp;
  1067. struct udevice *tisci_dev = NULL;
  1068. struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
  1069. ofnode navss_ofnode = ofnode_get_parent(dev_ofnode(dev));
  1070. ret = udma_get_mmrs(dev);
  1071. if (ret)
  1072. return ret;
  1073. ret = uclass_get_device_by_phandle(UCLASS_MISC, dev,
  1074. "ti,ringacc", &tmp);
  1075. ud->ringacc = dev_get_priv(tmp);
  1076. if (IS_ERR(ud->ringacc))
  1077. return PTR_ERR(ud->ringacc);
  1078. ud->match_data = (void *)dev_get_driver_data(dev);
  1079. ud->psil_base = ud->match_data->psil_base;
  1080. ret = uclass_get_device_by_phandle(UCLASS_FIRMWARE, dev,
  1081. "ti,sci", &tisci_dev);
  1082. if (ret) {
  1083. debug("Failed to get TISCI phandle (%d)\n", ret);
  1084. tisci_rm->tisci = NULL;
  1085. return -EINVAL;
  1086. }
  1087. tisci_rm->tisci = (struct ti_sci_handle *)
  1088. (ti_sci_get_handle_from_sysfw(tisci_dev));
  1089. tisci_rm->tisci_dev_id = -1;
  1090. ret = dev_read_u32(dev, "ti,sci-dev-id", &tisci_rm->tisci_dev_id);
  1091. if (ret) {
  1092. dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
  1093. return ret;
  1094. }
  1095. tisci_rm->tisci_navss_dev_id = -1;
  1096. ret = ofnode_read_u32(navss_ofnode, "ti,sci-dev-id",
  1097. &tisci_rm->tisci_navss_dev_id);
  1098. if (ret) {
  1099. dev_err(dev, "navss sci-dev-id read failure %d\n", ret);
  1100. return ret;
  1101. }
  1102. tisci_rm->tisci_udmap_ops = &tisci_rm->tisci->ops.rm_udmap_ops;
  1103. tisci_rm->tisci_psil_ops = &tisci_rm->tisci->ops.rm_psil_ops;
  1104. ud->dev = dev;
  1105. ud->ch_count = udma_setup_resources(ud);
  1106. if (ud->ch_count <= 0)
  1107. return ud->ch_count;
  1108. dev_info(dev,
  1109. "Number of channels: %u (tchan: %u, echan: %u, rchan: %u dev-id %u)\n",
  1110. ud->ch_count, ud->tchan_cnt, ud->echan_cnt, ud->rchan_cnt,
  1111. tisci_rm->tisci_dev_id);
  1112. dev_info(dev, "Number of rflows: %u\n", ud->rflow_cnt);
  1113. for (i = 0; i < ud->tchan_cnt; i++) {
  1114. struct udma_tchan *tchan = &ud->tchans[i];
  1115. tchan->id = i;
  1116. tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + UDMA_CH_1000(i);
  1117. }
  1118. for (i = 0; i < ud->rchan_cnt; i++) {
  1119. struct udma_rchan *rchan = &ud->rchans[i];
  1120. rchan->id = i;
  1121. rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + UDMA_CH_1000(i);
  1122. }
  1123. for (i = 0; i < ud->rflow_cnt; i++) {
  1124. struct udma_rflow *rflow = &ud->rflows[i];
  1125. rflow->id = i;
  1126. }
  1127. for (i = 0; i < ud->ch_count; i++) {
  1128. struct udma_chan *uc = &ud->channels[i];
  1129. uc->ud = ud;
  1130. uc->id = i;
  1131. uc->config.remote_thread_id = -1;
  1132. uc->tchan = NULL;
  1133. uc->rchan = NULL;
  1134. uc->config.dir = DMA_MEM_TO_MEM;
  1135. sprintf(uc->name, "UDMA chan%d\n", i);
  1136. if (!i)
  1137. uc->in_use = true;
  1138. }
  1139. pr_debug("UDMA(rev: 0x%08x) CAP0-3: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
  1140. udma_read(ud->mmrs[MMR_GCFG], 0),
  1141. udma_read(ud->mmrs[MMR_GCFG], 0x20),
  1142. udma_read(ud->mmrs[MMR_GCFG], 0x24),
  1143. udma_read(ud->mmrs[MMR_GCFG], 0x28),
  1144. udma_read(ud->mmrs[MMR_GCFG], 0x2c));
  1145. uc_priv->supported = DMA_SUPPORTS_MEM_TO_MEM | DMA_SUPPORTS_MEM_TO_DEV;
  1146. return ret;
  1147. }
  1148. static int udma_push_to_ring(struct k3_nav_ring *ring, void *elem)
  1149. {
  1150. u64 addr = 0;
  1151. memcpy(&addr, &elem, sizeof(elem));
  1152. return k3_nav_ringacc_ring_push(ring, &addr);
  1153. }
  1154. static void *udma_prep_dma_memcpy(struct udma_chan *uc, dma_addr_t dest,
  1155. dma_addr_t src, size_t len)
  1156. {
  1157. u32 tc_ring_id = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
  1158. struct cppi5_tr_type15_t *tr_req;
  1159. int num_tr;
  1160. size_t tr_size = sizeof(struct cppi5_tr_type15_t);
  1161. u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
  1162. unsigned long dummy;
  1163. void *tr_desc;
  1164. size_t desc_size;
  1165. if (len < SZ_64K) {
  1166. num_tr = 1;
  1167. tr0_cnt0 = len;
  1168. tr0_cnt1 = 1;
  1169. } else {
  1170. unsigned long align_to = __ffs(src | dest);
  1171. if (align_to > 3)
  1172. align_to = 3;
  1173. /*
  1174. * Keep simple: tr0: SZ_64K-alignment blocks,
  1175. * tr1: the remaining
  1176. */
  1177. num_tr = 2;
  1178. tr0_cnt0 = (SZ_64K - BIT(align_to));
  1179. if (len / tr0_cnt0 >= SZ_64K) {
  1180. dev_err(uc->ud->dev, "size %zu is not supported\n",
  1181. len);
  1182. return NULL;
  1183. }
  1184. tr0_cnt1 = len / tr0_cnt0;
  1185. tr1_cnt0 = len % tr0_cnt0;
  1186. }
  1187. desc_size = cppi5_trdesc_calc_size(num_tr, tr_size);
  1188. tr_desc = dma_alloc_coherent(desc_size, &dummy);
  1189. if (!tr_desc)
  1190. return NULL;
  1191. memset(tr_desc, 0, desc_size);
  1192. cppi5_trdesc_init(tr_desc, num_tr, tr_size, 0, 0);
  1193. cppi5_desc_set_pktids(tr_desc, uc->id, 0x3fff);
  1194. cppi5_desc_set_retpolicy(tr_desc, 0, tc_ring_id);
  1195. tr_req = tr_desc + tr_size;
  1196. cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
  1197. CPPI5_TR_EVENT_SIZE_COMPLETION, 1);
  1198. cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT);
  1199. tr_req[0].addr = src;
  1200. tr_req[0].icnt0 = tr0_cnt0;
  1201. tr_req[0].icnt1 = tr0_cnt1;
  1202. tr_req[0].icnt2 = 1;
  1203. tr_req[0].icnt3 = 1;
  1204. tr_req[0].dim1 = tr0_cnt0;
  1205. tr_req[0].daddr = dest;
  1206. tr_req[0].dicnt0 = tr0_cnt0;
  1207. tr_req[0].dicnt1 = tr0_cnt1;
  1208. tr_req[0].dicnt2 = 1;
  1209. tr_req[0].dicnt3 = 1;
  1210. tr_req[0].ddim1 = tr0_cnt0;
  1211. if (num_tr == 2) {
  1212. cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
  1213. CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
  1214. cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT);
  1215. tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0;
  1216. tr_req[1].icnt0 = tr1_cnt0;
  1217. tr_req[1].icnt1 = 1;
  1218. tr_req[1].icnt2 = 1;
  1219. tr_req[1].icnt3 = 1;
  1220. tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0;
  1221. tr_req[1].dicnt0 = tr1_cnt0;
  1222. tr_req[1].dicnt1 = 1;
  1223. tr_req[1].dicnt2 = 1;
  1224. tr_req[1].dicnt3 = 1;
  1225. }
  1226. cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, CPPI5_TR_CSF_EOP);
  1227. flush_dcache_range((unsigned long)tr_desc,
  1228. ALIGN((unsigned long)tr_desc + desc_size,
  1229. ARCH_DMA_MINALIGN));
  1230. udma_push_to_ring(uc->tchan->t_ring, tr_desc);
  1231. return 0;
  1232. }
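/*
 * Illustrative numbers for the >= SZ_64K split above: with len = 200000 and
 * src|dest at least 8-byte aligned, align_to is capped at 3, so
 * tr0_cnt0 = SZ_64K - BIT(3) = 65528, tr0_cnt1 = 200000 / 65528 = 3 and
 * tr1_cnt0 = 200000 % 65528 = 3416; TR0 moves three 65528-byte blocks and
 * TR1 moves the remaining 3416 bytes starting at offset 196584.
 */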
  1233. static int udma_transfer(struct udevice *dev, int direction,
  1234. void *dst, void *src, size_t len)
  1235. {
  1236. struct udma_dev *ud = dev_get_priv(dev);
  1237. /* Channel0 is reserved for memcpy */
  1238. struct udma_chan *uc = &ud->channels[0];
  1239. dma_addr_t paddr = 0;
  1240. int ret;
  1241. ret = udma_alloc_chan_resources(uc);
  1242. if (ret)
  1243. return ret;
  1244. udma_prep_dma_memcpy(uc, (dma_addr_t)dst, (dma_addr_t)src, len);
  1245. udma_start(uc);
  1246. udma_poll_completion(uc, &paddr);
  1247. udma_stop(uc);
  1248. udma_free_chan_resources(uc);
  1249. return 0;
  1250. }
  1251. static int udma_request(struct dma *dma)
  1252. {
  1253. struct udma_dev *ud = dev_get_priv(dma->dev);
  1254. struct udma_chan_config *ucc;
  1255. struct udma_chan *uc;
  1256. unsigned long dummy;
  1257. int ret;
  1258. if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
  1259. dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
  1260. return -EINVAL;
  1261. }
  1262. uc = &ud->channels[dma->id];
  1263. ucc = &uc->config;
  1264. ret = udma_alloc_chan_resources(uc);
  1265. if (ret) {
  1266. dev_err(dma->dev, "alloc dma res failed %d\n", ret);
  1267. return -EINVAL;
  1268. }
  1269. if (uc->config.dir == DMA_MEM_TO_DEV) {
  1270. uc->desc_tx = dma_alloc_coherent(ucc->hdesc_size, &dummy);
  1271. memset(uc->desc_tx, 0, ucc->hdesc_size);
  1272. } else {
  1273. uc->desc_rx = dma_alloc_coherent(
  1274. ucc->hdesc_size * UDMA_RX_DESC_NUM, &dummy);
  1275. memset(uc->desc_rx, 0, ucc->hdesc_size * UDMA_RX_DESC_NUM);
  1276. }
  1277. uc->in_use = true;
  1278. uc->desc_rx_cur = 0;
  1279. uc->num_rx_bufs = 0;
  1280. if (uc->config.dir == DMA_DEV_TO_MEM) {
  1281. uc->cfg_data.flow_id_base = uc->rflow->id;
  1282. uc->cfg_data.flow_id_cnt = 1;
  1283. }
  1284. return 0;
  1285. }
  1286. static int udma_rfree(struct dma *dma)
  1287. {
  1288. struct udma_dev *ud = dev_get_priv(dma->dev);
  1289. struct udma_chan *uc;
  1290. if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
  1291. dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
  1292. return -EINVAL;
  1293. }
  1294. uc = &ud->channels[dma->id];
  1295. if (udma_is_chan_running(uc))
  1296. udma_stop(uc);
  1297. udma_free_chan_resources(uc);
  1298. uc->in_use = false;
  1299. return 0;
  1300. }
  1301. static int udma_enable(struct dma *dma)
  1302. {
  1303. struct udma_dev *ud = dev_get_priv(dma->dev);
  1304. struct udma_chan *uc;
  1305. int ret;
  1306. if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
  1307. dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
  1308. return -EINVAL;
  1309. }
  1310. uc = &ud->channels[dma->id];
  1311. ret = udma_start(uc);
  1312. return ret;
  1313. }
  1314. static int udma_disable(struct dma *dma)
  1315. {
  1316. struct udma_dev *ud = dev_get_priv(dma->dev);
  1317. struct udma_chan *uc;
  1318. int ret = 0;
  1319. if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
  1320. dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
  1321. return -EINVAL;
  1322. }
  1323. uc = &ud->channels[dma->id];
  1324. if (udma_is_chan_running(uc))
  1325. ret = udma_stop(uc);
  1326. else
  1327. dev_err(dma->dev, "%s not running\n", __func__);
  1328. return ret;
  1329. }
  1330. static int udma_send(struct dma *dma, void *src, size_t len, void *metadata)
  1331. {
  1332. struct udma_dev *ud = dev_get_priv(dma->dev);
  1333. struct cppi5_host_desc_t *desc_tx;
  1334. dma_addr_t dma_src = (dma_addr_t)src;
  1335. struct ti_udma_drv_packet_data packet_data = { 0 };
  1336. dma_addr_t paddr;
  1337. struct udma_chan *uc;
  1338. u32 tc_ring_id;
  1339. int ret;
  1340. if (metadata)
  1341. packet_data = *((struct ti_udma_drv_packet_data *)metadata);
  1342. if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
  1343. dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
  1344. return -EINVAL;
  1345. }
  1346. uc = &ud->channels[dma->id];
  1347. if (uc->config.dir != DMA_MEM_TO_DEV)
  1348. return -EINVAL;
  1349. tc_ring_id = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
  1350. desc_tx = uc->desc_tx;
  1351. cppi5_hdesc_reset_hbdesc(desc_tx);
  1352. cppi5_hdesc_init(desc_tx,
  1353. uc->config.needs_epib ? CPPI5_INFO0_HDESC_EPIB_PRESENT : 0,
  1354. uc->config.psd_size);
  1355. cppi5_hdesc_set_pktlen(desc_tx, len);
  1356. cppi5_hdesc_attach_buf(desc_tx, dma_src, len, dma_src, len);
  1357. cppi5_desc_set_pktids(&desc_tx->hdr, uc->id, 0x3fff);
  1358. cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, tc_ring_id);
  1359. /* pass below information from caller */
  1360. cppi5_hdesc_set_pkttype(desc_tx, packet_data.pkt_type);
  1361. cppi5_desc_set_tags_ids(&desc_tx->hdr, 0, packet_data.dest_tag);
  1362. flush_dcache_range((unsigned long)dma_src,
  1363. ALIGN((unsigned long)dma_src + len,
  1364. ARCH_DMA_MINALIGN));
  1365. flush_dcache_range((unsigned long)desc_tx,
  1366. ALIGN((unsigned long)desc_tx + uc->config.hdesc_size,
  1367. ARCH_DMA_MINALIGN));
  1368. ret = udma_push_to_ring(uc->tchan->t_ring, uc->desc_tx);
  1369. if (ret) {
  1370. dev_err(dma->dev, "TX dma push fail ch_id %lu %d\n",
  1371. dma->id, ret);
  1372. return ret;
  1373. }
  1374. udma_poll_completion(uc, &paddr);
  1375. return 0;
  1376. }
  1377. static int udma_receive(struct dma *dma, void **dst, void *metadata)
  1378. {
  1379. struct udma_dev *ud = dev_get_priv(dma->dev);
  1380. struct udma_chan_config *ucc;
  1381. struct cppi5_host_desc_t *desc_rx;
  1382. dma_addr_t buf_dma;
  1383. struct udma_chan *uc;
  1384. u32 buf_dma_len, pkt_len;
  1385. u32 port_id = 0;
  1386. int ret;
  1387. if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
  1388. dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
  1389. return -EINVAL;
  1390. }
  1391. uc = &ud->channels[dma->id];
  1392. ucc = &uc->config;
  1393. if (uc->config.dir != DMA_DEV_TO_MEM)
  1394. return -EINVAL;
  1395. if (!uc->num_rx_bufs)
  1396. return -EINVAL;
  1397. ret = k3_nav_ringacc_ring_pop(uc->rflow->r_ring, &desc_rx);
  1398. if (ret && ret != -ENODATA) {
  1399. dev_err(dma->dev, "rx dma fail ch_id:%lu %d\n", dma->id, ret);
  1400. return ret;
  1401. } else if (ret == -ENODATA) {
  1402. return 0;
  1403. }
  1404. /* invalidate cache data */
  1405. invalidate_dcache_range((ulong)desc_rx,
  1406. (ulong)desc_rx + ucc->hdesc_size);
  1407. cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
  1408. pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
  1409. /* invalidate cache data */
  1410. invalidate_dcache_range((ulong)buf_dma,
  1411. (ulong)(buf_dma + buf_dma_len));
  1412. cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);
  1413. *dst = (void *)buf_dma;
  1414. uc->num_rx_bufs--;
  1415. return pkt_len;
  1416. }
  1417. static int udma_of_xlate(struct dma *dma, struct ofnode_phandle_args *args)
  1418. {
  1419. struct udma_chan_config *ucc;
  1420. struct udma_dev *ud = dev_get_priv(dma->dev);
  1421. struct udma_chan *uc = &ud->channels[0];
  1422. struct psil_endpoint_config *ep_config;
  1423. u32 val;
  1424. for (val = 0; val < ud->ch_count; val++) {
  1425. uc = &ud->channels[val];
  1426. if (!uc->in_use)
  1427. break;
  1428. }
  1429. if (val == ud->ch_count)
  1430. return -EBUSY;
  1431. ucc = &uc->config;
  1432. ucc->remote_thread_id = args->args[0];
  1433. if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)
  1434. ucc->dir = DMA_MEM_TO_DEV;
  1435. else
  1436. ucc->dir = DMA_DEV_TO_MEM;
  1437. ep_config = psil_get_ep_config(ucc->remote_thread_id);
  1438. if (IS_ERR(ep_config)) {
  1439. dev_err(ud->dev, "No configuration for psi-l thread 0x%04x\n",
  1440. uc->config.remote_thread_id);
  1441. ucc->dir = DMA_MEM_TO_MEM;
  1442. ucc->remote_thread_id = -1;
  1443. return -EINVAL;
  1444. }
  1445. ucc->pkt_mode = ep_config->pkt_mode;
  1446. ucc->channel_tpl = ep_config->channel_tpl;
  1447. ucc->notdpkt = ep_config->notdpkt;
  1448. ucc->ep_type = ep_config->ep_type;
  1449. ucc->needs_epib = ep_config->needs_epib;
  1450. ucc->psd_size = ep_config->psd_size;
  1451. ucc->metadata_size = (ucc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_SIZE : 0) + ucc->psd_size;
  1452. ucc->hdesc_size = cppi5_hdesc_calc_size(ucc->needs_epib,
  1453. ucc->psd_size, 0);
  1454. ucc->hdesc_size = ALIGN(ucc->hdesc_size, ARCH_DMA_MINALIGN);
  1455. dma->id = uc->id;
  1456. pr_debug("Allocated dma chn:%lu epib:%d psdata:%u meta:%u thread_id:%x\n",
  1457. dma->id, ucc->needs_epib,
  1458. ucc->psd_size, ucc->metadata_size,
  1459. ucc->remote_thread_id);
  1460. return 0;
  1461. }
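/*
 * Consumers reference the provider with a single cell holding the remote
 * PSI-L thread ID, which udma_of_xlate() above decodes; a thread with bit 15
 * (K3_PSIL_DST_THREAD_ID_OFFSET) set is a destination (MEM_TO_DEV) thread.
 * Illustrative binding only - the actual thread IDs come from the SoC PSI-L
 * map (k3-psil*.c):
 *
 *	dmas = <&main_udmap 0xc001>, <&main_udmap 0x4001>;
 *	dma-names = "tx", "rx";
 */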
  1462. int udma_prepare_rcv_buf(struct dma *dma, void *dst, size_t size)
  1463. {
  1464. struct udma_dev *ud = dev_get_priv(dma->dev);
  1465. struct cppi5_host_desc_t *desc_rx;
  1466. dma_addr_t dma_dst;
  1467. struct udma_chan *uc;
  1468. u32 desc_num;
  1469. if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
  1470. dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
  1471. return -EINVAL;
  1472. }
  1473. uc = &ud->channels[dma->id];
  1474. if (uc->config.dir != DMA_DEV_TO_MEM)
  1475. return -EINVAL;
  1476. if (uc->num_rx_bufs >= UDMA_RX_DESC_NUM)
  1477. return -EINVAL;
  1478. desc_num = uc->desc_rx_cur % UDMA_RX_DESC_NUM;
  1479. desc_rx = uc->desc_rx + (desc_num * uc->config.hdesc_size);
  1480. dma_dst = (dma_addr_t)dst;
  1481. cppi5_hdesc_reset_hbdesc(desc_rx);
  1482. cppi5_hdesc_init(desc_rx,
  1483. uc->config.needs_epib ? CPPI5_INFO0_HDESC_EPIB_PRESENT : 0,
  1484. uc->config.psd_size);
  1485. cppi5_hdesc_set_pktlen(desc_rx, size);
  1486. cppi5_hdesc_attach_buf(desc_rx, dma_dst, size, dma_dst, size);
  1487. flush_dcache_range((unsigned long)desc_rx,
  1488. ALIGN((unsigned long)desc_rx + uc->config.hdesc_size,
  1489. ARCH_DMA_MINALIGN));
  1490. udma_push_to_ring(uc->rflow->fd_ring, desc_rx);
  1491. uc->num_rx_bufs++;
  1492. uc->desc_rx_cur++;
  1493. return 0;
  1494. }
  1495. static int udma_get_cfg(struct dma *dma, u32 id, void **data)
  1496. {
  1497. struct udma_dev *ud = dev_get_priv(dma->dev);
  1498. struct udma_chan *uc;
  1499. if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
  1500. dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
  1501. return -EINVAL;
  1502. }
  1503. switch (id) {
  1504. case TI_UDMA_CHAN_PRIV_INFO:
  1505. uc = &ud->channels[dma->id];
  1506. *data = &uc->cfg_data;
  1507. return 0;
  1508. }
  1509. return -EINVAL;
  1510. }
  1511. static const struct dma_ops udma_ops = {
  1512. .transfer = udma_transfer,
  1513. .of_xlate = udma_of_xlate,
  1514. .request = udma_request,
  1515. .rfree = udma_rfree,
  1516. .enable = udma_enable,
  1517. .disable = udma_disable,
  1518. .send = udma_send,
  1519. .receive = udma_receive,
  1520. .prepare_rcv_buf = udma_prepare_rcv_buf,
  1521. .get_cfg = udma_get_cfg,
  1522. };
  1523. static struct udma_match_data am654_main_data = {
  1524. .psil_base = 0x1000,
  1525. .enable_memcpy_support = true,
  1526. .statictr_z_mask = GENMASK(11, 0),
  1527. .rchan_oes_offset = 0x200,
  1528. .tpl_levels = 2,
  1529. .level_start_idx = {
  1530. [0] = 8, /* Normal channels */
  1531. [1] = 0, /* High Throughput channels */
  1532. },
  1533. };
  1534. static struct udma_match_data am654_mcu_data = {
  1535. .psil_base = 0x6000,
  1536. .enable_memcpy_support = true,
  1537. .statictr_z_mask = GENMASK(11, 0),
  1538. .rchan_oes_offset = 0x200,
  1539. .tpl_levels = 2,
  1540. .level_start_idx = {
  1541. [0] = 2, /* Normal channels */
  1542. [1] = 0, /* High Throughput channels */
  1543. },
  1544. };
  1545. static struct udma_match_data j721e_main_data = {
  1546. .psil_base = 0x1000,
  1547. .enable_memcpy_support = true,
  1548. .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
  1549. .statictr_z_mask = GENMASK(23, 0),
  1550. .rchan_oes_offset = 0x400,
  1551. .tpl_levels = 3,
  1552. .level_start_idx = {
  1553. [0] = 16, /* Normal channels */
  1554. [1] = 4, /* High Throughput channels */
  1555. [2] = 0, /* Ultra High Throughput channels */
  1556. },
  1557. };
  1558. static struct udma_match_data j721e_mcu_data = {
  1559. .psil_base = 0x6000,
  1560. .enable_memcpy_support = true,
  1561. .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
  1562. .statictr_z_mask = GENMASK(23, 0),
  1563. .rchan_oes_offset = 0x400,
  1564. .tpl_levels = 2,
  1565. .level_start_idx = {
  1566. [0] = 2, /* Normal channels */
  1567. [1] = 0, /* High Throughput channels */
  1568. },
  1569. };
  1570. static const struct udevice_id udma_ids[] = {
  1571. {
  1572. .compatible = "ti,am654-navss-main-udmap",
  1573. .data = (ulong)&am654_main_data,
  1574. },
  1575. {
  1576. .compatible = "ti,am654-navss-mcu-udmap",
  1577. .data = (ulong)&am654_mcu_data,
  1578. }, {
  1579. .compatible = "ti,j721e-navss-main-udmap",
  1580. .data = (ulong)&j721e_main_data,
  1581. }, {
  1582. .compatible = "ti,j721e-navss-mcu-udmap",
  1583. .data = (ulong)&j721e_mcu_data,
  1584. },
  1585. { /* Sentinel */ },
  1586. };
  1587. U_BOOT_DRIVER(ti_edma3) = {
  1588. .name = "ti-udma",
  1589. .id = UCLASS_DMA,
  1590. .of_match = udma_ids,
  1591. .ops = &udma_ops,
  1592. .probe = udma_probe,
  1593. .priv_auto_alloc_size = sizeof(struct udma_dev),
  1594. };
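/*
 * A minimal sketch of how a peripheral driver might drive a channel exported
 * by this provider through the generic DMA uclass API (include/dma.h). The
 * "tx"/"rx" channel names and the txbuf/txlen frame are illustrative; real
 * consumers also pass descriptor metadata via struct ti_udma_drv_packet_data
 * and query TI_UDMA_CHAN_PRIV_INFO with dma_get_cfg().
 *
 *	struct dma dma_tx, dma_rx;
 *	uchar rxbuf[PKTSIZE_ALIGN];
 *	void *pkt;
 *	int ret, len;
 *
 *	ret = dma_get_by_name(dev, "tx", &dma_tx);
 *	if (!ret)
 *		ret = dma_get_by_name(dev, "rx", &dma_rx);
 *	if (ret)
 *		return ret;
 *
 *	dma_prepare_rcv_buf(&dma_rx, rxbuf, sizeof(rxbuf));	// udma_prepare_rcv_buf()
 *	dma_enable(&dma_rx);					// udma_enable()
 *	dma_enable(&dma_tx);
 *
 *	dma_send(&dma_tx, txbuf, txlen, NULL);			// udma_send()
 *	len = dma_receive(&dma_rx, &pkt, NULL);			// udma_receive()
 *
 *	dma_disable(&dma_tx);					// udma_disable()
 *	dma_disable(&dma_rx);
 */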