// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018 Álvaro Fernández Rojas <noltari@gmail.com>
 *
 * Derived from linux/drivers/dma/bcm63xx-iudma.c:
 *	Copyright (C) 2015 Simon Arlott <simon@fire.lp0.eu>
 *
 * Derived from linux/drivers/net/ethernet/broadcom/bcm63xx_enet.c:
 *	Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
 *
 * Derived from bcm963xx_4.12L.06B_consumer/shared/opensource/include/bcm963xx/63268_map_part.h:
 *	Copyright (C) 2000-2010 Broadcom Corporation
 *
 * Derived from bcm963xx_4.12L.06B_consumer/bcmdrivers/opensource/net/enet/impl4/bcmenet.c:
 *	Copyright (C) 2010 Broadcom Corporation
 */
#include <common.h>
#include <clk.h>
#include <cpu_func.h>
#include <dm.h>
#include <dma-uclass.h>
#include <log.h>
#include <malloc.h>
#include <memalign.h>
#include <net.h>
#include <reset.h>
#include <asm/io.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#define DMA_RX_DESC 6
#define DMA_TX_DESC 1

/* DMA Channels */
#define DMA_CHAN_FLOWC(x) ((x) >> 1)
#define DMA_CHAN_MAX 16
#define DMA_CHAN_SIZE 0x10
#define DMA_CHAN_TOUT 500

/* DMA Global Configuration register */
#define DMA_CFG_REG 0x00
#define DMA_CFG_ENABLE_SHIFT 0
#define DMA_CFG_ENABLE_MASK (1 << DMA_CFG_ENABLE_SHIFT)
#define DMA_CFG_FLOWC_ENABLE(x) BIT(DMA_CHAN_FLOWC(x) + 1)
#define DMA_CFG_NCHANS_SHIFT 24
#define DMA_CFG_NCHANS_MASK (0xf << DMA_CFG_NCHANS_SHIFT)

/* DMA Global Flow Control registers */
#define DMA_FLOWC_THR_LO_REG(x) (0x04 + DMA_CHAN_FLOWC(x) * 0x0c)
#define DMA_FLOWC_THR_HI_REG(x) (0x08 + DMA_CHAN_FLOWC(x) * 0x0c)
#define DMA_FLOWC_ALLOC_REG(x) (0x0c + DMA_CHAN_FLOWC(x) * 0x0c)
#define DMA_FLOWC_ALLOC_FORCE_SHIFT 31
#define DMA_FLOWC_ALLOC_FORCE_MASK (1 << DMA_FLOWC_ALLOC_FORCE_SHIFT)

/* DMA Global Reset register */
#define DMA_RST_REG 0x34
#define DMA_RST_CHAN_SHIFT 0
#define DMA_RST_CHAN_MASK(x) (1 << x)

/* DMA Channel Configuration register */
#define DMAC_CFG_REG(x) (DMA_CHAN_SIZE * (x) + 0x00)
#define DMAC_CFG_ENABLE_SHIFT 0
#define DMAC_CFG_ENABLE_MASK (1 << DMAC_CFG_ENABLE_SHIFT)
#define DMAC_CFG_PKT_HALT_SHIFT 1
#define DMAC_CFG_PKT_HALT_MASK (1 << DMAC_CFG_PKT_HALT_SHIFT)
#define DMAC_CFG_BRST_HALT_SHIFT 2
#define DMAC_CFG_BRST_HALT_MASK (1 << DMAC_CFG_BRST_HALT_SHIFT)

/* DMA Channel Max Burst Length register */
#define DMAC_BURST_REG(x) (DMA_CHAN_SIZE * (x) + 0x0c)

/* DMA SRAM Descriptor Ring Start register */
#define DMAS_RSTART_REG(x) (DMA_CHAN_SIZE * (x) + 0x00)

/* DMA SRAM State/Bytes done/ring offset register */
#define DMAS_STATE_DATA_REG(x) (DMA_CHAN_SIZE * (x) + 0x04)
/* DMA SRAM Buffer Descriptor status and length register */
#define DMAS_DESC_LEN_STATUS_REG(x) (DMA_CHAN_SIZE * (x) + 0x08)

/* DMA SRAM Buffer Descriptor base buffer pointer register */
#define DMAS_DESC_BASE_BUFPTR_REG(x) (DMA_CHAN_SIZE * (x) + 0x0c)
/* DMA Descriptor Status */
#define DMAD_ST_CRC_SHIFT 8
#define DMAD_ST_CRC_MASK (1 << DMAD_ST_CRC_SHIFT)
#define DMAD_ST_WRAP_SHIFT 12
#define DMAD_ST_WRAP_MASK (1 << DMAD_ST_WRAP_SHIFT)
#define DMAD_ST_SOP_SHIFT 13
#define DMAD_ST_SOP_MASK (1 << DMAD_ST_SOP_SHIFT)
#define DMAD_ST_EOP_SHIFT 14
#define DMAD_ST_EOP_MASK (1 << DMAD_ST_EOP_SHIFT)
#define DMAD_ST_OWN_SHIFT 15
#define DMAD_ST_OWN_MASK (1 << DMAD_ST_OWN_SHIFT)

#define DMAD6348_ST_OV_ERR_SHIFT 0
#define DMAD6348_ST_OV_ERR_MASK (1 << DMAD6348_ST_OV_ERR_SHIFT)
#define DMAD6348_ST_CRC_ERR_SHIFT 1
#define DMAD6348_ST_CRC_ERR_MASK (1 << DMAD6348_ST_CRC_ERR_SHIFT)
#define DMAD6348_ST_RX_ERR_SHIFT 2
#define DMAD6348_ST_RX_ERR_MASK (1 << DMAD6348_ST_RX_ERR_SHIFT)
#define DMAD6348_ST_OS_ERR_SHIFT 4
#define DMAD6348_ST_OS_ERR_MASK (1 << DMAD6348_ST_OS_ERR_SHIFT)
#define DMAD6348_ST_UN_ERR_SHIFT 9
#define DMAD6348_ST_UN_ERR_MASK (1 << DMAD6348_ST_UN_ERR_SHIFT)
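
/*
 * Hardware buffer descriptor shared with the IUDMA engine. "status" carries
 * the DMAD_ST_* bits above: OWN hands the descriptor to the hardware,
 * SOP/EOP mark packet boundaries and WRAP marks the last descriptor of the
 * ring. "address" holds the physical address of the data buffer.
 */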
struct bcm6348_dma_desc {
        uint16_t length;
        uint16_t status;
        uint32_t address;
};

struct bcm6348_chan_priv {
        void __iomem *dma_ring;
        uint8_t dma_ring_size;
        uint8_t desc_id;
        uint8_t desc_cnt;
        bool *busy_desc;
        bool running;
};

struct bcm6348_iudma_hw {
        uint16_t err_mask;
};

struct bcm6348_iudma_priv {
        const struct bcm6348_iudma_hw *hw;
        void __iomem *base;
        void __iomem *chan;
        void __iomem *sram;
        struct bcm6348_chan_priv **ch_priv;
        uint8_t n_channels;
};
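
/*
 * Channel id convention used throughout this driver: even ids are rx
 * channels, odd ids are tx channels, and each rx/tx pair shares one flow
 * control set (DMA_CHAN_FLOWC). Register accesses are split across the
 * three remapped regions: priv->base (global config, flow control, reset),
 * priv->chan (per-channel config and burst) and priv->sram (per-channel
 * descriptor ring state).
 */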
static inline bool bcm6348_iudma_chan_is_rx(uint8_t ch)
{
        return !(ch & 1);
}

static inline void bcm6348_iudma_fdc(void *ptr, ulong size)
{
        ulong start = (ulong) ptr;

        flush_dcache_range(start, start + size);
}

static inline void bcm6348_iudma_idc(void *ptr, ulong size)
{
        ulong start = (ulong) ptr;

        invalidate_dcache_range(start, start + size);
}
static void bcm6348_iudma_chan_stop(struct bcm6348_iudma_priv *priv,
                                    uint8_t ch)
{
        unsigned int timeout = DMA_CHAN_TOUT;

        do {
                uint32_t cfg, halt;

                if (timeout > DMA_CHAN_TOUT / 2)
                        halt = DMAC_CFG_PKT_HALT_MASK;
                else
                        halt = DMAC_CFG_BRST_HALT_MASK;

                /* try to stop dma channel */
                writel_be(halt, priv->chan + DMAC_CFG_REG(ch));
                mb();

                /* check if channel was stopped */
                cfg = readl_be(priv->chan + DMAC_CFG_REG(ch));
                if (!(cfg & DMAC_CFG_ENABLE_MASK))
                        break;

                udelay(1);
        } while (--timeout);

        if (!timeout)
                pr_err("unable to stop channel %u\n", ch);

        /* reset dma channel */
        setbits_be32(priv->base + DMA_RST_REG, DMA_RST_CHAN_MASK(ch));
        mb();
        clrbits_be32(priv->base + DMA_RST_REG, DMA_RST_CHAN_MASK(ch));
}
static int bcm6348_iudma_disable(struct dma *dma)
{
        struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev);
        struct bcm6348_chan_priv *ch_priv = priv->ch_priv[dma->id];

        /* stop dma channel */
        bcm6348_iudma_chan_stop(priv, dma->id);

        /* dma flow control: force buffer allocation off for rx channels */
        if (bcm6348_iudma_chan_is_rx(dma->id))
                writel_be(DMA_FLOWC_ALLOC_FORCE_MASK,
                          priv->base + DMA_FLOWC_ALLOC_REG(dma->id));

        /* init channel config */
        ch_priv->running = false;
        ch_priv->desc_id = 0;
        if (bcm6348_iudma_chan_is_rx(dma->id))
                ch_priv->desc_cnt = 0;
        else
                ch_priv->desc_cnt = ch_priv->dma_ring_size;

        return 0;
}
static int bcm6348_iudma_enable(struct dma *dma)
{
        const struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev);
        struct bcm6348_chan_priv *ch_priv = priv->ch_priv[dma->id];
        struct bcm6348_dma_desc *dma_desc = ch_priv->dma_ring;
        uint8_t i;

        /* dma ring init */
        for (i = 0; i < ch_priv->desc_cnt; i++) {
                if (bcm6348_iudma_chan_is_rx(dma->id)) {
                        ch_priv->busy_desc[i] = false;
                        dma_desc->status |= DMAD_ST_OWN_MASK;
                } else {
                        dma_desc->status = 0;
                        dma_desc->length = 0;
                        dma_desc->address = 0;
                }

                if (i == ch_priv->desc_cnt - 1)
                        dma_desc->status |= DMAD_ST_WRAP_MASK;

                dma_desc++;
        }

        /* init to first descriptor */
        ch_priv->desc_id = 0;

        /* force cache writeback */
        bcm6348_iudma_fdc(ch_priv->dma_ring,
                          sizeof(*dma_desc) * ch_priv->desc_cnt);

        /* clear sram */
        writel_be(0, priv->sram + DMAS_STATE_DATA_REG(dma->id));
        writel_be(0, priv->sram + DMAS_DESC_LEN_STATUS_REG(dma->id));
        writel_be(0, priv->sram + DMAS_DESC_BASE_BUFPTR_REG(dma->id));

        /* set dma ring start */
        writel_be(virt_to_phys(ch_priv->dma_ring),
                  priv->sram + DMAS_RSTART_REG(dma->id));

        /* set flow control thresholds at 1/3 and 2/3 of the ring */
        if (bcm6348_iudma_chan_is_rx(dma->id)) {
                u32 val;

                setbits_be32(priv->base + DMA_CFG_REG,
                             DMA_CFG_FLOWC_ENABLE(dma->id));

                val = ch_priv->desc_cnt / 3;
                writel_be(val, priv->base + DMA_FLOWC_THR_LO_REG(dma->id));

                val = (ch_priv->desc_cnt * 2) / 3;
                writel_be(val, priv->base + DMA_FLOWC_THR_HI_REG(dma->id));

                writel_be(0, priv->base + DMA_FLOWC_ALLOC_REG(dma->id));
        }

        /* set dma max burst */
        writel_be(ch_priv->desc_cnt,
                  priv->chan + DMAC_BURST_REG(dma->id));

        /* kick rx dma channel */
        if (bcm6348_iudma_chan_is_rx(dma->id))
                setbits_be32(priv->chan + DMAC_CFG_REG(dma->id),
                             DMAC_CFG_ENABLE_MASK);

        /* channel is now enabled */
        ch_priv->running = true;

        return 0;
}
static int bcm6348_iudma_request(struct dma *dma)
{
        const struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev);
        struct bcm6348_chan_priv *ch_priv;

        /* check if channel is valid */
        if (dma->id >= priv->n_channels)
                return -ENODEV;

        /* alloc channel private data */
        priv->ch_priv[dma->id] = calloc(1, sizeof(struct bcm6348_chan_priv));
        if (!priv->ch_priv[dma->id])
                return -ENOMEM;
        ch_priv = priv->ch_priv[dma->id];

        /* alloc dma ring */
        if (bcm6348_iudma_chan_is_rx(dma->id))
                ch_priv->dma_ring_size = DMA_RX_DESC;
        else
                ch_priv->dma_ring_size = DMA_TX_DESC;

        ch_priv->dma_ring =
                malloc_cache_aligned(sizeof(struct bcm6348_dma_desc) *
                                     ch_priv->dma_ring_size);
        if (!ch_priv->dma_ring)
                return -ENOMEM;

        /* init channel config */
        ch_priv->running = false;
        ch_priv->desc_id = 0;
        if (bcm6348_iudma_chan_is_rx(dma->id)) {
                ch_priv->desc_cnt = 0;
                /*
                 * one busy flag per ring entry: allocate for the full ring,
                 * since desc_cnt only grows as rx buffers are added
                 */
                ch_priv->busy_desc = calloc(ch_priv->dma_ring_size,
                                            sizeof(bool));
        } else {
                ch_priv->desc_cnt = ch_priv->dma_ring_size;
                ch_priv->busy_desc = NULL;
        }

        return 0;
}
static int bcm6348_iudma_receive(struct dma *dma, void **dst, void *metadata)
{
        const struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev);
        const struct bcm6348_iudma_hw *hw = priv->hw;
        struct bcm6348_chan_priv *ch_priv = priv->ch_priv[dma->id];
        struct bcm6348_dma_desc *dma_desc = ch_priv->dma_ring;
        int ret;

        if (!ch_priv->running)
                return -EINVAL;

        /* get dma ring descriptor address */
        dma_desc += ch_priv->desc_id;

        /* invalidate cache data */
        bcm6348_iudma_idc(dma_desc, sizeof(*dma_desc));

        /* check dma own */
        if (dma_desc->status & DMAD_ST_OWN_MASK)
                return -EAGAIN;

        /* check pkt */
        if (!(dma_desc->status & DMAD_ST_EOP_MASK) ||
            !(dma_desc->status & DMAD_ST_SOP_MASK) ||
            (dma_desc->status & hw->err_mask)) {
                pr_err("invalid pkt received (ch=%lu desc=%u) (st=%04x)\n",
                       dma->id, ch_priv->desc_id, dma_desc->status);
                ret = -EAGAIN;
        } else {
                /* set dma buffer address */
                *dst = phys_to_virt(dma_desc->address);

                /* invalidate cache data */
                bcm6348_iudma_idc(*dst, dma_desc->length);

                /* return packet length */
                ret = dma_desc->length;
        }

        /* busy dma descriptor */
        ch_priv->busy_desc[ch_priv->desc_id] = true;

        /* increment dma descriptor */
        ch_priv->desc_id = (ch_priv->desc_id + 1) % ch_priv->desc_cnt;

        return ret;
}
static int bcm6348_iudma_send(struct dma *dma, void *src, size_t len,
                              void *metadata)
{
        const struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev);
        struct bcm6348_chan_priv *ch_priv = priv->ch_priv[dma->id];
        struct bcm6348_dma_desc *dma_desc;
        uint16_t status;

        if (!ch_priv->running)
                return -EINVAL;

        /* flush cache */
        bcm6348_iudma_fdc(src, len);

        /* get dma ring descriptor address */
        dma_desc = ch_priv->dma_ring;
        dma_desc += ch_priv->desc_id;

        /* config dma descriptor */
        status = (DMAD_ST_OWN_MASK |
                  DMAD_ST_EOP_MASK |
                  DMAD_ST_CRC_MASK |
                  DMAD_ST_SOP_MASK);
        if (ch_priv->desc_id == ch_priv->desc_cnt - 1)
                status |= DMAD_ST_WRAP_MASK;

        /* set dma descriptor */
        dma_desc->address = virt_to_phys(src);
        dma_desc->length = len;
        dma_desc->status = status;

        /* flush cache */
        bcm6348_iudma_fdc(dma_desc, sizeof(*dma_desc));

        /* kick tx dma channel */
        setbits_be32(priv->chan + DMAC_CFG_REG(dma->id), DMAC_CFG_ENABLE_MASK);

        /* poll until the hardware clears the own bit */
        do {
                /* invalidate cache */
                bcm6348_iudma_idc(dma_desc, sizeof(*dma_desc));

                if (!(dma_desc->status & DMAD_ST_OWN_MASK))
                        break;
        } while (1);

        /* increment dma descriptor */
        ch_priv->desc_id = (ch_priv->desc_id + 1) % ch_priv->desc_cnt;

        return 0;
}
static int bcm6348_iudma_free_rcv_buf(struct dma *dma, void *dst, size_t size)
{
        const struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev);
        struct bcm6348_chan_priv *ch_priv = priv->ch_priv[dma->id];
        struct bcm6348_dma_desc *dma_desc = ch_priv->dma_ring;
        uint16_t status;
        uint8_t i;
        u32 cfg;

        /* get dirty dma descriptor */
        for (i = 0; i < ch_priv->desc_cnt; i++) {
                if (phys_to_virt(dma_desc->address) == dst)
                        break;

                dma_desc++;
        }

        /* dma descriptor not found */
        if (i == ch_priv->desc_cnt) {
                pr_err("dirty dma descriptor not found\n");
                return -ENOENT;
        }

        /* invalidate cache */
        bcm6348_iudma_idc(ch_priv->dma_ring,
                          sizeof(*dma_desc) * ch_priv->desc_cnt);

        /* free dma descriptor */
        ch_priv->busy_desc[i] = false;

        status = DMAD_ST_OWN_MASK;
        if (i == ch_priv->desc_cnt - 1)
                status |= DMAD_ST_WRAP_MASK;

        dma_desc->status |= status;
        dma_desc->length = PKTSIZE_ALIGN;

        /* tell dma we allocated one buffer */
        writel_be(1, priv->base + DMA_FLOWC_ALLOC_REG(dma->id));

        /* flush cache */
        bcm6348_iudma_fdc(ch_priv->dma_ring,
                          sizeof(*dma_desc) * ch_priv->desc_cnt);

        /* kick rx dma channel if disabled */
        cfg = readl_be(priv->chan + DMAC_CFG_REG(dma->id));
        if (!(cfg & DMAC_CFG_ENABLE_MASK))
                setbits_be32(priv->chan + DMAC_CFG_REG(dma->id),
                             DMAC_CFG_ENABLE_MASK);

        return 0;
}
static int bcm6348_iudma_add_rcv_buf(struct dma *dma, void *dst, size_t size)
{
        const struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev);
        struct bcm6348_chan_priv *ch_priv = priv->ch_priv[dma->id];
        struct bcm6348_dma_desc *dma_desc = ch_priv->dma_ring;

        /* no more dma descriptors available */
        if (ch_priv->desc_cnt == ch_priv->dma_ring_size) {
                pr_err("max number of buffers reached\n");
                return -EINVAL;
        }

        /* get next dma descriptor */
        dma_desc += ch_priv->desc_cnt;

        /* init dma descriptor */
        dma_desc->address = virt_to_phys(dst);
        dma_desc->length = size;
        dma_desc->status = 0;

        /* flush cache */
        bcm6348_iudma_fdc(dma_desc, sizeof(*dma_desc));

        /* increment dma descriptors */
        ch_priv->desc_cnt++;

        return 0;
}
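
/*
 * .prepare_rcv_buf is used in two phases: before the channel is enabled it
 * appends buffers to the rx ring (bcm6348_iudma_add_rcv_buf, growing
 * desc_cnt); once the channel is running it hands a consumed buffer back to
 * the hardware (bcm6348_iudma_free_rcv_buf).
 */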
static int bcm6348_iudma_prepare_rcv_buf(struct dma *dma, void *dst,
                                         size_t size)
{
        const struct bcm6348_iudma_priv *priv = dev_get_priv(dma->dev);
        struct bcm6348_chan_priv *ch_priv = priv->ch_priv[dma->id];

        /* only add new rx buffers if channel isn't running */
        if (ch_priv->running)
                return bcm6348_iudma_free_rcv_buf(dma, dst, size);
        else
                return bcm6348_iudma_add_rcv_buf(dma, dst, size);
}
static const struct dma_ops bcm6348_iudma_ops = {
        .disable = bcm6348_iudma_disable,
        .enable = bcm6348_iudma_enable,
        .prepare_rcv_buf = bcm6348_iudma_prepare_rcv_buf,
        .request = bcm6348_iudma_request,
        .receive = bcm6348_iudma_receive,
        .send = bcm6348_iudma_send,
};
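
/*
 * Illustrative sketch (not part of this driver): a peripheral driver such as
 * the bcm6348 ethernet driver is expected to drive these ops through the DMA
 * uclass roughly as follows. The "rx"/"tx" channel names and the use of the
 * net_rx_packets[] pool are assumptions taken from typical users, not
 * something mandated by this file.
 *
 *	struct dma dma_rx, dma_tx;
 *	uchar *pkt;
 *	int i, len;
 *
 *	dma_get_by_name(dev, "rx", &dma_rx);	// even channel id -> rx
 *	dma_get_by_name(dev, "tx", &dma_tx);	// odd channel id -> tx
 *
 *	for (i = 0; i < PKTBUFSRX; i++)
 *		dma_prepare_rcv_buf(&dma_rx, net_rx_packets[i], PKTSIZE_ALIGN);
 *	dma_enable(&dma_rx);
 *	dma_enable(&dma_tx);
 *
 *	len = dma_receive(&dma_rx, (void **)&pkt, NULL); // -EAGAIN if empty
 *	if (len > 0)
 *		dma_prepare_rcv_buf(&dma_rx, pkt, PKTSIZE_ALIGN); // recycle
 *	dma_send(&dma_tx, pkt, len, NULL);	// blocks until sent
 */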
static const struct bcm6348_iudma_hw bcm6348_hw = {
        .err_mask = (DMAD6348_ST_OV_ERR_MASK |
                     DMAD6348_ST_CRC_ERR_MASK |
                     DMAD6348_ST_RX_ERR_MASK |
                     DMAD6348_ST_OS_ERR_MASK |
                     DMAD6348_ST_UN_ERR_MASK),
};

static const struct bcm6348_iudma_hw bcm6368_hw = {
        .err_mask = 0,
};

static const struct udevice_id bcm6348_iudma_ids[] = {
        {
                .compatible = "brcm,bcm6348-iudma",
                .data = (ulong)&bcm6348_hw,
        }, {
                .compatible = "brcm,bcm6368-iudma",
                .data = (ulong)&bcm6368_hw,
        }, { /* sentinel */ }
};
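
/*
 * The probe below expects three named "reg" regions ("dma", "dma-channels"
 * and "dma-sram") plus an optional "dma-channels" count property (default 8,
 * capped at DMA_CHAN_MAX). Clocks and resets are optional and are simply
 * enabled/deasserted in index order if present.
 */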
static int bcm6348_iudma_probe(struct udevice *dev)
{
        struct dma_dev_priv *uc_priv = dev_get_uclass_priv(dev);
        struct bcm6348_iudma_priv *priv = dev_get_priv(dev);
        const struct bcm6348_iudma_hw *hw =
                (const struct bcm6348_iudma_hw *)dev_get_driver_data(dev);
        uint8_t ch;
        int i;

        uc_priv->supported = (DMA_SUPPORTS_DEV_TO_MEM |
                              DMA_SUPPORTS_MEM_TO_DEV);
        priv->hw = hw;

        /* dma global base address */
        priv->base = dev_remap_addr_name(dev, "dma");
        if (!priv->base)
                return -EINVAL;

        /* dma channels base address */
        priv->chan = dev_remap_addr_name(dev, "dma-channels");
        if (!priv->chan)
                return -EINVAL;

        /* dma sram base address */
        priv->sram = dev_remap_addr_name(dev, "dma-sram");
        if (!priv->sram)
                return -EINVAL;

        /* get number of channels */
        priv->n_channels = dev_read_u32_default(dev, "dma-channels", 8);
        if (priv->n_channels > DMA_CHAN_MAX)
                return -EINVAL;

        /* try to enable clocks */
        for (i = 0; ; i++) {
                struct clk clk;
                int ret;

                ret = clk_get_by_index(dev, i, &clk);
                if (ret < 0)
                        break;

                ret = clk_enable(&clk);
                if (ret < 0) {
                        pr_err("error enabling clock %d\n", i);
                        return ret;
                }

                ret = clk_free(&clk);
                if (ret < 0) {
                        pr_err("error freeing clock %d\n", i);
                        return ret;
                }
        }

        /* try to perform resets */
        for (i = 0; ; i++) {
                struct reset_ctl reset;
                int ret;

                ret = reset_get_by_index(dev, i, &reset);
                if (ret < 0)
                        break;

                ret = reset_deassert(&reset);
                if (ret < 0) {
                        pr_err("error deasserting reset %d\n", i);
                        return ret;
                }

                ret = reset_free(&reset);
                if (ret < 0) {
                        pr_err("error freeing reset %d\n", i);
                        return ret;
                }
        }

        /* disable dma controller */
        clrbits_be32(priv->base + DMA_CFG_REG, DMA_CFG_ENABLE_MASK);

        /* alloc channel private data pointers */
        priv->ch_priv = calloc(priv->n_channels,
                               sizeof(struct bcm6348_chan_priv *));
        if (!priv->ch_priv)
                return -ENOMEM;

        /* stop dma channels */
        for (ch = 0; ch < priv->n_channels; ch++)
                bcm6348_iudma_chan_stop(priv, ch);

        /* enable dma controller */
        setbits_be32(priv->base + DMA_CFG_REG, DMA_CFG_ENABLE_MASK);

        return 0;
}
U_BOOT_DRIVER(bcm6348_iudma) = {
        .name = "bcm6348_iudma",
        .id = UCLASS_DMA,
        .of_match = bcm6348_iudma_ids,
        .ops = &bcm6348_iudma_ops,
        .priv_auto_alloc_size = sizeof(struct bcm6348_iudma_priv),
        .probe = bcm6348_iudma_probe,
};