bcm-sf2-eth-gmac.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2014-2017 Broadcom.
 */

#ifdef BCM_GMAC_DEBUG
#ifndef DEBUG
#define DEBUG
#endif
#endif

#include <config.h>
#include <common.h>
#include <cpu_func.h>
#include <log.h>
#include <malloc.h>
#include <net.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <phy.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include "bcm-sf2-eth.h"
#include "bcm-sf2-eth-gmac.h"

#define SPINWAIT(exp, us) { \
        uint countdown = (us) + 9; \
        while ((exp) && (countdown >= 10)) { \
                udelay(10); \
                countdown -= 10; \
        } \
}
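
/*
 * Usage sketch (illustrative only, taken from gmac_disable_dma() below):
 * SPINWAIT() polls `exp` in 10 us steps until it goes false or roughly
 * `us` microseconds have elapsed:
 *
 *      int status;
 *
 *      SPINWAIT(((status = (readl(GMAC0_DMA_TX_STATUS0_ADDR) &
 *                           D64_XS0_XS_MASK)) != D64_XS0_XS_DISABLED),
 *               10000);
 *
 * The macro does not report a timeout; callers must re-check the
 * condition afterwards, as gmac_disable_dma() does.
 */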
#define RX_BUF_SIZE_ALIGNED     ALIGN(RX_BUF_SIZE, ARCH_DMA_MINALIGN)
#define TX_BUF_SIZE_ALIGNED     ALIGN(TX_BUF_SIZE, ARCH_DMA_MINALIGN)
#define DESCP_SIZE_ALIGNED      ALIGN(sizeof(dma64dd_t), ARCH_DMA_MINALIGN)

static int gmac_disable_dma(struct eth_dma *dma, int dir);
static int gmac_enable_dma(struct eth_dma *dma, int dir);

/* DMA Descriptor */
typedef struct {
        /* misc control bits */
        uint32_t ctrl1;
        /* buffer count and address extension */
        uint32_t ctrl2;
        /* memory address of the data buffer, bits 31:0 */
        uint32_t addrlow;
        /* memory address of the data buffer, bits 63:32 */
        uint32_t addrhigh;
} dma64dd_t;
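
/*
 * Ring layout sketch: descriptors are kept in a plain array and the
 * engine wraps when it sees D64_CTRL1_EOT in the last entry. Filling
 * one TX slot looks like this (mirrors dma_tx_init() below; `i` is the
 * ring index and `bufp` the DMA buffer for that slot):
 *
 *      dma64dd_t *d = (dma64dd_t *)dma->tx_desc_aligned + i;
 *
 *      d->ctrl1 = (i == TX_BUF_NUM - 1) ? D64_CTRL1_EOT : 0;
 *      d->ctrl2 = 0;                   // byte count filled in at tx time
 *      d->addrlow = (uint32_t)bufp;    // buffers live below 4 GiB,
 *      d->addrhigh = 0;                // so the high word stays 0
 */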
uint32_t g_dmactrlflags;

static uint32_t dma_ctrlflags(uint32_t mask, uint32_t flags)
{
        debug("%s enter\n", __func__);

        g_dmactrlflags &= ~mask;
        g_dmactrlflags |= flags;

        /* If trying to enable parity, check if parity is actually supported */
        if (g_dmactrlflags & DMA_CTRL_PEN) {
                uint32_t control;

                control = readl(GMAC0_DMA_TX_CTRL_ADDR);
                writel(control | D64_XC_PD, GMAC0_DMA_TX_CTRL_ADDR);
                if (readl(GMAC0_DMA_TX_CTRL_ADDR) & D64_XC_PD) {
                        /*
                         * We *can* disable it, therefore it is supported;
                         * restore control register
                         */
                        writel(control, GMAC0_DMA_TX_CTRL_ADDR);
                } else {
                        /* Not supported, don't allow it to be enabled */
                        g_dmactrlflags &= ~DMA_CTRL_PEN;
                }
        }

        return g_dmactrlflags;
}
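
/*
 * Usage sketch: dma_ctrlflags() first clears the bits in `mask`, then
 * ORs in `flags`. The two calls in dma_init() below use this to force
 * Rx Overflow Continue and Parity off, then re-enable only ROC:
 *
 *      dma_ctrlflags(DMA_CTRL_ROC | DMA_CTRL_PEN, 0);
 *      dma_ctrlflags(DMA_CTRL_ROC | DMA_CTRL_PEN, DMA_CTRL_ROC);
 */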
static inline void reg32_clear_bits(uint32_t reg, uint32_t value)
{
        uint32_t v = readl(reg);
        v &= ~(value);
        writel(v, reg);
}

static inline void reg32_set_bits(uint32_t reg, uint32_t value)
{
        uint32_t v = readl(reg);
        v |= value;
        writel(v, reg);
}

#ifdef BCM_GMAC_DEBUG
static void dma_tx_dump(struct eth_dma *dma)
{
        dma64dd_t *descp = NULL;
        uint8_t *bufp;
        int i;

        printf("TX DMA Register:\n");
        printf("control:0x%x; ptr:0x%x; addrl:0x%x; addrh:0x%x; stat0:0x%x, stat1:0x%x\n",
               readl(GMAC0_DMA_TX_CTRL_ADDR),
               readl(GMAC0_DMA_TX_PTR_ADDR),
               readl(GMAC0_DMA_TX_ADDR_LOW_ADDR),
               readl(GMAC0_DMA_TX_ADDR_HIGH_ADDR),
               readl(GMAC0_DMA_TX_STATUS0_ADDR),
               readl(GMAC0_DMA_TX_STATUS1_ADDR));

        printf("TX Descriptors:\n");
        for (i = 0; i < TX_BUF_NUM; i++) {
                descp = (dma64dd_t *)(dma->tx_desc_aligned) + i;
                printf("ctrl1:0x%08x; ctrl2:0x%08x; addr:0x%x 0x%08x\n",
                       descp->ctrl1, descp->ctrl2,
                       descp->addrhigh, descp->addrlow);
        }

        printf("TX Buffers:\n");
        for (i = 0; i < TX_BUF_NUM; i++) {
                bufp = (uint8_t *)(dma->tx_buf + i * TX_BUF_SIZE_ALIGNED);
                printf("buf%d:0x%x; ", i, (uint32_t)bufp);
        }
        printf("\n");
}
static void dma_rx_dump(struct eth_dma *dma)
{
        dma64dd_t *descp = NULL;
        uint8_t *bufp;
        int i;

        printf("RX DMA Register:\n");
        printf("control:0x%x; ptr:0x%x; addrl:0x%x; addrh:0x%x; stat0:0x%x, stat1:0x%x\n",
               readl(GMAC0_DMA_RX_CTRL_ADDR),
               readl(GMAC0_DMA_RX_PTR_ADDR),
               readl(GMAC0_DMA_RX_ADDR_LOW_ADDR),
               readl(GMAC0_DMA_RX_ADDR_HIGH_ADDR),
               readl(GMAC0_DMA_RX_STATUS0_ADDR),
               readl(GMAC0_DMA_RX_STATUS1_ADDR));

        printf("RX Descriptors:\n");
        for (i = 0; i < RX_BUF_NUM; i++) {
                descp = (dma64dd_t *)(dma->rx_desc_aligned) + i;
                printf("ctrl1:0x%08x; ctrl2:0x%08x; addr:0x%x 0x%08x\n",
                       descp->ctrl1, descp->ctrl2,
                       descp->addrhigh, descp->addrlow);
        }

        printf("RX Buffers:\n");
        for (i = 0; i < RX_BUF_NUM; i++) {
                bufp = dma->rx_buf + i * RX_BUF_SIZE_ALIGNED;
                printf("buf%d:0x%x; ", i, (uint32_t)bufp);
        }
        printf("\n");
}
#endif
static int dma_tx_init(struct eth_dma *dma)
{
        dma64dd_t *descp = NULL;
        uint8_t *bufp;
        int i;
        uint32_t ctrl;

        debug("%s enter\n", __func__);

        /* clear descriptor memory */
        memset((void *)(dma->tx_desc_aligned), 0,
               TX_BUF_NUM * DESCP_SIZE_ALIGNED);
        memset(dma->tx_buf, 0, TX_BUF_NUM * TX_BUF_SIZE_ALIGNED);

        /* Initialize TX DMA descriptor table */
        for (i = 0; i < TX_BUF_NUM; i++) {
                descp = (dma64dd_t *)(dma->tx_desc_aligned) + i;
                bufp = dma->tx_buf + i * TX_BUF_SIZE_ALIGNED;
                /* clear buffer memory */
                memset((void *)bufp, 0, TX_BUF_SIZE_ALIGNED);

                ctrl = 0;
                /* if last descriptor, set end-of-table */
                if (i == (TX_BUF_NUM - 1))
                        ctrl = D64_CTRL1_EOT;
                descp->ctrl1 = ctrl;
                descp->ctrl2 = 0;
                descp->addrlow = (uint32_t)bufp;
                descp->addrhigh = 0;
        }

        /* flush descriptor and buffer */
        descp = dma->tx_desc_aligned;
        bufp = dma->tx_buf;
        flush_dcache_range((unsigned long)descp,
                           (unsigned long)descp +
                           DESCP_SIZE_ALIGNED * TX_BUF_NUM);
        flush_dcache_range((unsigned long)bufp,
                           (unsigned long)bufp +
                           TX_BUF_SIZE_ALIGNED * TX_BUF_NUM);

        /* initialize the DMA channel */
        writel((uint32_t)(dma->tx_desc_aligned), GMAC0_DMA_TX_ADDR_LOW_ADDR);
        writel(0, GMAC0_DMA_TX_ADDR_HIGH_ADDR);

        /* now update the dma last descriptor */
        writel(((uint32_t)(dma->tx_desc_aligned)) & D64_XP_LD_MASK,
               GMAC0_DMA_TX_PTR_ADDR);

        return 0;
}
static int dma_rx_init(struct eth_dma *dma)
{
        uint32_t last_desc;
        dma64dd_t *descp = NULL;
        uint8_t *bufp;
        uint32_t ctrl;
        int i;

        debug("%s enter\n", __func__);

        /* clear descriptor memory */
        memset((void *)(dma->rx_desc_aligned), 0,
               RX_BUF_NUM * DESCP_SIZE_ALIGNED);
        /* clear buffer memory */
        memset(dma->rx_buf, 0, RX_BUF_NUM * RX_BUF_SIZE_ALIGNED);

        /* Initialize RX DMA descriptor table */
        for (i = 0; i < RX_BUF_NUM; i++) {
                descp = (dma64dd_t *)(dma->rx_desc_aligned) + i;
                bufp = dma->rx_buf + i * RX_BUF_SIZE_ALIGNED;
                ctrl = 0;
                /* if last descriptor, set end-of-table */
                if (i == (RX_BUF_NUM - 1))
                        ctrl = D64_CTRL1_EOT;
                descp->ctrl1 = ctrl;
                descp->ctrl2 = RX_BUF_SIZE_ALIGNED;
                descp->addrlow = (uint32_t)bufp;
                descp->addrhigh = 0;

                last_desc = ((uint32_t)(descp) & D64_XP_LD_MASK)
                            + sizeof(dma64dd_t);
        }

        descp = dma->rx_desc_aligned;
        bufp = dma->rx_buf;

        /* flush descriptor and buffer */
        flush_dcache_range((unsigned long)descp,
                           (unsigned long)descp +
                           DESCP_SIZE_ALIGNED * RX_BUF_NUM);
        flush_dcache_range((unsigned long)(bufp),
                           (unsigned long)bufp +
                           RX_BUF_SIZE_ALIGNED * RX_BUF_NUM);

        /* initialize the DMA channel */
        writel((uint32_t)descp, GMAC0_DMA_RX_ADDR_LOW_ADDR);
        writel(0, GMAC0_DMA_RX_ADDR_HIGH_ADDR);

        /* now update the dma last descriptor */
        writel(last_desc, GMAC0_DMA_RX_PTR_ADDR);

        return 0;
}
static int dma_init(struct eth_dma *dma)
{
        debug(" %s enter\n", __func__);

        /*
         * Default flags: For backwards compatibility both
         * Rx Overflow Continue and Parity are DISABLED.
         */
        dma_ctrlflags(DMA_CTRL_ROC | DMA_CTRL_PEN, 0);

        debug("rx burst len 0x%x\n",
              (readl(GMAC0_DMA_RX_CTRL_ADDR) & D64_RC_BL_MASK)
              >> D64_RC_BL_SHIFT);
        debug("tx burst len 0x%x\n",
              (readl(GMAC0_DMA_TX_CTRL_ADDR) & D64_XC_BL_MASK)
              >> D64_XC_BL_SHIFT);

        dma_tx_init(dma);
        dma_rx_init(dma);

        /* From end of chip_init() */
        /* enable the overflow continue feature and disable parity */
        dma_ctrlflags(DMA_CTRL_ROC | DMA_CTRL_PEN /* mask */,
                      DMA_CTRL_ROC /* value */);

        return 0;
}
static int dma_deinit(struct eth_dma *dma)
{
        debug(" %s enter\n", __func__);

        gmac_disable_dma(dma, MAC_DMA_RX);
        gmac_disable_dma(dma, MAC_DMA_TX);

        free(dma->tx_buf);
        dma->tx_buf = NULL;
        free(dma->tx_desc_aligned);
        dma->tx_desc_aligned = NULL;

        free(dma->rx_buf);
        dma->rx_buf = NULL;
        free(dma->rx_desc_aligned);
        dma->rx_desc_aligned = NULL;

        return 0;
}
int gmac_tx_packet(struct eth_dma *dma, void *packet, int length)
{
        uint8_t *bufp = dma->tx_buf + dma->cur_tx_index * TX_BUF_SIZE_ALIGNED;

        /* kick off the dma */
        size_t len = length;
        int txout = dma->cur_tx_index;
        uint32_t flags;
        dma64dd_t *descp = NULL;
        uint32_t ctrl;
        uint32_t last_desc = (((uint32_t)dma->tx_desc_aligned) +
                              sizeof(dma64dd_t)) & D64_XP_LD_MASK;
        size_t buflen;

        debug("%s enter\n", __func__);

        /* load the buffer */
        memcpy(bufp, packet, len);

        /* Add 4 bytes for Ethernet FCS/CRC */
        buflen = len + 4;

        ctrl = (buflen & D64_CTRL2_BC_MASK);

        /* the transmit will only be one frame or set SOF, EOF */
        /* also set int on completion */
        flags = D64_CTRL1_SOF | D64_CTRL1_IOC | D64_CTRL1_EOF;

        /* txout points to the descriptor to use */
        /* if last descriptor then set EOT */
        if (txout == (TX_BUF_NUM - 1)) {
                flags |= D64_CTRL1_EOT;
                last_desc = ((uint32_t)(dma->tx_desc_aligned)) & D64_XP_LD_MASK;
        }

        /* write the descriptor */
        descp = ((dma64dd_t *)(dma->tx_desc_aligned)) + txout;
        descp->addrlow = (uint32_t)bufp;
        descp->addrhigh = 0;
        descp->ctrl1 = flags;
        descp->ctrl2 = ctrl;

        /* flush descriptor and buffer */
        flush_dcache_range((unsigned long)dma->tx_desc_aligned,
                           (unsigned long)dma->tx_desc_aligned +
                           DESCP_SIZE_ALIGNED * TX_BUF_NUM);
        flush_dcache_range((unsigned long)bufp,
                           (unsigned long)bufp + TX_BUF_SIZE_ALIGNED);

        /* now update the dma last descriptor */
        writel(last_desc, GMAC0_DMA_TX_PTR_ADDR);

        /* tx dma should be enabled so packet should go out */

        /* update txout */
        dma->cur_tx_index = (txout + 1) & (TX_BUF_NUM - 1);

        return 0;
}
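
/*
 * Note on the ring arithmetic above: the wrap
 * (txout + 1) & (TX_BUF_NUM - 1) only works when TX_BUF_NUM is a power
 * of two, and the default last_desc of base + sizeof(dma64dd_t) is only
 * right for txout == 0; the EOT branch handles the final slot, which
 * covers every case assuming the small two-entry TX ring this driver is
 * normally built with. A sketch of the index update that would hold for
 * any ring size, should that assumption ever change:
 *
 *      dma->cur_tx_index = (txout + 1) % TX_BUF_NUM;
 */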
bool gmac_check_tx_done(struct eth_dma *dma)
{
        /* wait for tx to complete */
        uint32_t intstatus;
        bool xfrdone = false;

        debug("%s enter\n", __func__);

        intstatus = readl(GMAC0_INT_STATUS_ADDR);
        debug("int(0x%x)\n", intstatus);
        if (intstatus & (I_XI0 | I_XI1 | I_XI2 | I_XI3)) {
                xfrdone = true;
                /* clear the int bits */
                intstatus &= ~(I_XI0 | I_XI1 | I_XI2 | I_XI3);
                writel(intstatus, GMAC0_INT_STATUS_ADDR);
        } else {
                debug("Tx int(0x%x)\n", intstatus);
        }

        return xfrdone;
}
int gmac_check_rx_done(struct eth_dma *dma, uint8_t *buf)
{
        void *bufp, *datap;
        size_t rcvlen = 0, buflen = 0;
        uint32_t stat0 = 0, stat1 = 0;
        uint32_t control, offset;
        uint8_t statbuf[HWRXOFF * 2];
        int index, curr, active;
        dma64dd_t *descp = NULL;

        /* udelay(50); */

        /*
         * This API checks whether a packet has been received. If so, it
         * copies the frame into `buf`, returns its length, and advances
         * the current descriptor index to the next descriptor. Once done
         * with the frame, the buffer is added back onto the ring and the
         * lastdscr register is updated to this descriptor.
         */
        index = dma->cur_rx_index;
        offset = (uint32_t)(dma->rx_desc_aligned);
        stat0 = readl(GMAC0_DMA_RX_STATUS0_ADDR) & D64_RS0_CD_MASK;
        stat1 = readl(GMAC0_DMA_RX_STATUS1_ADDR) & D64_RS0_CD_MASK;
        curr = ((stat0 - offset) & D64_RS0_CD_MASK) / sizeof(dma64dd_t);
        active = ((stat1 - offset) & D64_RS0_CD_MASK) / sizeof(dma64dd_t);

        /* check if any frame */
        if (index == curr)
                return -1;

        debug("received packet\n");
        debug("expect(0x%x) curr(0x%x) active(0x%x)\n", index, curr, active);
        /* remove warning */
        if (index == active)
                ;

        /* get the packet pointer that corresponds to the rx descriptor */
        bufp = dma->rx_buf + index * RX_BUF_SIZE_ALIGNED;

        descp = (dma64dd_t *)(dma->rx_desc_aligned) + index;
        /* flush descriptor and buffer */
        flush_dcache_range((unsigned long)dma->rx_desc_aligned,
                           (unsigned long)dma->rx_desc_aligned +
                           DESCP_SIZE_ALIGNED * RX_BUF_NUM);
        flush_dcache_range((unsigned long)bufp,
                           (unsigned long)bufp + RX_BUF_SIZE_ALIGNED);

        buflen = (descp->ctrl2 & D64_CTRL2_BC_MASK);

        stat0 = readl(GMAC0_DMA_RX_STATUS0_ADDR);
        stat1 = readl(GMAC0_DMA_RX_STATUS1_ADDR);
        debug("bufp(0x%x) index(0x%x) buflen(0x%x) stat0(0x%x) stat1(0x%x)\n",
              (uint32_t)bufp, index, buflen, stat0, stat1);

        dma->cur_rx_index = (index + 1) & (RX_BUF_NUM - 1);

        /* get buffer offset */
        control = readl(GMAC0_DMA_RX_CTRL_ADDR);
        offset = (control & D64_RC_RO_MASK) >> D64_RC_RO_SHIFT;
        rcvlen = *(uint16_t *)bufp;

        debug("Received %d bytes\n", rcvlen);
        /* copy status into temp buf then copy data from rx buffer */
        memcpy(statbuf, bufp, offset);
        datap = (void *)((uint32_t)bufp + offset);
        memcpy(buf, datap, rcvlen);

        /* update descriptor that is being added back on ring */
        descp->ctrl2 = RX_BUF_SIZE_ALIGNED;
        descp->addrlow = (uint32_t)bufp;
        descp->addrhigh = 0;
        /* flush descriptor */
        flush_dcache_range((unsigned long)dma->rx_desc_aligned,
                           (unsigned long)dma->rx_desc_aligned +
                           DESCP_SIZE_ALIGNED * RX_BUF_NUM);

        /* set the lastdscr for the rx ring */
        writel(((uint32_t)descp) & D64_XP_LD_MASK, GMAC0_DMA_RX_PTR_ADDR);

        return (int)rcvlen;
}
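
/*
 * Worked example of the index math above: the RX status registers hold
 * the low bits of the *byte* address of the descriptor the engine is
 * currently on, so subtracting the ring base and dividing by
 * sizeof(dma64dd_t) (16 bytes) recovers a ring index. With the ring at
 * 0x1000 and stat0 reporting 0x1030:
 *
 *      curr = ((0x1030 - 0x1000) & D64_RS0_CD_MASK) / 16;      // == 3
 *
 * so descriptors 0..2 hold completed frames while index 3 is the one
 * the engine is working on.
 */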
static int gmac_disable_dma(struct eth_dma *dma, int dir)
{
        int status;

        debug("%s enter\n", __func__);

        if (dir == MAC_DMA_TX) {
                /* address PR8249/PR7577 issue */
                /* suspend tx DMA first */
                writel(D64_XC_SE, GMAC0_DMA_TX_CTRL_ADDR);
                SPINWAIT(((status = (readl(GMAC0_DMA_TX_STATUS0_ADDR) &
                                     D64_XS0_XS_MASK)) !=
                          D64_XS0_XS_DISABLED) &&
                         (status != D64_XS0_XS_IDLE) &&
                         (status != D64_XS0_XS_STOPPED), 10000);

                /*
                 * PR2414 WAR: DMA engines are not disabled until
                 * transfer finishes
                 */
                writel(0, GMAC0_DMA_TX_CTRL_ADDR);
                SPINWAIT(((status = (readl(GMAC0_DMA_TX_STATUS0_ADDR) &
                                     D64_XS0_XS_MASK)) !=
                          D64_XS0_XS_DISABLED), 10000);

                /* wait for the last transaction to complete */
                udelay(2);

                status = (status == D64_XS0_XS_DISABLED);
        } else {
                /*
                 * PR2414 WAR: DMA engines are not disabled until
                 * transfer finishes
                 */
                writel(0, GMAC0_DMA_RX_CTRL_ADDR);
                SPINWAIT(((status = (readl(GMAC0_DMA_RX_STATUS0_ADDR) &
                                     D64_RS0_RS_MASK)) !=
                          D64_RS0_RS_DISABLED), 10000);

                status = (status == D64_RS0_RS_DISABLED);
        }

        return status;
}
static int gmac_enable_dma(struct eth_dma *dma, int dir)
{
        uint32_t control;

        debug("%s enter\n", __func__);

        if (dir == MAC_DMA_TX) {
                dma->cur_tx_index = 0;

                /*
                 * These bits 20:18 (burstLen) of control register can be
                 * written but will take effect only if these bits are
                 * valid. So this will not affect previous versions
                 * of the DMA. They will continue to have those bits set to 0.
                 */
                control = readl(GMAC0_DMA_TX_CTRL_ADDR);

                control |= D64_XC_XE;
                if ((g_dmactrlflags & DMA_CTRL_PEN) == 0)
                        control |= D64_XC_PD;
                writel(control, GMAC0_DMA_TX_CTRL_ADDR);

                /* initialize the DMA channel */
                writel((uint32_t)(dma->tx_desc_aligned),
                       GMAC0_DMA_TX_ADDR_LOW_ADDR);
                writel(0, GMAC0_DMA_TX_ADDR_HIGH_ADDR);
        } else {
                dma->cur_rx_index = 0;

                control = (readl(GMAC0_DMA_RX_CTRL_ADDR) &
                           D64_RC_AE) | D64_RC_RE;

                if ((g_dmactrlflags & DMA_CTRL_PEN) == 0)
                        control |= D64_RC_PD;

                if (g_dmactrlflags & DMA_CTRL_ROC)
                        control |= D64_RC_OC;

                /*
                 * These bits 20:18 (burstLen) of control register can be
                 * written but will take effect only if these bits are
                 * valid. So this will not affect previous versions
                 * of the DMA. They will continue to have those bits set to 0.
                 */
                control &= ~D64_RC_BL_MASK;
                /* Keep default Rx burstlen */
                control |= readl(GMAC0_DMA_RX_CTRL_ADDR) & D64_RC_BL_MASK;
                control |= HWRXOFF << D64_RC_RO_SHIFT;

                writel(control, GMAC0_DMA_RX_CTRL_ADDR);
                /*
                 * the rx descriptor ring should have
                 * the addresses set properly;
                 * set the lastdscr for the rx ring (ring entries are
                 * sizeof(dma64dd_t) bytes apart, not a buffer-size apart)
                 */
                writel(((uint32_t)(dma->rx_desc_aligned) +
                        (RX_BUF_NUM - 1) * sizeof(dma64dd_t)) &
                       D64_XP_LD_MASK, GMAC0_DMA_RX_PTR_ADDR);
        }

        return 0;
}
bool gmac_mii_busywait(unsigned int timeout)
{
        uint32_t tmp = 0;

        while (timeout > 10) {
                tmp = readl(GMAC_MII_CTRL_ADDR);
                if (tmp & (1 << GMAC_MII_BUSY_SHIFT)) {
                        udelay(10);
                        timeout -= 10;
                } else {
                        break;
                }
        }

        return tmp & (1 << GMAC_MII_BUSY_SHIFT);
}
int gmac_miiphy_read(struct mii_dev *bus, int phyaddr, int devad, int reg)
{
        uint32_t tmp = 0;
        u16 value = 0;

        /* Busy wait timeout is 1ms */
        if (gmac_mii_busywait(1000)) {
                pr_err("%s: Prepare MII read: MII/MDIO busy\n", __func__);
                return -1;
        }

        /* Read operation */
        tmp = GMAC_MII_DATA_READ_CMD;
        tmp |= (phyaddr << GMAC_MII_PHY_ADDR_SHIFT) |
               (reg << GMAC_MII_PHY_REG_SHIFT);
        debug("MII read cmd 0x%x, phy 0x%x, reg 0x%x\n", tmp, phyaddr, reg);
        writel(tmp, GMAC_MII_DATA_ADDR);

        if (gmac_mii_busywait(1000)) {
                pr_err("%s: MII read failure: MII/MDIO busy\n", __func__);
                return -1;
        }

        value = readl(GMAC_MII_DATA_ADDR) & 0xffff;
        debug("MII read data 0x%x\n", value);

        return value;
}

int gmac_miiphy_write(struct mii_dev *bus, int phyaddr, int devad, int reg,
                      u16 value)
{
        uint32_t tmp = 0;

        /* Busy wait timeout is 1ms */
        if (gmac_mii_busywait(1000)) {
                pr_err("%s: Prepare MII write: MII/MDIO busy\n", __func__);
                return -1;
        }

        /* Write operation */
        tmp = GMAC_MII_DATA_WRITE_CMD | (value & 0xffff);
        tmp |= ((phyaddr << GMAC_MII_PHY_ADDR_SHIFT) |
                (reg << GMAC_MII_PHY_REG_SHIFT));
        debug("MII write cmd 0x%x, phy 0x%x, reg 0x%x, data 0x%x\n",
              tmp, phyaddr, reg, value);
        writel(tmp, GMAC_MII_DATA_ADDR);

        if (gmac_mii_busywait(1000)) {
                pr_err("%s: MII write failure: MII/MDIO busy\n", __func__);
                return -1;
        }

        return 0;
}
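
/*
 * Usage sketch (hypothetical, for illustration): reading a PHY's ID
 * registers through the accessors above. MII_PHYSID1/MII_PHYSID2 come
 * from linux/mii.h, `bus` and `phyaddr` are whatever the board wires
 * up, and devad is unused by this clause-22-only implementation:
 *
 *      int id1 = gmac_miiphy_read(bus, phyaddr, 0, MII_PHYSID1);
 *      int id2 = gmac_miiphy_read(bus, phyaddr, 0, MII_PHYSID2);
 *
 *      if (id1 >= 0 && id2 >= 0)
 *              printf("PHY ID: 0x%04x%04x\n", id1, id2);
 */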
void gmac_init_reset(void)
{
        debug("%s enter\n", __func__);

        /* set command config reg CC_SR */
        reg32_set_bits(UNIMAC0_CMD_CFG_ADDR, CC_SR);
        udelay(GMAC_RESET_DELAY);
}

void gmac_clear_reset(void)
{
        debug("%s enter\n", __func__);

        /* clear command config reg CC_SR */
        reg32_clear_bits(UNIMAC0_CMD_CFG_ADDR, CC_SR);
        udelay(GMAC_RESET_DELAY);
}

static void gmac_enable_local(bool en)
{
        uint32_t cmdcfg;

        debug("%s enter\n", __func__);

        /* read command config reg */
        cmdcfg = readl(UNIMAC0_CMD_CFG_ADDR);

        /* put mac in reset */
        gmac_init_reset();

        cmdcfg |= CC_SR;

        /* first deassert rx_ena and tx_ena while in reset */
        cmdcfg &= ~(CC_RE | CC_TE);
        /* write command config reg */
        writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);

        /* bring mac out of reset */
        gmac_clear_reset();

        /* if not enabling, exit now */
        if (!en)
                return;

        /* enable the mac transmit and receive paths now */
        udelay(2);
        cmdcfg &= ~CC_SR;
        cmdcfg |= (CC_RE | CC_TE);

        /* assert rx_ena and tx_ena when out of reset to enable the mac */
        writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);
}
int gmac_enable(void)
{
        gmac_enable_local(1);

        /* clear interrupts */
        writel(I_INTMASK, GMAC0_INT_STATUS_ADDR);
        return 0;
}

int gmac_disable(void)
{
        gmac_enable_local(0);
        return 0;
}
int gmac_set_speed(int speed, int duplex)
{
        uint32_t cmdcfg;
        uint32_t hd_ena;
        uint32_t speed_cfg;

        hd_ena = duplex ? 0 : CC_HD;
        if (speed == 1000) {
                speed_cfg = 2;
        } else if (speed == 100) {
                speed_cfg = 1;
        } else if (speed == 10) {
                speed_cfg = 0;
        } else {
                pr_err("%s: Invalid GMAC speed(%d)!\n", __func__, speed);
                return -1;
        }

        cmdcfg = readl(UNIMAC0_CMD_CFG_ADDR);
        cmdcfg &= ~(CC_ES_MASK | CC_HD);
        cmdcfg |= ((speed_cfg << CC_ES_SHIFT) | hd_ena);

        printf("Change GMAC speed to %d Mbit/s\n", speed);
        debug("GMAC speed cfg 0x%x\n", cmdcfg);
        writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);

        return 0;
}
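
/*
 * Encoding used above: the CC_ES field takes 0 for 10 Mbit/s, 1 for
 * 100 Mbit/s and 2 for 1000 Mbit/s, and CC_HD selects half duplex, so
 * e.g. gmac_set_speed(100, 0) programs CC_ES = 1 with CC_HD set.
 */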
int gmac_set_mac_addr(unsigned char *mac)
{
        /* set our local address */
        debug("GMAC: %02x:%02x:%02x:%02x:%02x:%02x\n",
              mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        writel(htonl(*(uint32_t *)mac), UNIMAC0_MAC_MSB_ADDR);
        /* only two bytes remain; a 32-bit load would read past the array */
        writew(htons(*(uint16_t *)&mac[4]), UNIMAC0_MAC_LSB_ADDR);

        return 0;
}
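
/*
 * Worked example for the register split above: for the MAC address
 * 00:11:22:33:44:55, the first four bytes land in the MSB register as
 * 0x00112233 (htonl() undoes the little-endian load) and the last two
 * land in the LSB register as 0x4455 via htons().
 */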
int gmac_mac_init(struct eth_device *dev)
{
        struct eth_info *eth = (struct eth_info *)(dev->priv);
        struct eth_dma *dma = &(eth->dma);
        uint32_t tmp;
        uint32_t cmdcfg;
        int chipid;

        debug("%s enter\n", __func__);

        /* Always use GMAC0 */
        printf("Using GMAC%d\n", 0);

        /* Reset AMAC0 core */
        writel(0, AMAC0_IDM_RESET_ADDR);
        tmp = readl(AMAC0_IO_CTRL_DIRECT_ADDR);
        /* Set clock */
        tmp &= ~(1 << AMAC0_IO_CTRL_CLK_250_SEL_SHIFT);
        tmp |= (1 << AMAC0_IO_CTRL_GMII_MODE_SHIFT);
        /* Set Tx clock */
        tmp &= ~(1 << AMAC0_IO_CTRL_DEST_SYNC_MODE_EN_SHIFT);
        writel(tmp, AMAC0_IO_CTRL_DIRECT_ADDR);

        /* reset gmac */
        /*
         * As the AMAC core was just reset, this should not be needed:
         * set eth_data into loopback mode to ensure no rx traffic
         * gmac_loopback(eth_data, TRUE);
         * ET_TRACE(("%s gmac loopback\n", __func__));
         * udelay(1);
         */
        cmdcfg = readl(UNIMAC0_CMD_CFG_ADDR);
        cmdcfg &= ~(CC_TE | CC_RE | CC_RPI | CC_TAI | CC_HD | CC_ML |
                    CC_CFE | CC_RL | CC_RED | CC_PE | CC_TPI |
                    CC_PAD_EN | CC_PF);
        cmdcfg |= (CC_PROM | CC_NLC | CC_CFE);
        /* put mac in reset */
        gmac_init_reset();
        writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);
        gmac_clear_reset();

        /* enable clear MIB on read */
        reg32_set_bits(GMAC0_DEV_CTRL_ADDR, DC_MROR);
        /* PHY: set smi_master to drive mdc_clk */
        reg32_set_bits(GMAC0_PHY_CTRL_ADDR, PC_MTE);

        /* clear persistent sw intstatus */
        writel(0, GMAC0_INT_STATUS_ADDR);

        if (dma_init(dma) < 0) {
                pr_err("%s: GMAC dma_init failed\n", __func__);
                goto err_exit;
        }

        chipid = CHIPID;
        printf("%s: Chip ID: 0x%x\n", __func__, chipid);

        /* set switch bypass mode */
        tmp = readl(SWITCH_GLOBAL_CONFIG_ADDR);
        tmp |= (1 << CDRU_SWITCH_BYPASS_SWITCH_SHIFT);

        /* Switch mode */
        /* tmp &= ~(1 << CDRU_SWITCH_BYPASS_SWITCH_SHIFT); */

        writel(tmp, SWITCH_GLOBAL_CONFIG_ADDR);

        tmp = readl(CRMU_CHIP_IO_PAD_CONTROL_ADDR);
        tmp &= ~(1 << CDRU_IOMUX_FORCE_PAD_IN_SHIFT);
        writel(tmp, CRMU_CHIP_IO_PAD_CONTROL_ADDR);

        /* Set MDIO to internal GPHY */
        tmp = readl(GMAC_MII_CTRL_ADDR);
        /* Select internal MDC/MDIO bus */
        tmp &= ~(1 << GMAC_MII_CTRL_BYP_SHIFT);
        /* select MDC/MDIO connecting to on-chip internal PHYs */
        tmp &= ~(1 << GMAC_MII_CTRL_EXT_SHIFT);
        /*
         * set bits [6:0] (MDCDIV) to the required divisor so the MDC
         * clock comes out at 66 MHz / 0x1A (26) ~= 2.5 MHz
         */
        tmp |= 0x1A;
        writel(tmp, GMAC_MII_CTRL_ADDR);

        if (gmac_mii_busywait(1000)) {
                pr_err("%s: Configure MDIO: MII/MDIO busy\n", __func__);
                goto err_exit;
        }

        /* Configure GMAC0 */
        /* enable one rx interrupt per received frame */
        writel(1 << GMAC0_IRL_FRAMECOUNT_SHIFT, GMAC0_INTR_RECV_LAZY_ADDR);

        /* read command config reg */
        cmdcfg = readl(UNIMAC0_CMD_CFG_ADDR);
        /* enable 802.3x tx flow control (honor received PAUSE frames) */
        cmdcfg &= ~CC_RPI;
        /* enable promiscuous mode */
        cmdcfg |= CC_PROM;
        /* Disable loopback mode */
        cmdcfg &= ~CC_ML;
        /* set the speed */
        cmdcfg &= ~(CC_ES_MASK | CC_HD);
        /* Set to 1Gbps and full duplex by default */
        cmdcfg |= (2 << CC_ES_SHIFT);

        /* put mac in reset */
        gmac_init_reset();
        /* write register */
        writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);
        /* bring mac out of reset */
        gmac_clear_reset();

        /* set max frame lengths; account for possible vlan tag */
        writel(PKTSIZE + 32, UNIMAC0_FRM_LENGTH_ADDR);

        return 0;

err_exit:
        dma_deinit(dma);
        return -1;
}
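
/*
 * Bring-up sketch (hypothetical caller, for illustration; the actual
 * sequencing lives in the companion bcm-sf2 core driver): once
 * gmac_mac_init() succeeds, a caller would program the station address,
 * start both DMA directions, then enable the MAC:
 *
 *      if (gmac_mac_init(dev) < 0)
 *              return -1;
 *      gmac_set_mac_addr(dev->enetaddr);
 *      eth->dma.enable_dma(&eth->dma, MAC_DMA_RX);
 *      eth->dma.enable_dma(&eth->dma, MAC_DMA_TX);
 *      gmac_enable();
 */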
int gmac_add(struct eth_device *dev)
{
        struct eth_info *eth = (struct eth_info *)(dev->priv);
        struct eth_dma *dma = &(eth->dma);
        void *tmp;

        /*
         * Desc has to be 16-byte aligned. But for dcache flush it must be
         * aligned to ARCH_DMA_MINALIGN.
         */
        tmp = memalign(ARCH_DMA_MINALIGN, DESCP_SIZE_ALIGNED * TX_BUF_NUM);
        if (tmp == NULL) {
                printf("%s: Failed to allocate TX desc Buffer\n", __func__);
                return -1;
        }

        dma->tx_desc_aligned = (void *)tmp;
        debug("TX Descriptor Buffer: %p; length: 0x%x\n",
              dma->tx_desc_aligned, DESCP_SIZE_ALIGNED * TX_BUF_NUM);

        tmp = memalign(ARCH_DMA_MINALIGN, TX_BUF_SIZE_ALIGNED * TX_BUF_NUM);
        if (tmp == NULL) {
                printf("%s: Failed to allocate TX Data Buffer\n", __func__);
                free(dma->tx_desc_aligned);
                return -1;
        }

        dma->tx_buf = (uint8_t *)tmp;
        debug("TX Data Buffer: %p; length: 0x%x\n",
              dma->tx_buf, TX_BUF_SIZE_ALIGNED * TX_BUF_NUM);

        /* Desc has to be 16-byte aligned */
        tmp = memalign(ARCH_DMA_MINALIGN, DESCP_SIZE_ALIGNED * RX_BUF_NUM);
        if (tmp == NULL) {
                printf("%s: Failed to allocate RX Descriptor\n", __func__);
                free(dma->tx_desc_aligned);
                free(dma->tx_buf);
                return -1;
        }

        dma->rx_desc_aligned = (void *)tmp;
        debug("RX Descriptor Buffer: %p, length: 0x%x\n",
              dma->rx_desc_aligned, DESCP_SIZE_ALIGNED * RX_BUF_NUM);

        tmp = memalign(ARCH_DMA_MINALIGN, RX_BUF_SIZE_ALIGNED * RX_BUF_NUM);
        if (tmp == NULL) {
                printf("%s: Failed to allocate RX Data Buffer\n", __func__);
                free(dma->tx_desc_aligned);
                free(dma->tx_buf);
                free(dma->rx_desc_aligned);
                return -1;
        }

        dma->rx_buf = (uint8_t *)tmp;
        debug("RX Data Buffer: %p; length: 0x%x\n",
              dma->rx_buf, RX_BUF_SIZE_ALIGNED * RX_BUF_NUM);

        g_dmactrlflags = 0;

        eth->phy_interface = PHY_INTERFACE_MODE_GMII;

        dma->tx_packet = gmac_tx_packet;
        dma->check_tx_done = gmac_check_tx_done;

        dma->check_rx_done = gmac_check_rx_done;

        dma->enable_dma = gmac_enable_dma;
        dma->disable_dma = gmac_disable_dma;

        eth->miiphy_read = gmac_miiphy_read;
        eth->miiphy_write = gmac_miiphy_write;

        eth->mac_init = gmac_mac_init;
        eth->disable_mac = gmac_disable;
        eth->enable_mac = gmac_enable;
        eth->set_mac_addr = gmac_set_mac_addr;
        eth->set_mac_speed = gmac_set_speed;

        return 0;
}
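
/*
 * Send-path sketch (hypothetical, showing how a caller would consume
 * the hooks installed above): transmit one frame and poll for
 * completion; a real caller would bound the polling loop with a
 * timeout:
 *
 *      dma->tx_packet(dma, packet, length);
 *      while (!dma->check_tx_done(dma))
 *              ;
 */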