// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2009
 * Marvell Semiconductor <www.marvell.com>
 * Written-by: Prafulla Wadaskar <prafulla@marvell.com>
 *
 * (C) Copyright 2003
 * Ingo Assmus <ingo.assmus@keymile.com>
 *
 * based on - Driver for MV64360X ethernet ports
 * Copyright (C) 2002 rabeeh@galileo.co.il
 */
#include <common.h>
#include <dm.h>
#include <net.h>
#include <malloc.h>
#include <miiphy.h>
#include <wait_bit.h>
#include <asm/io.h>
#include <linux/errno.h>
#include <asm/types.h>
#include <asm/system.h>
#include <asm/byteorder.h>
#include <asm/arch/cpu.h>

#if defined(CONFIG_ARCH_KIRKWOOD)
#include <asm/arch/soc.h>
#elif defined(CONFIG_ARCH_ORION5X)
#include <asm/arch/orion5x.h>
#endif

#include "mvgbe.h"

DECLARE_GLOBAL_DATA_PTR;

#ifndef CONFIG_MVGBE_PORTS
# define CONFIG_MVGBE_PORTS {0, 0}
#endif
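
/*
 * CONFIG_MVGBE_PORTS is a per-port enable map indexed by device number;
 * for example, {1, 0} would register only egiga0 (see mvgbe_initialize()
 * below, where each non-zero entry is probed).
 */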

#define MV_PHY_ADR_REQUEST 0xee
#define MVGBE_SMI_REG (((struct mvgbe_registers *)MVGBE0_BASE)->smi)
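
/*
 * Note: MVGBE_SMI_REG always resolves to the SMI register of port 0
 * (MVGBE0_BASE), so all MDIO/PHY accesses go through the first port's
 * SMI master regardless of which port owns the PHY.
 */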

#if defined(CONFIG_PHYLIB) || defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
static int smi_wait_ready(struct mvgbe_device *dmvgbe)
{
	int ret;

	ret = wait_for_bit_le32(&MVGBE_SMI_REG, MVGBE_PHY_SMI_BUSY_MASK, false,
				MVGBE_PHY_SMI_TIMEOUT_MS, false);
	if (ret) {
		printf("Error: SMI busy timeout\n");
		return ret;
	}

	return 0;
}

static int __mvgbe_mdio_read(struct mvgbe_device *dmvgbe, int phy_adr,
			     int devad, int reg_ofs)
{
	struct mvgbe_registers *regs = dmvgbe->regs;
	u32 smi_reg;
	u32 timeout;
	u16 data = 0;

	/* Phyadr read request */
	if (phy_adr == MV_PHY_ADR_REQUEST &&
	    reg_ofs == MV_PHY_ADR_REQUEST) {
		data = (u16) (MVGBE_REG_RD(regs->phyadr) & PHYADR_MASK);
		return data;
	}
	/* check parameters */
	if (phy_adr > PHYADR_MASK) {
		printf("Err..(%s) Invalid PHY address %d\n",
		       __func__, phy_adr);
		return -EFAULT;
	}
	if (reg_ofs > PHYREG_MASK) {
		printf("Err..(%s) Invalid register offset %d\n",
		       __func__, reg_ofs);
		return -EFAULT;
	}
	/* wait until the SMI is not busy */
	if (smi_wait_ready(dmvgbe) < 0)
		return -EFAULT;
	/* fill the phy address and register offset and read opcode */
	smi_reg = (phy_adr << MVGBE_PHY_SMI_DEV_ADDR_OFFS)
		| (reg_ofs << MVGBE_SMI_REG_ADDR_OFFS)
		| MVGBE_PHY_SMI_OPCODE_READ;
	/* write the smi register */
	MVGBE_REG_WR(MVGBE_SMI_REG, smi_reg);

	/* wait until the read value is ready */
	timeout = MVGBE_PHY_SMI_TIMEOUT;
	do {
		/* read smi register */
		smi_reg = MVGBE_REG_RD(MVGBE_SMI_REG);
		if (timeout-- == 0) {
			printf("Err..(%s) SMI read ready timeout\n",
			       __func__);
			return -EFAULT;
		}
	} while (!(smi_reg & MVGBE_PHY_SMI_READ_VALID_MASK));

	/* Wait for the data to update in the SMI register */
	for (timeout = 0; timeout < MVGBE_PHY_SMI_TIMEOUT; timeout++)
		;

	data = (u16) (MVGBE_REG_RD(MVGBE_SMI_REG) & MVGBE_PHY_SMI_DATA_MASK);

	debug("%s:(adr %d, off %d) value= %04x\n", __func__, phy_adr, reg_ofs,
	      data);

	return data;
}

/*
 * smi_reg_read - miiphy_read callback function.
 *
 * Returns the 16-bit PHY register value, or -EFAULT on error.
 */
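/*
 * Illustrative use via the legacy miiphy layer (device name and PHY
 * address are examples):
 *
 *	u16 val;
 *	miiphy_read("egiga0", 8, MII_BMSR, &val);
 */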
static int smi_reg_read(struct mii_dev *bus, int phy_adr, int devad,
			int reg_ofs)
{
#ifdef CONFIG_DM_ETH
	struct mvgbe_device *dmvgbe = bus->priv;
#else
	struct eth_device *dev = eth_get_dev_by_name(bus->name);
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
#endif

	return __mvgbe_mdio_read(dmvgbe, phy_adr, devad, reg_ofs);
}

static int __mvgbe_mdio_write(struct mvgbe_device *dmvgbe, int phy_adr,
			      int devad, int reg_ofs, u16 data)
{
	struct mvgbe_registers *regs = dmvgbe->regs;
	u32 smi_reg;

	/* Phyadr write request */
	if (phy_adr == MV_PHY_ADR_REQUEST &&
	    reg_ofs == MV_PHY_ADR_REQUEST) {
		MVGBE_REG_WR(regs->phyadr, data);
		return 0;
	}

	/* check parameters */
	if (phy_adr > PHYADR_MASK) {
		printf("Err..(%s) Invalid phy address\n", __func__);
		return -EINVAL;
	}
	if (reg_ofs > PHYREG_MASK) {
		printf("Err..(%s) Invalid register offset\n", __func__);
		return -EFAULT;
	}

	/* wait until the SMI is not busy */
	if (smi_wait_ready(dmvgbe) < 0)
		return -EFAULT;

	/* fill the phy addr and reg offset and write opcode and data */
	smi_reg = (data << MVGBE_PHY_SMI_DATA_OFFS);
	smi_reg |= (phy_adr << MVGBE_PHY_SMI_DEV_ADDR_OFFS)
		| (reg_ofs << MVGBE_SMI_REG_ADDR_OFFS);
	smi_reg &= ~MVGBE_PHY_SMI_OPCODE_READ;

	/* write the smi register */
	MVGBE_REG_WR(MVGBE_SMI_REG, smi_reg);

	return 0;
}

/*
 * smi_reg_write - miiphy_write callback function.
 *
 * Returns 0 if the write succeeded, -EFAULT on error.
 */
static int smi_reg_write(struct mii_dev *bus, int phy_adr, int devad,
			 int reg_ofs, u16 data)
{
#ifdef CONFIG_DM_ETH
	struct mvgbe_device *dmvgbe = bus->priv;
#else
	struct eth_device *dev = eth_get_dev_by_name(bus->name);
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
#endif

	return __mvgbe_mdio_write(dmvgbe, phy_adr, devad, reg_ofs, data);
}
#endif

/* Stop and check all queues */
static void stop_queue(u32 *qreg)
{
	u32 reg_data;

	reg_data = readl(qreg);

	if (reg_data & 0xFF) {
		/* Issue stop command for active channels only */
		writel((reg_data << 8), qreg);

		/* Wait for all queue activity to terminate. */
		do {
			/*
			 * Check port cause register that all queues
			 * are stopped
			 */
			reg_data = readl(qreg);
		} while (reg_data & 0xFF);
	}
}

/*
 * set_access_control - Config address decode parameters for Ethernet unit
 *
 * This function configures the address decode parameters for the Gigabit
 * Ethernet Controller according to the given parameters struct.
 *
 * @regs	Register struct pointer.
 * @param	Address decode parameter struct.
 */
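/*
 * Illustrative call (mirrors what set_dram_access() below does for DRAM
 * bank 0; the 0x10000000 size, i.e. 256 MiB, and base address are
 * examples):
 *
 *	struct mvgbe_winparam win = {
 *		.win		= 0,
 *		.target		= MVGBE_TARGET_DRAM,
 *		.attrib		= EBAR_DRAM_CS0,
 *		.base_addr	= 0x00000000,
 *		.size		= 0x10000000,
 *		.access_ctrl	= EWIN_ACCESS_FULL,
 *		.high_addr	= 0,
 *		.enable		= 1,
 *	};
 *	set_access_control(regs, &win);
 */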
static void set_access_control(struct mvgbe_registers *regs,
			       struct mvgbe_winparam *param)
{
	u32 access_prot_reg;

	/* Set access control register */
	access_prot_reg = MVGBE_REG_RD(regs->epap);
	/* clear window permission */
	access_prot_reg &= (~(3 << (param->win * 2)));
	access_prot_reg |= (param->access_ctrl << (param->win * 2));
	MVGBE_REG_WR(regs->epap, access_prot_reg);

	/* Set window Size reg (SR) */
	MVGBE_REG_WR(regs->barsz[param->win].size,
		     (((param->size / 0x10000) - 1) << 16));

	/* Set window Base address reg (BA) */
	MVGBE_REG_WR(regs->barsz[param->win].bar,
		     (param->target | param->attrib | param->base_addr));

	/* High address remap reg (HARR) */
	if (param->win < 4)
		MVGBE_REG_WR(regs->ha_remap[param->win], param->high_addr);

	/* Base address enable reg (BARER) */
	if (param->enable == 1)
		MVGBE_REG_BITS_RESET(regs->bare, (1 << param->win));
	else
		MVGBE_REG_BITS_SET(regs->bare, (1 << param->win));
}

static void set_dram_access(struct mvgbe_registers *regs)
{
	struct mvgbe_winparam win_param;
	int i;

	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		/* Set access parameters for DRAM bank i */
		win_param.win = i;	/* Use Ethernet window i */
		/* Window target - DDR */
		win_param.target = MVGBE_TARGET_DRAM;
		/* Enable full access */
		win_param.access_ctrl = EWIN_ACCESS_FULL;
		win_param.high_addr = 0;
		/* Get bank base and size */
		win_param.base_addr = gd->bd->bi_dram[i].start;
		win_param.size = gd->bd->bi_dram[i].size;
		if (win_param.size == 0)
			win_param.enable = 0;
		else
			win_param.enable = 1;	/* Enable the access */

		/* Enable DRAM bank */
		switch (i) {
		case 0:
			win_param.attrib = EBAR_DRAM_CS0;
			break;
		case 1:
			win_param.attrib = EBAR_DRAM_CS1;
			break;
		case 2:
			win_param.attrib = EBAR_DRAM_CS2;
			break;
		case 3:
			win_param.attrib = EBAR_DRAM_CS3;
			break;
		default:
			/* invalid bank, disable access */
			win_param.enable = 0;
			win_param.attrib = 0;
			break;
		}
		/* Set the access control for address window (EPAPR) RD/WR */
		set_access_control(regs, &win_param);
	}
}

/*
 * port_init_mac_tables - Clear all entries in the UC, SMC and OMC tables
 *
 * Go through all the DA filter tables (Unicast, Special Multicast & Other
 * Multicast) and set each entry to 0.
 */
static void port_init_mac_tables(struct mvgbe_registers *regs)
{
	int table_index;

	/* Clear DA filter unicast table (Ex_dFUT) */
	for (table_index = 0; table_index < 4; ++table_index)
		MVGBE_REG_WR(regs->dfut[table_index], 0);

	for (table_index = 0; table_index < 64; ++table_index) {
		/* Clear DA filter special multicast table (Ex_dFSMT) */
		MVGBE_REG_WR(regs->dfsmt[table_index], 0);
		/* Clear DA filter other multicast table (Ex_dFOMT) */
		MVGBE_REG_WR(regs->dfomt[table_index], 0);
	}
}

/*
 * port_uc_addr - This function sets the port unicast address table
 *
 * This function locates the proper entry in the Unicast table for the
 * specified MAC nibble and sets its properties according to the function
 * parameters.
 * It adds/removes MAC addresses from the port unicast address table.
 *
 * @uc_nibble	Unicast MAC address last nibble.
 * @option	0 = Add, 1 = remove address.
 *
 * RETURN: 1 if the operation succeeded, 0 if the option parameter is
 * invalid.
 */
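/*
 * Worked example: for a MAC address ending in 0x25, the last nibble is 5,
 * so the entry lives in dfut[5 / 4] = dfut[1], at byte offset 5 % 4 = 1
 * within that register.
 */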
static int port_uc_addr(struct mvgbe_registers *regs, u8 uc_nibble,
			int option)
{
	u32 unicast_reg;
	u32 tbl_offset;
	u32 reg_offset;

	/* Locate the Unicast table entry */
	uc_nibble = (0xf & uc_nibble);
	/* Register offset from unicast table base */
	tbl_offset = (uc_nibble / 4);
	/* Entry offset within the above register */
	reg_offset = uc_nibble % 4;

	switch (option) {
	case REJECT_MAC_ADDR:
		/*
		 * Clear accepts frame bit at specified unicast
		 * DA table entry
		 */
		unicast_reg = MVGBE_REG_RD(regs->dfut[tbl_offset]);
		unicast_reg &= (0xFF << (8 * reg_offset));
		MVGBE_REG_WR(regs->dfut[tbl_offset], unicast_reg);
		break;
	case ACCEPT_MAC_ADDR:
		/* Set accepts frame bit at unicast DA filter table entry */
		unicast_reg = MVGBE_REG_RD(regs->dfut[tbl_offset]);
		unicast_reg &= (0xFF << (8 * reg_offset));
		unicast_reg |= ((0x01 | (RXUQ << 1)) << (8 * reg_offset));
		MVGBE_REG_WR(regs->dfut[tbl_offset], unicast_reg);
		break;
	default:
		return 0;
	}

	return 1;
}

/*
 * port_uc_addr_set - This function sets the port unicast address.
 */
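/* macah carries MAC bytes 0-3 and macal bytes 4-5 of the address. */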
static void port_uc_addr_set(struct mvgbe_device *dmvgbe, u8 *p_addr)
{
	struct mvgbe_registers *regs = dmvgbe->regs;
	u32 mac_h;
	u32 mac_l;

	mac_l = (p_addr[4] << 8) | (p_addr[5]);
	mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) | (p_addr[2] << 8) |
		(p_addr[3] << 0);

	MVGBE_REG_WR(regs->macal, mac_l);
	MVGBE_REG_WR(regs->macah, mac_h);

	/* Accept frames of this address */
	port_uc_addr(regs, p_addr[5], ACCEPT_MAC_ADDR);
}

/*
 * mvgbe_init_rx_desc_ring - Create the Rx descriptor ring and buffers in
 * memory.
 */
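/*
 * The result is a circular, singly linked list of RINGSZ descriptors,
 * each owning one PKTSIZE_ALIGN-sized buffer and initially handed to the
 * DMA engine:
 *
 *	desc[0] -> desc[1] -> ... -> desc[RINGSZ-1] --+
 *	   ^------------------------------------------+
 */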
static void mvgbe_init_rx_desc_ring(struct mvgbe_device *dmvgbe)
{
	struct mvgbe_rxdesc *p_rx_desc;
	int i;

	/* initialize the Rx descriptors ring */
	p_rx_desc = dmvgbe->p_rxdesc;
	for (i = 0; i < RINGSZ; i++) {
		p_rx_desc->cmd_sts =
			MVGBE_BUFFER_OWNED_BY_DMA | MVGBE_RX_EN_INTERRUPT;
		p_rx_desc->buf_size = PKTSIZE_ALIGN;
		p_rx_desc->byte_cnt = 0;
		p_rx_desc->buf_ptr = dmvgbe->p_rxbuf + i * PKTSIZE_ALIGN;
		if (i == (RINGSZ - 1))
			p_rx_desc->nxtdesc_p = dmvgbe->p_rxdesc;
		else {
			p_rx_desc->nxtdesc_p = (struct mvgbe_rxdesc *)
				((u32) p_rx_desc + MV_RXQ_DESC_ALIGNED_SIZE);
			p_rx_desc = p_rx_desc->nxtdesc_p;
		}
	}
	dmvgbe->p_rxdesc_curr = dmvgbe->p_rxdesc;
}

static int __mvgbe_init(struct mvgbe_device *dmvgbe, u8 *enetaddr,
			const char *name)
{
	struct mvgbe_registers *regs = dmvgbe->regs;
#if (defined(CONFIG_MII) || defined(CONFIG_CMD_MII)) && \
	!defined(CONFIG_PHYLIB) && \
	!defined(CONFIG_DM_ETH) && \
	defined(CONFIG_SYS_FAULT_ECHO_LINK_DOWN)
	int i;
#endif
	/* setup RX rings */
	mvgbe_init_rx_desc_ring(dmvgbe);

	/* Clear the ethernet port interrupts */
	MVGBE_REG_WR(regs->ic, 0);
	MVGBE_REG_WR(regs->ice, 0);
	/* Unmask RX buffer and TX end interrupt */
	MVGBE_REG_WR(regs->pim, INT_CAUSE_UNMASK_ALL);
	/* Unmask phy and link status changes interrupts */
	MVGBE_REG_WR(regs->peim, INT_CAUSE_UNMASK_ALL_EXT);

	set_dram_access(regs);
	port_init_mac_tables(regs);
	port_uc_addr_set(dmvgbe, enetaddr);

	/* Assign port configuration and command. */
	MVGBE_REG_WR(regs->pxc, PRT_CFG_VAL);
	MVGBE_REG_WR(regs->pxcx, PORT_CFG_EXTEND_VALUE);
	MVGBE_REG_WR(regs->psc0, PORT_SERIAL_CONTROL_VALUE);

	/* Assign port SDMA configuration */
	MVGBE_REG_WR(regs->sdc, PORT_SDMA_CFG_VALUE);
	MVGBE_REG_WR(regs->tqx[0].qxttbc, QTKNBKT_DEF_VAL);
	MVGBE_REG_WR(regs->tqx[0].tqxtbc,
		     (QMTBS_DEF_VAL << 16) | QTKNRT_DEF_VAL);
	/* Turn off the port/RXUQ bandwidth limitation */
	MVGBE_REG_WR(regs->pmtu, 0);

	/* Set maximum receive buffer to 9700 bytes */
	MVGBE_REG_WR(regs->psc0, MVGBE_MAX_RX_PACKET_9700BYTE
		     | (MVGBE_REG_RD(regs->psc0) & MRU_MASK));

	/* Enable port initially */
	MVGBE_REG_BITS_SET(regs->psc0, MVGBE_SERIAL_PORT_EN);

	/*
	 * Set ethernet MTU for leaky bucket mechanism to 0 - this will
	 * disable the leaky bucket mechanism.
	 */
	MVGBE_REG_WR(regs->pmtu, 0);

	/* Assignment of Rx CRDB of given RXUQ */
	MVGBE_REG_WR(regs->rxcdp[RXUQ], (u32) dmvgbe->p_rxdesc_curr);
	/* ensure previous write is done before enabling Rx DMA */
	isb();
	/* Enable port Rx. */
	MVGBE_REG_WR(regs->rqc, (1 << RXUQ));

#if (defined(CONFIG_MII) || defined(CONFIG_CMD_MII)) && \
	!defined(CONFIG_PHYLIB) && \
	!defined(CONFIG_DM_ETH) && \
	defined(CONFIG_SYS_FAULT_ECHO_LINK_DOWN)
	/* Wait up to 5s for the link status */
	for (i = 0; i < 5; i++) {
		u16 phyadr;

		miiphy_read(name, MV_PHY_ADR_REQUEST,
			    MV_PHY_ADR_REQUEST, &phyadr);
		/* Return if we get link up */
		if (miiphy_link(name, phyadr))
			return 0;
		udelay(1000000);
	}

	printf("No link on %s\n", name);
	return -1;
#endif
	return 0;
}

#ifndef CONFIG_DM_ETH
static int mvgbe_init(struct eth_device *dev)
{
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);

	return __mvgbe_init(dmvgbe, dmvgbe->dev.enetaddr, dmvgbe->dev.name);
}
#endif

static void __mvgbe_halt(struct mvgbe_device *dmvgbe)
{
	struct mvgbe_registers *regs = dmvgbe->regs;

	/* Disable all gigE address decoder windows */
	MVGBE_REG_WR(regs->bare, 0x3f);

	stop_queue(&regs->tqc);
	stop_queue(&regs->rqc);

	/* Disable port */
	MVGBE_REG_BITS_RESET(regs->psc0, MVGBE_SERIAL_PORT_EN);
	/* Set port to not-reset state */
	MVGBE_REG_BITS_RESET(regs->psc1, 1 << 4);
#ifdef CONFIG_SYS_MII_MODE
	/* Set MII interface up */
	MVGBE_REG_BITS_RESET(regs->psc1, 1 << 3);
#endif
	/* Disable & mask ethernet port interrupts */
	MVGBE_REG_WR(regs->ic, 0);
	MVGBE_REG_WR(regs->ice, 0);
	MVGBE_REG_WR(regs->pim, 0);
	MVGBE_REG_WR(regs->peim, 0);
}

#ifndef CONFIG_DM_ETH
static int mvgbe_halt(struct eth_device *dev)
{
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);

	__mvgbe_halt(dmvgbe);

	return 0;
}
#endif

#ifdef CONFIG_DM_ETH
static int mvgbe_write_hwaddr(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);

	port_uc_addr_set(dev_get_priv(dev), pdata->enetaddr);

	return 0;
}
#else
static int mvgbe_write_hwaddr(struct eth_device *dev)
{
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);

	/* Programs net device MAC address after initialization */
	port_uc_addr_set(dmvgbe, dmvgbe->dev.enetaddr);
	return 0;
}
#endif
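
/*
 * Transmit path: a single TX descriptor is used. The frame (copied to an
 * 8-byte-aligned bounce buffer if needed) is handed to the DMA engine on
 * TX queue 0, and the call busy-waits until the engine returns descriptor
 * ownership or reports an error.
 */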
static int __mvgbe_send(struct mvgbe_device *dmvgbe, void *dataptr,
			int datasize)
{
	struct mvgbe_registers *regs = dmvgbe->regs;
	struct mvgbe_txdesc *p_txdesc = dmvgbe->p_txdesc;
	void *p = (void *)dataptr;
	u32 cmd_sts;
	u32 txuq0_reg_addr;

	/* Copy buffer if it's misaligned */
	if ((u32) dataptr & 0x07) {
		if (datasize > PKTSIZE_ALIGN) {
			printf("Non-aligned data too large (%d)\n",
			       datasize);
			return -1;
		}

		memcpy(dmvgbe->p_aligned_txbuf, p, datasize);
		p = dmvgbe->p_aligned_txbuf;
	}

	p_txdesc->cmd_sts = MVGBE_ZERO_PADDING | MVGBE_GEN_CRC;
	p_txdesc->cmd_sts |= MVGBE_TX_FIRST_DESC | MVGBE_TX_LAST_DESC;
	p_txdesc->cmd_sts |= MVGBE_BUFFER_OWNED_BY_DMA;
	p_txdesc->cmd_sts |= MVGBE_TX_EN_INTERRUPT;
	p_txdesc->buf_ptr = (u8 *) p;
	p_txdesc->byte_cnt = datasize;

	/* Set this tx desc as zeroth TXUQ */
	txuq0_reg_addr = (u32)&regs->tcqdp[TXUQ];
	writel((u32) p_txdesc, txuq0_reg_addr);

	/* ensure tx desc writes above are performed before we start Tx DMA */
	isb();

	/* Apply send command using zeroth TXUQ */
	MVGBE_REG_WR(regs->tqc, (1 << TXUQ));

	/*
	 * wait for packet xmit completion
	 */
	cmd_sts = readl(&p_txdesc->cmd_sts);
	while (cmd_sts & MVGBE_BUFFER_OWNED_BY_DMA) {
		/* return fail if error is detected */
		if ((cmd_sts & (MVGBE_ERROR_SUMMARY | MVGBE_TX_LAST_FRAME)) ==
		    (MVGBE_ERROR_SUMMARY | MVGBE_TX_LAST_FRAME) &&
		    cmd_sts & (MVGBE_UR_ERROR | MVGBE_RL_ERROR)) {
			printf("Err..(%s) in xmit packet\n", __func__);
			return -1;
		}
		cmd_sts = readl(&p_txdesc->cmd_sts);
	}

	return 0;
}

#ifndef CONFIG_DM_ETH
static int mvgbe_send(struct eth_device *dev, void *dataptr, int datasize)
{
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);

	return __mvgbe_send(dmvgbe, dataptr, datasize);
}
#endif
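
/*
 * Receive path: poll the current RX descriptor until the DMA engine
 * clears MVGBE_BUFFER_OWNED_BY_DMA, hand the payload to the caller, then
 * recycle the descriptor and advance to the next one in the ring.
 */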
static int __mvgbe_recv(struct mvgbe_device *dmvgbe, uchar **packetp)
{
	struct mvgbe_rxdesc *p_rxdesc_curr = dmvgbe->p_rxdesc_curr;
	u32 cmd_sts;
	u32 timeout = 0;
	u32 rxdesc_curr_addr;
	unsigned char *data;
	int rx_bytes = 0;

	*packetp = NULL;

	/* wait until an rx packet is available or the timeout expires */
	do {
		if (timeout < MVGBE_PHY_SMI_TIMEOUT)
			timeout++;
		else {
			debug("%s time out...\n", __func__);
			return -1;
		}
	} while (readl(&p_rxdesc_curr->cmd_sts) & MVGBE_BUFFER_OWNED_BY_DMA);

	if (p_rxdesc_curr->byte_cnt != 0) {
		debug("%s: Received %d byte Packet @ 0x%x (cmd_sts= %08x)\n",
		      __func__, (u32) p_rxdesc_curr->byte_cnt,
		      (u32) p_rxdesc_curr->buf_ptr,
		      (u32) p_rxdesc_curr->cmd_sts);
	}

	/*
	 * If the packet arrived without the first/last bits set,
	 * or the error summary bit is set,
	 * the packet needs to be dropped.
	 */
	cmd_sts = readl(&p_rxdesc_curr->cmd_sts);

	if ((cmd_sts &
	     (MVGBE_RX_FIRST_DESC | MVGBE_RX_LAST_DESC))
	    != (MVGBE_RX_FIRST_DESC | MVGBE_RX_LAST_DESC)) {
		printf("Err..(%s) Dropping packet spread on"
		       " multiple descriptors\n", __func__);
	} else if (cmd_sts & MVGBE_ERROR_SUMMARY) {
		printf("Err..(%s) Dropping packet with errors\n",
		       __func__);
	} else {
		/* !!! call higher layer processing */
		debug("%s: Sending received packet to"
		      " upper layer (net_process_received_packet)\n",
		      __func__);

		data = (p_rxdesc_curr->buf_ptr + RX_BUF_OFFSET);
		rx_bytes = (int)(p_rxdesc_curr->byte_cnt -
				 RX_BUF_OFFSET);

		*packetp = data;
	}
	/*
	 * free this descriptor and point to the next one in the ring
	 */
	p_rxdesc_curr->cmd_sts =
		MVGBE_BUFFER_OWNED_BY_DMA | MVGBE_RX_EN_INTERRUPT;
	p_rxdesc_curr->buf_size = PKTSIZE_ALIGN;
	p_rxdesc_curr->byte_cnt = 0;

	rxdesc_curr_addr = (u32)&dmvgbe->p_rxdesc_curr;
	writel((unsigned)p_rxdesc_curr->nxtdesc_p, rxdesc_curr_addr);

	return rx_bytes;
}

#ifndef CONFIG_DM_ETH
static int mvgbe_recv(struct eth_device *dev)
{
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
	uchar *packet;
	int ret;

	ret = __mvgbe_recv(dmvgbe, &packet);
	if (ret < 0)
		return ret;

	net_process_received_packet(packet, ret);

	return 0;
}
#endif

#if defined(CONFIG_PHYLIB) || defined(CONFIG_DM_ETH)
#if defined(CONFIG_DM_ETH)
static struct phy_device *__mvgbe_phy_init(struct udevice *dev,
					   struct mii_dev *bus,
					   phy_interface_t phy_interface,
					   int phyid)
#else
static struct phy_device *__mvgbe_phy_init(struct eth_device *dev,
					   struct mii_dev *bus,
					   phy_interface_t phy_interface,
					   int phyid)
#endif
{
	struct phy_device *phydev;

	/* Set phy address of the port */
	miiphy_write(dev->name, MV_PHY_ADR_REQUEST, MV_PHY_ADR_REQUEST,
		     phyid);

	phydev = phy_connect(bus, phyid, dev, phy_interface);
	if (!phydev) {
		printf("phy_connect failed\n");
		return NULL;
	}

	phy_config(phydev);
	phy_startup(phydev);

	return phydev;
}
#endif /* CONFIG_PHYLIB || CONFIG_DM_ETH */

#if defined(CONFIG_PHYLIB) && !defined(CONFIG_DM_ETH)
int mvgbe_phylib_init(struct eth_device *dev, int phyid)
{
	struct mii_dev *bus;
	struct phy_device *phydev;
	int ret;

	bus = mdio_alloc();
	if (!bus) {
		printf("mdio_alloc failed\n");
		return -ENOMEM;
	}
	bus->read = smi_reg_read;
	bus->write = smi_reg_write;
	strcpy(bus->name, dev->name);

	ret = mdio_register(bus);
	if (ret) {
		printf("mdio_register failed\n");
		free(bus);
		return -ENOMEM;
	}

	phydev = __mvgbe_phy_init(dev, bus, PHY_INTERFACE_MODE_RGMII, phyid);
	if (!phydev)
		return -ENODEV;

	return 0;
}
#endif
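
/*
 * Buffer alignment matters here: the RX/TX descriptors and RX buffers are
 * PKTALIGN-aligned for the DMA engine, and the TX bounce buffer is 8-byte
 * aligned to satisfy the alignment check in __mvgbe_send().
 */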
static int mvgbe_alloc_buffers(struct mvgbe_device *dmvgbe)
{
	dmvgbe->p_rxdesc = memalign(PKTALIGN,
				    MV_RXQ_DESC_ALIGNED_SIZE * RINGSZ + 1);
	if (!dmvgbe->p_rxdesc)
		goto error1;

	dmvgbe->p_rxbuf = memalign(PKTALIGN,
				   RINGSZ * PKTSIZE_ALIGN + 1);
	if (!dmvgbe->p_rxbuf)
		goto error2;

	dmvgbe->p_aligned_txbuf = memalign(8, PKTSIZE_ALIGN);
	if (!dmvgbe->p_aligned_txbuf)
		goto error3;

	dmvgbe->p_txdesc = memalign(PKTALIGN, sizeof(struct mvgbe_txdesc) + 1);
	if (!dmvgbe->p_txdesc)
		goto error4;

	return 0;

error4:
	free(dmvgbe->p_aligned_txbuf);
error3:
	free(dmvgbe->p_rxbuf);
error2:
	free(dmvgbe->p_rxdesc);
error1:
	return -ENOMEM;
}

#ifndef CONFIG_DM_ETH
int mvgbe_initialize(bd_t *bis)
{
	struct mvgbe_device *dmvgbe;
	struct eth_device *dev;
	int devnum;
	int ret;
	u8 used_ports[MAX_MVGBE_DEVS] = CONFIG_MVGBE_PORTS;

	for (devnum = 0; devnum < MAX_MVGBE_DEVS; devnum++) {
		/* skip if the port is configured not to be used */
		if (used_ports[devnum] == 0)
			continue;

		dmvgbe = malloc(sizeof(struct mvgbe_device));
		if (!dmvgbe)
			return -ENOMEM;

		memset(dmvgbe, 0, sizeof(struct mvgbe_device));
		ret = mvgbe_alloc_buffers(dmvgbe);
		if (ret) {
			printf("Err.. %s Failed to allocate memory\n",
			       __func__);
			free(dmvgbe);
			return ret;
		}

		dev = &dmvgbe->dev;

		/* must be less than sizeof(dev->name) */
		sprintf(dev->name, "egiga%d", devnum);

		switch (devnum) {
		case 0:
			dmvgbe->regs = (void *)MVGBE0_BASE;
			break;
#if defined(MVGBE1_BASE)
		case 1:
			dmvgbe->regs = (void *)MVGBE1_BASE;
			break;
#endif
		default:	/* this should never happen */
			printf("Err..(%s) Invalid device number %d\n",
			       __func__, devnum);
			return -1;
		}

		dev->init = (void *)mvgbe_init;
		dev->halt = (void *)mvgbe_halt;
		dev->send = (void *)mvgbe_send;
		dev->recv = (void *)mvgbe_recv;
		dev->write_hwaddr = (void *)mvgbe_write_hwaddr;

		eth_register(dev);

#if defined(CONFIG_PHYLIB)
		mvgbe_phylib_init(dev, PHY_BASE_ADR + devnum);
#elif defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
		int retval;
		struct mii_dev *mdiodev = mdio_alloc();

		if (!mdiodev)
			return -ENOMEM;
		strncpy(mdiodev->name, dev->name, MDIO_NAME_LEN);
		mdiodev->read = smi_reg_read;
		mdiodev->write = smi_reg_write;

		retval = mdio_register(mdiodev);
		if (retval < 0)
			return retval;
		/* Set phy address of the port */
		miiphy_write(dev->name, MV_PHY_ADR_REQUEST,
			     MV_PHY_ADR_REQUEST, PHY_BASE_ADR + devnum);
#endif
	}
	return 0;
}
#endif

#ifdef CONFIG_DM_ETH
static int mvgbe_port_is_fixed_link(struct mvgbe_device *dmvgbe)
{
	return dmvgbe->phyaddr > PHY_MAX_ADDR;
}

static int mvgbe_start(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct mvgbe_device *dmvgbe = dev_get_priv(dev);
	int ret;

	ret = __mvgbe_init(dmvgbe, pdata->enetaddr, dev->name);
	if (ret)
		return ret;

	if (!mvgbe_port_is_fixed_link(dmvgbe)) {
		dmvgbe->phydev = __mvgbe_phy_init(dev, dmvgbe->bus,
						  dmvgbe->phy_interface,
						  dmvgbe->phyaddr);
		if (!dmvgbe->phydev)
			return -ENODEV;
	}

	return 0;
}

static int mvgbe_send(struct udevice *dev, void *packet, int length)
{
	struct mvgbe_device *dmvgbe = dev_get_priv(dev);

	return __mvgbe_send(dmvgbe, packet, length);
}

static int mvgbe_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct mvgbe_device *dmvgbe = dev_get_priv(dev);

	return __mvgbe_recv(dmvgbe, packetp);
}

static void mvgbe_stop(struct udevice *dev)
{
	struct mvgbe_device *dmvgbe = dev_get_priv(dev);

	__mvgbe_halt(dmvgbe);
}

static int mvgbe_probe(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct mvgbe_device *dmvgbe = dev_get_priv(dev);
	struct mii_dev *bus;
	int ret;

	ret = mvgbe_alloc_buffers(dmvgbe);
	if (ret)
		return ret;

	dmvgbe->regs = (void __iomem *)pdata->iobase;

	bus = mdio_alloc();
	if (!bus) {
		printf("Failed to allocate MDIO bus\n");
		return -ENOMEM;
	}

	bus->read = smi_reg_read;
	bus->write = smi_reg_write;
	snprintf(bus->name, sizeof(bus->name), "%s", dev->name);
	bus->priv = dmvgbe;
	dmvgbe->bus = bus;

	ret = mdio_register(bus);
	if (ret < 0)
		return ret;

	return 0;
}

static const struct eth_ops mvgbe_ops = {
	.start		= mvgbe_start,
	.send		= mvgbe_send,
	.recv		= mvgbe_recv,
	.stop		= mvgbe_stop,
	.write_hwaddr	= mvgbe_write_hwaddr,
};

static int mvgbe_ofdata_to_platdata(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct mvgbe_device *dmvgbe = dev_get_priv(dev);
	void *blob = (void *)gd->fdt_blob;
	int node = dev_of_offset(dev);
	const char *phy_mode;
	int fl_node;
	int pnode;
	int addr;

	pdata->iobase = devfdt_get_addr(dev);
	pdata->phy_interface = -1;

	pnode = fdt_node_offset_by_compatible(blob, node,
					      "marvell,kirkwood-eth-port");

	/* Get phy-mode / phy_interface from DT */
	phy_mode = fdt_getprop(gd->fdt_blob, pnode, "phy-mode", NULL);
	if (phy_mode)
		pdata->phy_interface = phy_get_interface_by_name(phy_mode);
	else
		pdata->phy_interface = PHY_INTERFACE_MODE_GMII;

	dmvgbe->phy_interface = pdata->phy_interface;

	/* fetch 'fixed-link' property */
	fl_node = fdt_subnode_offset(blob, pnode, "fixed-link");
	if (fl_node != -FDT_ERR_NOTFOUND) {
		/* set phy_addr to an invalid value for fixed link */
		dmvgbe->phyaddr = PHY_MAX_ADDR + 1;
		dmvgbe->duplex = fdtdec_get_bool(blob, fl_node, "full-duplex");
		dmvgbe->speed = fdtdec_get_int(blob, fl_node, "speed", 0);
	} else {
		/* Now read phyaddr from DT */
		addr = fdtdec_lookup_phandle(blob, pnode, "phy-handle");
		if (addr > 0)
			dmvgbe->phyaddr = fdtdec_get_int(blob, addr, "reg", 0);
	}

	return 0;
}
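
/*
 * Illustrative device-tree fragment consumed above (node and property
 * names follow the marvell,kirkwood-eth binding; values are examples):
 *
 *	eth0: ethernet-controller@72000 {
 *		compatible = "marvell,kirkwood-eth";
 *		ethernet0-port@0 {
 *			compatible = "marvell,kirkwood-eth-port";
 *			reg = <0>;
 *			phy-mode = "rgmii";
 *			phy-handle = <&ethphy0>;
 *		};
 *	};
 *
 * A "fixed-link" subnode in the port node (with "speed" and optional
 * "full-duplex") selects the fixed-link path instead of phy-handle.
 */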

static const struct udevice_id mvgbe_ids[] = {
	{ .compatible = "marvell,kirkwood-eth" },
	{ }
};

U_BOOT_DRIVER(mvgbe) = {
	.name	= "mvgbe",
	.id	= UCLASS_ETH,
	.of_match = mvgbe_ids,
	.ofdata_to_platdata = mvgbe_ofdata_to_platdata,
	.probe	= mvgbe_probe,
	.ops	= &mvgbe_ops,
	.priv_auto_alloc_size = sizeof(struct mvgbe_device),
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
};
#endif /* CONFIG_DM_ETH */