xilinx_axi_emac.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2021 Waymo LLC
 * Copyright (C) 2011 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2011 PetaLogix
 * Copyright (C) 2010 Xilinx, Inc. All rights reserved.
 */

#include <config.h>
#include <common.h>
#include <cpu_func.h>
#include <dm.h>
#include <log.h>
#include <net.h>
#include <malloc.h>
#include <asm/global_data.h>
#include <asm/io.h>
#include <phy.h>
#include <miiphy.h>
#include <wait_bit.h>
#include <linux/delay.h>

DECLARE_GLOBAL_DATA_PTR;

/* Link setup */
#define XAE_EMMC_LINKSPEED_MASK	0xC0000000 /* Link speed */
#define XAE_EMMC_LINKSPD_10	0x00000000 /* Link Speed mask for 10 Mbit */
#define XAE_EMMC_LINKSPD_100	0x40000000 /* Link Speed mask for 100 Mbit */
#define XAE_EMMC_LINKSPD_1000	0x80000000 /* Link Speed mask for 1000 Mbit */

/* Interrupt Status/Enable/Mask Registers bit definitions */
#define XAE_INT_RXRJECT_MASK	0x00000008 /* Rx frame rejected */
#define XAE_INT_MGTRDY_MASK	0x00000080 /* MGT clock Lock */

/* Receive Configuration Word 1 (RCW1) Register bit definitions */
#define XAE_RCW1_RX_MASK	0x10000000 /* Receiver enable */

/* Transmitter Configuration (TC) Register bit definitions */
#define XAE_TC_TX_MASK		0x10000000 /* Transmitter enable */

#define XAE_UAW1_UNICASTADDR_MASK	0x0000FFFF

/* MDIO Management Configuration (MC) Register bit definitions */
#define XAE_MDIO_MC_MDIOEN_MASK	0x00000040 /* MII management enable */

/* MDIO Management Control Register (MCR) Register bit definitions */
#define XAE_MDIO_MCR_PHYAD_MASK		0x1F000000 /* Phy Address Mask */
#define XAE_MDIO_MCR_PHYAD_SHIFT	24 /* Phy Address Shift */
#define XAE_MDIO_MCR_REGAD_MASK		0x001F0000 /* Reg Address Mask */
#define XAE_MDIO_MCR_REGAD_SHIFT	16 /* Reg Address Shift */
#define XAE_MDIO_MCR_OP_READ_MASK	0x00008000 /* Op Code Read Mask */
#define XAE_MDIO_MCR_OP_WRITE_MASK	0x00004000 /* Op Code Write Mask */
#define XAE_MDIO_MCR_INITIATE_MASK	0x00000800 /* Initiate Mask */
#define XAE_MDIO_MCR_READY_MASK		0x00000080 /* Ready Mask */

#define XAE_MDIO_DIV_DFT	29 /* Default MDIO clock divisor */
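
/*
 * Example: phyread() below builds a read of PHY address 3, register 2 as
 *	((3 << XAE_MDIO_MCR_PHYAD_SHIFT) & XAE_MDIO_MCR_PHYAD_MASK) |
 *	((2 << XAE_MDIO_MCR_REGAD_SHIFT) & XAE_MDIO_MCR_REGAD_MASK) |
 *	XAE_MDIO_MCR_OP_READ_MASK | XAE_MDIO_MCR_INITIATE_MASK
 * and writes the result to the MCR once the READY bit is set.
 */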

#define XAXIDMA_BD_STS_ACTUAL_LEN_MASK	0x007FFFFF /* Actual len */

/* DMA macros */
/* Bitmasks of XAXIDMA_CR_OFFSET register */
#define XAXIDMA_CR_RUNSTOP_MASK	0x00000001 /* Start/stop DMA channel */
#define XAXIDMA_CR_RESET_MASK	0x00000004 /* Reset DMA engine */

/* Bitmasks of XAXIDMA_SR_OFFSET register */
#define XAXIDMA_HALTED_MASK	0x00000001 /* DMA channel halted */

/* Bitmask for interrupts */
#define XAXIDMA_IRQ_IOC_MASK	0x00001000 /* Completion intr */
#define XAXIDMA_IRQ_DELAY_MASK	0x00002000 /* Delay interrupt */
#define XAXIDMA_IRQ_ALL_MASK	0x00007000 /* All interrupts */

/* Bitmasks of XAXIDMA_BD_CTRL_OFFSET register */
#define XAXIDMA_BD_CTRL_TXSOF_MASK	0x08000000 /* First tx packet */
#define XAXIDMA_BD_CTRL_TXEOF_MASK	0x04000000 /* Last tx packet */

/* Bitmasks for XXV Ethernet MAC */
#define XXV_TC_TX_MASK		0x00000001
#define XXV_TC_FCS_MASK		0x00000002
#define XXV_RCW1_RX_MASK	0x00000001
#define XXV_RCW1_FCS_MASK	0x00000002

#define DMAALIGN		128
#define XXV_MIN_PKT_SIZE	60

static u8 rxframe[PKTSIZE_ALIGN] __attribute((aligned(DMAALIGN)));
static u8 txminframe[XXV_MIN_PKT_SIZE] __attribute((aligned(DMAALIGN)));

enum emac_variant {
	EMAC_1G = 0,
	EMAC_10G_25G = 1,
};

/* Reflect dma offsets */
struct axidma_reg {
	u32 control;	/* DMACR */
	u32 status;	/* DMASR */
	u32 current;	/* CURDESC low 32 bit */
	u32 current_hi;	/* CURDESC high 32 bit */
	u32 tail;	/* TAILDESC low 32 bit */
	u32 tail_hi;	/* TAILDESC high 32 bit */
};
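
/*
 * This matches the standard AXI DMA per-channel register block: DMACR at
 * 0x00, DMASR at 0x04, CURDESC at 0x08/0x0C and TAILDESC at 0x10/0x14.
 * The TX (MM2S) channel sits at the DMA base address; probe() derives the
 * RX (S2MM) channel from it at the usual 0x30 offset.
 */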

/* Platform data structures */
struct axidma_plat {
	struct eth_pdata eth_pdata;
	struct axidma_reg *dmatx;
	struct axidma_reg *dmarx;
	int phyaddr;
	u8 eth_hasnobuf;
	int phy_of_handle;
	enum emac_variant mactype;
};

/* Private driver structures */
struct axidma_priv {
	struct axidma_reg *dmatx;
	struct axidma_reg *dmarx;
	int phyaddr;
	struct axi_regs *iobase;
	phy_interface_t interface;
	struct phy_device *phydev;
	struct mii_dev *bus;
	u8 eth_hasnobuf;
	int phy_of_handle;
	enum emac_variant mactype;
};

/* BD descriptors */
struct axidma_bd {
	u32 next_desc;	/* Next descriptor pointer */
	u32 next_desc_msb;
	u32 buf_addr;	/* Buffer address */
	u32 buf_addr_msb;
	u32 reserved3;
	u32 reserved4;
	u32 cntrl;	/* Control */
	u32 status;	/* Status */
	u32 app0;
	u32 app1;	/* TX start << 16 | insert */
	u32 app2;	/* TX csum seed */
	u32 app3;
	u32 app4;
	u32 sw_id_offset;
	u32 reserved5;
	u32 reserved6;
};
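
/*
 * The first words follow the AXI DMA scatter-gather descriptor layout
 * (NXTDESC, BUFFER_ADDRESS, CONTROL, STATUS); APP0-APP4 carry AXI
 * Ethernet metadata, e.g. the received frame length lands in app4.
 * Descriptors must be DMA-aligned, hence the DMAALIGN attribute below.
 */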

/* Static BDs - driver uses only one BD */
static struct axidma_bd tx_bd __attribute((aligned(DMAALIGN)));
static struct axidma_bd rx_bd __attribute((aligned(DMAALIGN)));

struct axi_regs {
	u32 reserved[3];
	u32 is;		/* 0xC: Interrupt status */
	u32 reserved2;
	u32 ie;		/* 0x14: Interrupt enable */
	u32 reserved3[251];
	u32 rcw1;	/* 0x404: Rx Configuration Word 1 */
	u32 tc;		/* 0x408: Tx Configuration */
	u32 reserved4;
	u32 emmc;	/* 0x410: EMAC mode configuration */
	u32 reserved5[59];
	u32 mdio_mc;	/* 0x500: MII Management Config */
	u32 mdio_mcr;	/* 0x504: MII Management Control */
	u32 mdio_mwd;	/* 0x508: MII Management Write Data */
	u32 mdio_mrd;	/* 0x50C: MII Management Read Data */
	u32 reserved6[124];
	u32 uaw0;	/* 0x700: Unicast address word 0 */
	u32 uaw1;	/* 0x704: Unicast address word 1 */
};

struct xxv_axi_regs {
	u32 gt_reset;	/* 0x0 */
	u32 reserved[2];
	u32 tc;		/* 0xC: Tx Configuration */
	u32 reserved2;
	u32 rcw1;	/* 0x14: Rx Configuration Word 1 */
};

/* Use MII register 1 (MII status register) to detect PHY */
#define PHY_DETECT_REG	1

/*
 * Mask used to verify certain PHY features (or register contents)
 * in the register above:
 * 0x1000: 10Mbps full duplex support
 * 0x0800: 10Mbps half duplex support
 * 0x0008: Auto-negotiation support
 */
#define PHY_DETECT_MASK	0x1808
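
/* These are the BMSR_10FULL, BMSR_10HALF and BMSR_ANEGCAPABLE bits of the
 * MII status register: 0x1000 | 0x0800 | 0x0008 == 0x1808.
 */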

static inline int mdio_wait(struct axi_regs *regs)
{
	u32 timeout = 200;

	/* Wait till MDIO interface is ready to accept a new transaction. */
	while (timeout && (!(readl(&regs->mdio_mcr)
			     & XAE_MDIO_MCR_READY_MASK))) {
		timeout--;
		udelay(1);
	}

	if (!timeout) {
		printf("%s: Timeout\n", __func__);
		return 1;
	}

	return 0;
}

/**
 * axienet_dma_write - Memory mapped Axi DMA register Buffer Descriptor write.
 * @bd:		pointer to BD descriptor structure
 * @desc:	Address offset of DMA descriptors
 *
 * This function writes the value into the corresponding Axi DMA register.
 */
static inline void axienet_dma_write(struct axidma_bd *bd, u32 *desc)
{
#if defined(CONFIG_PHYS_64BIT)
	writeq((unsigned long)bd, desc);
#else
	writel((u32)bd, desc);
#endif
}
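
/*
 * Note: on CONFIG_PHYS_64BIT targets this appears to rely on the low and
 * high descriptor words being adjacent in struct axidma_reg, so a single
 * writeq() fills CURDESC/CURDESC_MSB (or TAILDESC/TAILDESC_MSB) at once.
 */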

static u32 phyread(struct axidma_priv *priv, u32 phyaddress, u32 registernum,
		   u16 *val)
{
	struct axi_regs *regs = priv->iobase;
	u32 mdioctrlreg = 0;

	if (mdio_wait(regs))
		return 1;

	mdioctrlreg = ((phyaddress << XAE_MDIO_MCR_PHYAD_SHIFT) &
		       XAE_MDIO_MCR_PHYAD_MASK) |
		      ((registernum << XAE_MDIO_MCR_REGAD_SHIFT) &
		       XAE_MDIO_MCR_REGAD_MASK) |
		      XAE_MDIO_MCR_INITIATE_MASK |
		      XAE_MDIO_MCR_OP_READ_MASK;

	writel(mdioctrlreg, &regs->mdio_mcr);

	if (mdio_wait(regs))
		return 1;

	/* Read data */
	*val = readl(&regs->mdio_mrd);
	return 0;
}

static u32 phywrite(struct axidma_priv *priv, u32 phyaddress, u32 registernum,
		    u32 data)
{
	struct axi_regs *regs = priv->iobase;
	u32 mdioctrlreg = 0;

	if (mdio_wait(regs))
		return 1;

	mdioctrlreg = ((phyaddress << XAE_MDIO_MCR_PHYAD_SHIFT) &
		       XAE_MDIO_MCR_PHYAD_MASK) |
		      ((registernum << XAE_MDIO_MCR_REGAD_SHIFT) &
		       XAE_MDIO_MCR_REGAD_MASK) |
		      XAE_MDIO_MCR_INITIATE_MASK |
		      XAE_MDIO_MCR_OP_WRITE_MASK;

	/* Write data */
	writel(data, &regs->mdio_mwd);

	writel(mdioctrlreg, &regs->mdio_mcr);

	if (mdio_wait(regs))
		return 1;

	return 0;
}

static int axiemac_phy_init(struct udevice *dev)
{
	struct axidma_priv *priv = dev_get_priv(dev);
	struct axi_regs *regs = priv->iobase;
	struct phy_device *phydev;
	u32 supported = SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Half |
			SUPPORTED_1000baseT_Full;
	u16 phyreg;
	u32 ret;
	int i;

	/* Set default MDIO divisor */
	writel(XAE_MDIO_DIV_DFT | XAE_MDIO_MC_MDIOEN_MASK, &regs->mdio_mc);

	if (priv->phyaddr == -1) {
		/* Detect the PHY address */
		for (i = 31; i >= 0; i--) {
			ret = phyread(priv, i, PHY_DETECT_REG, &phyreg);
			if (!ret && phyreg != 0xFFFF &&
			    (phyreg & PHY_DETECT_MASK) == PHY_DETECT_MASK) {
				/* Found a valid PHY address */
				priv->phyaddr = i;
				debug("axiemac: Found valid phy address, %x\n",
				      i);
				break;
			}
		}
	}

	/* Interface - look at tsec */
	phydev = phy_connect(priv->bus, priv->phyaddr, dev, priv->interface);
	if (!phydev)
		return -ENODEV;

	phydev->supported &= supported;
	phydev->advertising = phydev->supported;
	priv->phydev = phydev;
	if (priv->phy_of_handle)
		priv->phydev->node = offset_to_ofnode(priv->phy_of_handle);
	phy_config(phydev);

	return 0;
}

/* Setting axi emac and phy to proper setting */
static int setup_phy(struct udevice *dev)
{
	u16 temp;
	u32 speed, emmc_reg, ret;
	struct axidma_priv *priv = dev_get_priv(dev);
	struct axi_regs *regs = priv->iobase;
	struct phy_device *phydev = priv->phydev;

	if (priv->interface == PHY_INTERFACE_MODE_SGMII) {
		/*
		 * In SGMII cases the isolate bit might be set after DMA and
		 * ethernet resets, so check it and clear it if needed.
		 */
		ret = phyread(priv, priv->phyaddr, MII_BMCR, &temp);
		if (ret)
			return 0;
		if (temp & BMCR_ISOLATE) {
			temp &= ~BMCR_ISOLATE;
			ret = phywrite(priv, priv->phyaddr, MII_BMCR, temp);
			if (ret)
				return 0;
		}
	}

	if (phy_startup(phydev)) {
		printf("axiemac: could not initialize PHY %s\n",
		       phydev->dev->name);
		return 0;
	}
	if (!phydev->link) {
		printf("%s: No link.\n", phydev->dev->name);
		return 0;
	}

	switch (phydev->speed) {
	case 1000:
		speed = XAE_EMMC_LINKSPD_1000;
		break;
	case 100:
		speed = XAE_EMMC_LINKSPD_100;
		break;
	case 10:
		speed = XAE_EMMC_LINKSPD_10;
		break;
	default:
		return 0;
	}

	/* Setup the emac for the phy speed */
	emmc_reg = readl(&regs->emmc);
	emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;
	emmc_reg |= speed;

	/* Write new speed setting out to Axi Ethernet */
	writel(emmc_reg, &regs->emmc);

	/*
	 * Setting the operating speed of the MAC needs a delay. There
	 * doesn't seem to be a register to poll, so please consider this
	 * during your application design.
	 */
	udelay(1);

	return 1;
}

/* STOP DMA transfers */
static void axiemac_stop(struct udevice *dev)
{
	struct axidma_priv *priv = dev_get_priv(dev);
	u32 temp;

	/* Stop the hardware */
	temp = readl(&priv->dmatx->control);
	temp &= ~XAXIDMA_CR_RUNSTOP_MASK;
	writel(temp, &priv->dmatx->control);

	temp = readl(&priv->dmarx->control);
	temp &= ~XAXIDMA_CR_RUNSTOP_MASK;
	writel(temp, &priv->dmarx->control);

	debug("axiemac: Halted\n");
}

static int xxv_axi_ethernet_init(struct axidma_priv *priv)
{
	struct xxv_axi_regs *regs = (struct xxv_axi_regs *)priv->iobase;

	writel(readl(&regs->rcw1) | XXV_RCW1_FCS_MASK, &regs->rcw1);
	writel(readl(&regs->tc) | XXV_TC_FCS_MASK, &regs->tc);
	writel(readl(&regs->tc) | XXV_TC_TX_MASK, &regs->tc);
	writel(readl(&regs->rcw1) | XXV_RCW1_RX_MASK, &regs->rcw1);

	return 0;
}
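
/*
 * The XXV (10G/25G) MAC has no EMMC speed field and no MDIO block in this
 * register view; it only needs FCS handling and the TX/RX enables, so the
 * PHY setup path is taken for the 1G variant only.
 */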

static int axi_ethernet_init(struct axidma_priv *priv)
{
	struct axi_regs *regs = priv->iobase;
	int err;

	/*
	 * Check the status of the MgtRdy bit in the interrupt status
	 * register. This must be done to allow the MGT clock to become stable
	 * for the SGMII and 1000BaseX PHY interfaces. No other register reads
	 * will be valid until this bit is valid.
	 * The bit is always a 1 for all other PHY interfaces.
	 * Interrupt status and enable registers are not available in
	 * non-processor mode, so bypass this check in that mode.
	 */
	if (!priv->eth_hasnobuf) {
		err = wait_for_bit_le32(&regs->is, XAE_INT_MGTRDY_MASK,
					true, 200, false);
		if (err) {
			printf("%s: Timeout\n", __func__);
			return 1;
		}

		/*
		 * Stop the device and reset HW
		 * Disable interrupts
		 */
		writel(0, &regs->ie);
	}

	/* Disable the receiver */
	writel(readl(&regs->rcw1) & ~XAE_RCW1_RX_MASK, &regs->rcw1);

	/*
	 * Stopping the receiver in mid-packet causes a dropped packet
	 * indication from HW. Clear it.
	 */
	if (!priv->eth_hasnobuf) {
		/* Set the interrupt status register to clear the interrupt */
		writel(XAE_INT_RXRJECT_MASK, &regs->is);
	}

	/* Setup HW */
	/* Set default MDIO divisor */
	writel(XAE_MDIO_DIV_DFT | XAE_MDIO_MC_MDIOEN_MASK, &regs->mdio_mc);

	debug("axiemac: InitHw done\n");
	return 0;
}

static int axiemac_write_hwaddr(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct axidma_priv *priv = dev_get_priv(dev);
	struct axi_regs *regs = priv->iobase;
	int val;

	if (priv->mactype != EMAC_1G)
		return 0;

	/* Set the MAC address */
	val = ((pdata->enetaddr[3] << 24) | (pdata->enetaddr[2] << 16) |
	       (pdata->enetaddr[1] << 8) | (pdata->enetaddr[0]));
	writel(val, &regs->uaw0);

	val = (pdata->enetaddr[5] << 8) | pdata->enetaddr[4];
	val |= readl(&regs->uaw1) & ~XAE_UAW1_UNICASTADDR_MASK;
	writel(val, &regs->uaw1);

	return 0;
}

/* Reset DMA engine */
static void axi_dma_init(struct axidma_priv *priv)
{
	u32 timeout = 500;

	/* Reset the engine so the hardware starts from a known state */
	writel(XAXIDMA_CR_RESET_MASK, &priv->dmatx->control);
	writel(XAXIDMA_CR_RESET_MASK, &priv->dmarx->control);

	/* At the initialization time, hardware should finish reset quickly */
	while (timeout) {
		/* Check transmit/receive channel */
		/* Reset is done when the reset bit is low */
		if (!((readl(&priv->dmatx->control) |
		       readl(&priv->dmarx->control))
		      & XAXIDMA_CR_RESET_MASK))
			break;
		/*
		 * Decrement inside the loop body (not in the condition) so
		 * timeout is exactly 0, not wrapped around, on expiry and
		 * the message below actually fires.
		 */
		timeout--;
	}

	if (!timeout)
		printf("%s: Timeout\n", __func__);
}
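
/*
 * XAXIDMA_CR_RESET_MASK is self-clearing: hardware drops the bit in both
 * channel control registers once the reset completes, which is what the
 * loop above polls for.
 */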

static int axiemac_start(struct udevice *dev)
{
	struct axidma_priv *priv = dev_get_priv(dev);
	u32 temp;

	debug("axiemac: Init started\n");
	/*
	 * Initialize AXIDMA engine. AXIDMA engine must be initialized before
	 * AxiEthernet. During AXIDMA engine initialization, AXIDMA hardware is
	 * reset, and since AXIDMA reset line is connected to AxiEthernet, this
	 * would ensure a reset of AxiEthernet.
	 */
	axi_dma_init(priv);

	/* Initialize AxiEthernet hardware. */
	if (priv->mactype == EMAC_1G) {
		if (axi_ethernet_init(priv))
			return -1;
	} else {
		if (xxv_axi_ethernet_init(priv))
			return -1;
	}

	/* Disable all RX interrupts before RxBD space setup */
	temp = readl(&priv->dmarx->control);
	temp &= ~XAXIDMA_IRQ_ALL_MASK;
	writel(temp, &priv->dmarx->control);

	/* Start DMA RX channel. Now it's ready to receive data. */
	axienet_dma_write(&rx_bd, &priv->dmarx->current);

	/* Setup the BD. */
	memset(&rx_bd, 0, sizeof(rx_bd));
	rx_bd.next_desc = lower_32_bits((unsigned long)&rx_bd);
	rx_bd.buf_addr = lower_32_bits((unsigned long)&rxframe);
#if defined(CONFIG_PHYS_64BIT)
	rx_bd.next_desc_msb = upper_32_bits((unsigned long)&rx_bd);
	rx_bd.buf_addr_msb = upper_32_bits((unsigned long)&rxframe);
#endif
	rx_bd.cntrl = sizeof(rxframe);
	/* Flush the last BD so DMA core could see the updates */
	flush_cache((phys_addr_t)&rx_bd, sizeof(rx_bd));

	/* It is necessary to flush rxframe because if you don't do it
	 * then cache can contain uninitialized data
	 */
	flush_cache((phys_addr_t)&rxframe, sizeof(rxframe));

	/* Start the hardware */
	temp = readl(&priv->dmarx->control);
	temp |= XAXIDMA_CR_RUNSTOP_MASK;
	writel(temp, &priv->dmarx->control);

	/* Rx BD is ready - start */
	axienet_dma_write(&rx_bd, &priv->dmarx->tail);

	if (priv->mactype == EMAC_1G) {
		struct axi_regs *regs = priv->iobase;

		/* Enable TX */
		writel(XAE_TC_TX_MASK, &regs->tc);
		/* Enable RX */
		writel(XAE_RCW1_RX_MASK, &regs->rcw1);

		/* PHY setup */
		if (!setup_phy(dev)) {
			axiemac_stop(dev);
			return -1;
		}
	} else {
		struct xxv_axi_regs *regs = (struct xxv_axi_regs *)priv->iobase;

		/* Enable TX */
		writel(readl(&regs->tc) | XXV_TC_TX_MASK, &regs->tc);
		/* Enable RX */
		writel(readl(&regs->rcw1) | XXV_RCW1_RX_MASK, &regs->rcw1);
	}

	debug("axiemac: Init complete\n");
	return 0;
}
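
/*
 * Per the AXI DMA programming model, CURDESC is only writable while the
 * channel is halted, which is why axiemac_start() programs the RX
 * descriptor address before setting RUNSTOP and only then writes TAILDESC
 * to hand the BD to hardware.
 */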

static int axiemac_send(struct udevice *dev, void *ptr, int len)
{
	struct axidma_priv *priv = dev_get_priv(dev);
	u32 timeout;

	if (len > PKTSIZE_ALIGN)
		len = PKTSIZE_ALIGN;

	/* If the packet is shorter than the minimum size, pad it to size */
	if (priv->mactype == EMAC_10G_25G && len < XXV_MIN_PKT_SIZE) {
		memset(txminframe, 0, XXV_MIN_PKT_SIZE);
		memcpy(txminframe, ptr, len);
		len = XXV_MIN_PKT_SIZE;
		ptr = txminframe;
	}

	/* Flush packet to main memory to be transferred by DMA */
	flush_cache((phys_addr_t)ptr, len);

	/* Setup Tx BD */
	memset(&tx_bd, 0, sizeof(tx_bd));
	/* At the end of the ring, link the last BD back to the top */
	tx_bd.next_desc = lower_32_bits((unsigned long)&tx_bd);
	tx_bd.buf_addr = lower_32_bits((unsigned long)ptr);
#if defined(CONFIG_PHYS_64BIT)
	tx_bd.next_desc_msb = upper_32_bits((unsigned long)&tx_bd);
	tx_bd.buf_addr_msb = upper_32_bits((unsigned long)ptr);
#endif
	/* Save len */
	tx_bd.cntrl = len | XAXIDMA_BD_CTRL_TXSOF_MASK |
		      XAXIDMA_BD_CTRL_TXEOF_MASK;

	/* Flush the last BD so DMA core could see the updates */
	flush_cache((phys_addr_t)&tx_bd, sizeof(tx_bd));

	if (readl(&priv->dmatx->status) & XAXIDMA_HALTED_MASK) {
		u32 temp;

		axienet_dma_write(&tx_bd, &priv->dmatx->current);
		/* Start the hardware */
		temp = readl(&priv->dmatx->control);
		temp |= XAXIDMA_CR_RUNSTOP_MASK;
		writel(temp, &priv->dmatx->control);
	}

	/* Start transfer */
	axienet_dma_write(&tx_bd, &priv->dmatx->tail);

	/* Wait for transmission to complete */
	debug("axiemac: Waiting for tx to be done\n");
	timeout = 200;
	while (timeout && (!(readl(&priv->dmatx->status) &
			     (XAXIDMA_IRQ_DELAY_MASK | XAXIDMA_IRQ_IOC_MASK)))) {
		timeout--;
		udelay(1);
	}
	if (!timeout) {
		printf("%s: Timeout\n", __func__);
		return 1;
	}

	debug("axiemac: Sending complete\n");
	return 0;
}
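
/*
 * Only one TX BD is ever outstanding: tx_bd.next_desc points back at
 * tx_bd itself, forming a one-entry ring, and each send busy-waits until
 * the IOC/delay status bits report completion.
 */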

static int isrxready(struct axidma_priv *priv)
{
	u32 status;

	/* Read pending interrupts */
	status = readl(&priv->dmarx->status);

	/* Acknowledge pending interrupts */
	writel(status & XAXIDMA_IRQ_ALL_MASK, &priv->dmarx->status);

	/*
	 * If the reception-done interrupt is asserted, a frame is waiting in
	 * the RX BD, so report it as ready.
	 */
	if ((status & (XAXIDMA_IRQ_DELAY_MASK | XAXIDMA_IRQ_IOC_MASK)))
		return 1;

	return 0;
}

static int axiemac_recv(struct udevice *dev, int flags, uchar **packetp)
{
	u32 length;
	struct axidma_priv *priv = dev_get_priv(dev);
	u32 temp;

	/* Wait for an incoming packet */
	if (!isrxready(priv))
		return -1;

	debug("axiemac: RX data ready\n");

	/* Disable IRQ for a moment until the packet is handled */
	temp = readl(&priv->dmarx->control);
	temp &= ~XAXIDMA_IRQ_ALL_MASK;
	writel(temp, &priv->dmarx->control);

	if (!priv->eth_hasnobuf && priv->mactype == EMAC_1G)
		length = rx_bd.app4 & 0xFFFF; /* max length mask */
	else
		length = rx_bd.status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;

#ifdef DEBUG
	print_buffer(&rxframe, &rxframe[0], 1, length, 16);
#endif

	*packetp = rxframe;
	return length;
}

static int axiemac_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct axidma_priv *priv = dev_get_priv(dev);

#ifdef DEBUG
	/* It is useful to clear buffer to be sure that it is consistent */
	memset(rxframe, 0, sizeof(rxframe));
#endif
	/* Setup RxBD */
	/* Clear the whole buffer and set it up again - all flags are cleared */
	memset(&rx_bd, 0, sizeof(rx_bd));
	rx_bd.next_desc = lower_32_bits((unsigned long)&rx_bd);
	rx_bd.buf_addr = lower_32_bits((unsigned long)&rxframe);
#if defined(CONFIG_PHYS_64BIT)
	rx_bd.next_desc_msb = upper_32_bits((unsigned long)&rx_bd);
	rx_bd.buf_addr_msb = upper_32_bits((unsigned long)&rxframe);
#endif
	rx_bd.cntrl = sizeof(rxframe);

	/* Write bd to HW */
	flush_cache((phys_addr_t)&rx_bd, sizeof(rx_bd));

	/* It is necessary to flush rxframe because if you don't do it
	 * then cache will contain the previous packet
	 */
	flush_cache((phys_addr_t)&rxframe, sizeof(rxframe));

	/* Rx BD is ready - start again */
	axienet_dma_write(&rx_bd, &priv->dmarx->tail);

	debug("axiemac: RX completed, framelength = %d\n", length);
	return 0;
}
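
/*
 * In the driver model eth uclass, recv() hands the frame in rxframe to
 * the network stack and free_pkt() runs afterwards to recycle the buffer,
 * so re-arming the single RX BD belongs here rather than in recv().
 */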

static int axiemac_miiphy_read(struct mii_dev *bus, int addr,
			       int devad, int reg)
{
	int ret;
	u16 value;

	ret = phyread(bus->priv, addr, reg, &value);
	debug("axiemac: Read MII 0x%x, 0x%x, 0x%x, %d\n", addr, reg,
	      value, ret);
	return value;
}

static int axiemac_miiphy_write(struct mii_dev *bus, int addr, int devad,
				int reg, u16 value)
{
	debug("axiemac: Write MII 0x%x, 0x%x, 0x%x\n", addr, reg, value);
	return phywrite(bus->priv, addr, reg, value);
}

static int axi_emac_probe(struct udevice *dev)
{
	struct axidma_plat *plat = dev_get_plat(dev);
	struct eth_pdata *pdata = &plat->eth_pdata;
	struct axidma_priv *priv = dev_get_priv(dev);
	int ret;

	priv->iobase = (struct axi_regs *)pdata->iobase;
	priv->dmatx = plat->dmatx;
	/* RX channel offset is 0x30 */
	priv->dmarx = (struct axidma_reg *)((phys_addr_t)priv->dmatx + 0x30);
	priv->mactype = plat->mactype;

	if (priv->mactype == EMAC_1G) {
		priv->eth_hasnobuf = plat->eth_hasnobuf;
		priv->phyaddr = plat->phyaddr;
		priv->phy_of_handle = plat->phy_of_handle;
		priv->interface = pdata->phy_interface;

		priv->bus = mdio_alloc();
		priv->bus->read = axiemac_miiphy_read;
		priv->bus->write = axiemac_miiphy_write;
		priv->bus->priv = priv;

		ret = mdio_register_seq(priv->bus, dev_seq(dev));
		if (ret)
			return ret;

		axiemac_phy_init(dev);
	}

	return 0;
}

static int axi_emac_remove(struct udevice *dev)
{
	struct axidma_priv *priv = dev_get_priv(dev);

	if (priv->mactype == EMAC_1G) {
		free(priv->phydev);
		mdio_unregister(priv->bus);
		mdio_free(priv->bus);
	}

	return 0;
}

static const struct eth_ops axi_emac_ops = {
	.start			= axiemac_start,
	.send			= axiemac_send,
	.recv			= axiemac_recv,
	.free_pkt		= axiemac_free_pkt,
	.stop			= axiemac_stop,
	.write_hwaddr		= axiemac_write_hwaddr,
};
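
/*
 * The eth uclass calls these ops as start -> send/recv (+ free_pkt) ->
 * stop around each network command, so axiemac_start() reinitializes the
 * DMA engine and MAC for every session.
 */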

static int axi_emac_of_to_plat(struct udevice *dev)
{
	struct axidma_plat *plat = dev_get_plat(dev);
	struct eth_pdata *pdata = &plat->eth_pdata;
	int node = dev_of_offset(dev);
	int offset = 0;
	const char *phy_mode;

	pdata->iobase = dev_read_addr(dev);
	plat->mactype = dev_get_driver_data(dev);

	offset = fdtdec_lookup_phandle(gd->fdt_blob, node,
				       "axistream-connected");
	if (offset <= 0) {
		printf("%s: axistream is not found\n", __func__);
		return -EINVAL;
	}
	plat->dmatx = (struct axidma_reg *)fdtdec_get_addr(gd->fdt_blob,
							   offset, "reg");
	if (!plat->dmatx) {
		printf("%s: axi_dma register space not found\n", __func__);
		return -EINVAL;
	}

	if (plat->mactype == EMAC_1G) {
		plat->phyaddr = -1;

		offset = fdtdec_lookup_phandle(gd->fdt_blob, node,
					       "phy-handle");
		if (offset > 0) {
			plat->phyaddr = fdtdec_get_int(gd->fdt_blob, offset,
						       "reg", -1);
			plat->phy_of_handle = offset;
		}

		phy_mode = fdt_getprop(gd->fdt_blob, node, "phy-mode", NULL);
		if (phy_mode)
			pdata->phy_interface = phy_get_interface_by_name(phy_mode);
		if (pdata->phy_interface == -1) {
			printf("%s: Invalid PHY interface '%s'\n", __func__,
			       phy_mode);
			return -EINVAL;
		}

		plat->eth_hasnobuf = fdtdec_get_bool(gd->fdt_blob, node,
						     "xlnx,eth-hasnobuf");
	}

	printf("AXI EMAC: %lx, phyaddr %d, interface %s\n", (ulong)pdata->iobase,
	       plat->phyaddr, phy_string_for_interface(pdata->phy_interface));

	return 0;
}
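
/*
 * A minimal sketch of the device tree bindings this parser expects; the
 * node names, addresses and PHY mode below are illustrative, not taken
 * from a real board:
 *
 *	axi_ethernet: ethernet@40c00000 {
 *		compatible = "xlnx,axi-ethernet-1.00.a";
 *		reg = <0x40c00000 0x40000>;
 *		axistream-connected = <&axi_dma>;
 *		phy-handle = <&phy0>;
 *		phy-mode = "rgmii";
 *		xlnx,eth-hasnobuf;
 *	};
 */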

static const struct udevice_id axi_emac_ids[] = {
	{ .compatible = "xlnx,axi-ethernet-1.00.a", .data = (uintptr_t)EMAC_1G },
	{ .compatible = "xlnx,xxv-ethernet-1.0", .data = (uintptr_t)EMAC_10G_25G },
	{ }
};
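
/* The .data value selects the MAC variant; axi_emac_of_to_plat() reads it
 * back through dev_get_driver_data() to choose the 1G or 10G/25G paths.
 */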

U_BOOT_DRIVER(axi_emac) = {
	.name	= "axi_emac",
	.id	= UCLASS_ETH,
	.of_match = axi_emac_ids,
	.of_to_plat = axi_emac_of_to_plat,
	.probe	= axi_emac_probe,
	.remove	= axi_emac_remove,
	.ops	= &axi_emac_ops,
	.priv_auto = sizeof(struct axidma_priv),
	.plat_auto = sizeof(struct axidma_plat),
};