// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2010
 * Vipin Kumar, ST Microelectronics, vipin.kumar@st.com.
 */

/*
 * Designware ethernet IP driver for U-Boot
 */

#include <common.h>
#include <clk.h>
#include <cpu_func.h>
#include <dm.h>
#include <errno.h>
#include <log.h>
#include <miiphy.h>
#include <malloc.h>
#include <net.h>
#include <pci.h>
#include <reset.h>
#include <asm/cache.h>
#include <dm/device_compat.h>
#include <dm/devres.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <asm/io.h>
#include <power/regulator.h>
#include "designware.h"

#if CONFIG_IS_ENABLED(TARGET_STARFIVE_VIC7100)
#include <asm/arch/clkgen_ctrl_macro.h>
#endif

#if CONFIG_IS_ENABLED(ETH_DESIGNWARE_PRESETED_BUFF)
#define TX_MAC_DES	preseted_tx_mac_descrtable
#define RX_MAC_DES	preseted_rx_mac_descrtable
#define TX_BUFF		preseted_txbuffs
#define RX_BUFF		preseted_rxbuffs

struct dmamacdescr *preseted_tx_mac_descrtable =
	(struct dmamacdescr *)CONFIG_ETH_DESIGNWARE_PRESETED_TX_MAC_DES_BASE;
struct dmamacdescr *preseted_rx_mac_descrtable =
	(struct dmamacdescr *)CONFIG_ETH_DESIGNWARE_PRESETED_RX_MAC_DES_BASE;
char *preseted_txbuffs = (char *)CONFIG_ETH_DESIGNWARE_PRESETED_TXBUFF_BASE;
char *preseted_rxbuffs = (char *)CONFIG_ETH_DESIGNWARE_PRESETED_RXBUFF_BASE;
#else
#define TX_MAC_DES	priv->tx_mac_descrtable
#define RX_MAC_DES	priv->rx_mac_descrtable
#define TX_BUFF		priv->txbuffs
#define RX_BUFF		priv->rxbuffs
#endif

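/*
 * Read a PHY register over the MDIO interface: program the MII address
 * register with the busy bit set, then poll until the MAC clears
 * MII_BUSY or the timeout expires.
 */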
static int dw_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
{
#ifdef CONFIG_DM_ETH
	struct dw_eth_dev *priv = dev_get_priv((struct udevice *)bus->priv);
	struct eth_mac_regs *mac_p = priv->mac_regs_p;
#else
	struct eth_mac_regs *mac_p = bus->priv;
#endif
	unsigned int start;
	u16 miiaddr;
	int timeout = CONFIG_MDIO_TIMEOUT;

	miiaddr = ((addr << MIIADDRSHIFT) & MII_ADDRMSK) |
		  ((reg << MIIREGSHIFT) & MII_REGMSK);

	writel(miiaddr | MII_CLKRANGE_150_250M | MII_BUSY, &mac_p->miiaddr);

	start = get_timer(0);
	while (get_timer(start) < timeout) {
		if (!(readl(&mac_p->miiaddr) & MII_BUSY))
			return readl(&mac_p->miidata);
		udelay(10);
	}

	return -ETIMEDOUT;
}

static int dw_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
			 u16 val)
{
#ifdef CONFIG_DM_ETH
	struct dw_eth_dev *priv = dev_get_priv((struct udevice *)bus->priv);
	struct eth_mac_regs *mac_p = priv->mac_regs_p;
#else
	struct eth_mac_regs *mac_p = bus->priv;
#endif
	unsigned int start;
	u16 miiaddr;
	int ret = -ETIMEDOUT, timeout = CONFIG_MDIO_TIMEOUT;

	writel(val, &mac_p->miidata);
	miiaddr = ((addr << MIIADDRSHIFT) & MII_ADDRMSK) |
		  ((reg << MIIREGSHIFT) & MII_REGMSK) | MII_WRITE;

	writel(miiaddr | MII_CLKRANGE_150_250M | MII_BUSY, &mac_p->miiaddr);

	start = get_timer(0);
	while (get_timer(start) < timeout) {
		if (!(readl(&mac_p->miiaddr) & MII_BUSY)) {
			ret = 0;
			break;
		}
		udelay(10);
	}

	return ret;
}

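/*
 * Hard-reset the PHY via its reset GPIO (if one was requested),
 * honouring the three "snps,reset-delays-us" intervals parsed from
 * the device tree.
 */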
#if defined(CONFIG_DM_ETH) && CONFIG_IS_ENABLED(DM_GPIO)
static int dw_mdio_reset(struct mii_dev *bus)
{
	struct udevice *dev = bus->priv;
	struct dw_eth_dev *priv = dev_get_priv(dev);
	struct dw_eth_pdata *pdata = dev_get_platdata(dev);
	int ret;

	if (!dm_gpio_is_valid(&priv->reset_gpio))
		return 0;

	/* reset the phy */
	ret = dm_gpio_set_value(&priv->reset_gpio, 0);
	if (ret)
		return ret;
	udelay(pdata->reset_delays[0]);

	ret = dm_gpio_set_value(&priv->reset_gpio, 1);
	if (ret)
		return ret;
	udelay(pdata->reset_delays[1]);

	ret = dm_gpio_set_value(&priv->reset_gpio, 0);
	if (ret)
		return ret;
	udelay(pdata->reset_delays[2]);

	return 0;
}
#endif

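/*
 * Allocate and register an MDIO bus that uses the GMAC's own MII
 * management registers for PHY register access.
 */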
static int dw_mdio_init(const char *name, void *priv)
{
	struct mii_dev *bus = mdio_alloc();

	if (!bus) {
		printf("Failed to allocate MDIO bus\n");
		return -ENOMEM;
	}

	bus->read = dw_mdio_read;
	bus->write = dw_mdio_write;
	snprintf(bus->name, sizeof(bus->name), "%s", name);
#if defined(CONFIG_DM_ETH) && CONFIG_IS_ENABLED(DM_GPIO)
	bus->reset = dw_mdio_reset;
#endif

	bus->priv = priv;

	return mdio_register(bus);
}

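/*
 * Initialise the TX descriptor ring: chain each descriptor to the next
 * (the last one wraps back to the first), attach its buffer, flush the
 * ring to memory and hand the list base address to the DMA engine.
 */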
static void tx_descs_init(struct dw_eth_dev *priv)
{
	struct eth_dma_regs *dma_p = priv->dma_regs_p;
	struct dmamacdescr *desc_table_p = &TX_MAC_DES[0];
	char *txbuffs = &TX_BUFF[0];
	struct dmamacdescr *desc_p;
	u32 idx;

	for (idx = 0; idx < CONFIG_TX_DESCR_NUM; idx++) {
		desc_p = &desc_table_p[idx];
		desc_p->dmamac_addr = (unsigned int)&txbuffs[idx * CONFIG_ETH_BUFSIZE];
		desc_p->dmamac_next = (unsigned int)&desc_table_p[idx + 1];

#if defined(CONFIG_DW_ALTDESCRIPTOR)
		desc_p->txrx_status &= ~(DESC_TXSTS_TXINT | DESC_TXSTS_TXLAST |
				DESC_TXSTS_TXFIRST | DESC_TXSTS_TXCRCDIS |
				DESC_TXSTS_TXCHECKINSCTRL |
				DESC_TXSTS_TXRINGEND | DESC_TXSTS_TXPADDIS);

		desc_p->txrx_status |= DESC_TXSTS_TXCHAIN;
		desc_p->dmamac_cntl = 0;
		desc_p->txrx_status &= ~(DESC_TXSTS_MSK | DESC_TXSTS_OWNBYDMA);
#else
		desc_p->dmamac_cntl = DESC_TXCTRL_TXCHAIN;
		desc_p->txrx_status = 0;
#endif
	}

	/* Correcting the last pointer of the chain */
	desc_p->dmamac_next = (unsigned int)&desc_table_p[0];

	/* Flush all Tx buffer descriptors at once */
	flush_dcache_range((unsigned int)TX_MAC_DES,
			   (unsigned int)TX_MAC_DES +
			   sizeof(struct dmamacdescr) * CONFIG_TX_DESCR_NUM);

	writel((unsigned int)&desc_table_p[0], &dma_p->txdesclistaddr);
	priv->tx_currdescnum = 0;
}

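/*
 * Initialise the RX descriptor ring in the same way, but mark every
 * descriptor as owned by the DMA engine so it can start receiving
 * immediately.
 */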
static void rx_descs_init(struct dw_eth_dev *priv)
{
	struct eth_dma_regs *dma_p = priv->dma_regs_p;
	struct dmamacdescr *desc_table_p = &RX_MAC_DES[0];
	char *rxbuffs = &RX_BUFF[0];
	struct dmamacdescr *desc_p;
	u32 idx;

	/*
	 * Before passing buffers to GMAC we need to make sure zeros
	 * written there right after "priv" structure allocation were
	 * flushed into RAM.
	 * Otherwise there's a chance to get some of them flushed in RAM when
	 * GMAC is already pushing data to RAM via DMA. This way incoming from
	 * GMAC data will be corrupted.
	 */
	flush_dcache_range((unsigned int)rxbuffs,
			   (unsigned int)rxbuffs + RX_TOTAL_BUFSIZE);

	for (idx = 0; idx < CONFIG_RX_DESCR_NUM; idx++) {
		desc_p = &desc_table_p[idx];
		desc_p->dmamac_addr = (unsigned int)&rxbuffs[idx * CONFIG_ETH_BUFSIZE];
		desc_p->dmamac_next = (unsigned int)&desc_table_p[idx + 1];

		desc_p->dmamac_cntl =
			(MAC_MAX_FRAME_SZ & DESC_RXCTRL_SIZE1MASK) |
			DESC_RXCTRL_RXCHAIN;

		desc_p->txrx_status = DESC_RXSTS_OWNBYDMA;
	}

	/* Correcting the last pointer of the chain */
	desc_p->dmamac_next = (unsigned int)&desc_table_p[0];

	/* Flush all Rx buffer descriptors at once */
	flush_dcache_range((unsigned int)RX_MAC_DES,
			   (unsigned int)RX_MAC_DES +
			   sizeof(struct dmamacdescr) * CONFIG_RX_DESCR_NUM);

	writel((unsigned int)&desc_table_p[0], &dma_p->rxdesclistaddr);
	priv->rx_currdescnum = 0;
}

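/*
 * Program the station MAC address into the MACADDR0 registers:
 * bytes 0-3 go into the low word, bytes 4-5 into the high word.
 */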
static int _dw_write_hwaddr(struct dw_eth_dev *priv, u8 *mac_id)
{
	struct eth_mac_regs *mac_p = priv->mac_regs_p;
	u32 macid_lo, macid_hi;

	macid_lo = mac_id[0] + (mac_id[1] << 8) + (mac_id[2] << 16) +
		   (mac_id[3] << 24);
	macid_hi = mac_id[4] + (mac_id[5] << 8);

	writel(macid_hi, &mac_p->macaddr0hi);
	writel(macid_lo, &mac_p->macaddr0lo);

	return 0;
}

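/*
 * Mirror the negotiated PHY link parameters (speed, duplex) into the
 * MAC configuration register; on the StarFive VIC7100 also select the
 * matching GMAC GTX clock divider for the current speed.
 */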
static int dw_adjust_link(struct dw_eth_dev *priv, struct eth_mac_regs *mac_p,
			  struct phy_device *phydev)
{
	u32 conf = readl(&mac_p->conf) | FRAMEBURSTENABLE | DISABLERXOWN;

	if (!phydev->link) {
		printf("%s: No link.\n", phydev->dev->name);
		return 0;
	}

	if (phydev->speed != 1000)
		conf |= MII_PORTSELECT;
	else
		conf &= ~MII_PORTSELECT;

	if (phydev->speed == 100)
		conf |= FES_100;

	if (phydev->duplex)
		conf |= FULLDPLXMODE;

#if CONFIG_IS_ENABLED(TARGET_STARFIVE_VIC7100)
	switch (phydev->speed) {
	case 1000:
		_DIVIDE_CLOCK_clk_gmac_gtxclk_(4);
		break;
	case 100:
		_DIVIDE_CLOCK_clk_gmac_gtxclk_(20);
		break;
	case 10:
		_DIVIDE_CLOCK_clk_gmac_gtxclk_(200);
		break;
	default:
		break;
	}
#endif

	writel(conf, &mac_p->conf);

	printf("Speed: %d, %s duplex%s\n", phydev->speed,
	       (phydev->duplex) ? "full" : "half",
	       (phydev->port == PORT_FIBRE) ? ", fiber mode" : "");

	return 0;
}

static void _dw_eth_halt(struct dw_eth_dev *priv)
{
	struct eth_mac_regs *mac_p = priv->mac_regs_p;
	struct eth_dma_regs *dma_p = priv->dma_regs_p;

	writel(readl(&mac_p->conf) & ~(RXENABLE | TXENABLE), &mac_p->conf);
	writel(readl(&dma_p->opmode) & ~(RXSTART | TXSTART), &dma_p->opmode);

	phy_shutdown(priv->phydev);
}

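/*
 * Bring the controller up: soft-reset the DMA engine, restore the MAC
 * address (cleared by the reset), rebuild both descriptor rings,
 * configure the DMA bus/operating mode and start up the PHY.
 */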
int designware_eth_init(struct dw_eth_dev *priv, u8 *enetaddr)
{
	struct eth_mac_regs *mac_p = priv->mac_regs_p;
	struct eth_dma_regs *dma_p = priv->dma_regs_p;
	unsigned int start;
	int ret;

	writel(readl(&dma_p->busmode) | DMAMAC_SRST, &dma_p->busmode);

	/*
	 * When a MII PHY is used, we must set the PS bit for the DMA
	 * reset to succeed.
	 */
	if (priv->phydev->interface == PHY_INTERFACE_MODE_MII)
		writel(readl(&mac_p->conf) | MII_PORTSELECT, &mac_p->conf);
	else
		writel(readl(&mac_p->conf) & ~MII_PORTSELECT, &mac_p->conf);

	start = get_timer(0);
	while (readl(&dma_p->busmode) & DMAMAC_SRST) {
		if (get_timer(start) >= CONFIG_MACRESET_TIMEOUT) {
			printf("DMA reset timeout\n");
			return -ETIMEDOUT;
		}

		mdelay(100);
	}

	/*
	 * Soft reset above clears HW address registers.
	 * So we have to set it here once again.
	 */
	_dw_write_hwaddr(priv, enetaddr);

	rx_descs_init(priv);
	tx_descs_init(priv);

	writel(FIXEDBURST | PRIORXTX_41 | DMA_PBL, &dma_p->busmode);

#ifndef CONFIG_DW_MAC_FORCE_THRESHOLD_MODE
	writel(readl(&dma_p->opmode) | FLUSHTXFIFO | STOREFORWARD,
	       &dma_p->opmode);
#else
	writel(readl(&dma_p->opmode) | FLUSHTXFIFO,
	       &dma_p->opmode);
#endif

	writel(readl(&dma_p->opmode) | RXSTART | TXSTART, &dma_p->opmode);

#ifdef CONFIG_DW_AXI_BURST_LEN
	writel((CONFIG_DW_AXI_BURST_LEN & 0x1FF >> 1), &dma_p->axibus);
#endif

	/* Start up the PHY */
	ret = phy_startup(priv->phydev);
	if (ret) {
		printf("Could not initialize PHY %s\n",
		       priv->phydev->dev->name);
		return ret;
	}

	ret = dw_adjust_link(priv, mac_p, priv->phydev);
	if (ret)
		return ret;

	return 0;
}

int designware_eth_enable(struct dw_eth_dev *priv)
{
	struct eth_mac_regs *mac_p = priv->mac_regs_p;

	if (!priv->phydev->link)
		return -EIO;

	writel(readl(&mac_p->conf) | RXENABLE | TXENABLE, &mac_p->conf);

	return 0;
}

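/*
 * Transmit path: copy the packet into the buffer of the current TX
 * descriptor, zero-pad frames shorter than ETH_ZLEN, flush data and
 * descriptor from the D-cache, pass ownership to the DMA engine and
 * kick it via the transmit poll-demand register.
 */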
#define ETH_ZLEN	60

static int _dw_eth_send(struct dw_eth_dev *priv, void *packet, int length)
{
	struct eth_dma_regs *dma_p = priv->dma_regs_p;
	u32 desc_num = priv->tx_currdescnum;
	struct dmamacdescr *desc_p = &TX_MAC_DES[desc_num];
	unsigned int desc_start = (unsigned int)desc_p;
	unsigned int desc_end = desc_start +
		roundup(sizeof(*desc_p), ARCH_DMA_MINALIGN);
	unsigned int data_start = desc_p->dmamac_addr;
	unsigned int data_end = data_start + roundup(length, ARCH_DMA_MINALIGN);

	/*
	 * Strictly we only need to invalidate the "txrx_status" field
	 * for the following check, but on some platforms we cannot
	 * invalidate only 4 bytes, so we flush the entire descriptor,
	 * which is 16 bytes in total. This is safe because the
	 * individual descriptors in the array are each aligned to
	 * ARCH_DMA_MINALIGN and padded appropriately.
	 */
	invalidate_dcache_range(desc_start, desc_end);

	/* Check if the descriptor is owned by CPU */
	if (desc_p->txrx_status & DESC_TXSTS_OWNBYDMA) {
		printf("CPU not owner of tx frame\n");
		return -EPERM;
	}

	memcpy((void *)data_start, packet, length);
	if (length < ETH_ZLEN) {
		memset(&((char *)data_start)[length], 0, ETH_ZLEN - length);
		length = ETH_ZLEN;
	}

	/* Flush data to be sent */
	flush_dcache_range(data_start, data_end);

#if defined(CONFIG_DW_ALTDESCRIPTOR)
	desc_p->txrx_status |= DESC_TXSTS_TXFIRST | DESC_TXSTS_TXLAST;
	desc_p->dmamac_cntl = (desc_p->dmamac_cntl & ~DESC_TXCTRL_SIZE1MASK) |
			      ((length << DESC_TXCTRL_SIZE1SHFT) &
			      DESC_TXCTRL_SIZE1MASK);

	desc_p->txrx_status &= ~(DESC_TXSTS_MSK);
	desc_p->txrx_status |= DESC_TXSTS_OWNBYDMA;
#else
	desc_p->dmamac_cntl = (desc_p->dmamac_cntl & ~DESC_TXCTRL_SIZE1MASK) |
			      ((length << DESC_TXCTRL_SIZE1SHFT) &
			      DESC_TXCTRL_SIZE1MASK) | DESC_TXCTRL_TXLAST |
			      DESC_TXCTRL_TXFIRST;

	desc_p->txrx_status = DESC_TXSTS_OWNBYDMA;
#endif

	/* Flush modified buffer descriptor */
	flush_dcache_range(desc_start, desc_end);

	/* Test the wrap-around condition. */
	if (++desc_num >= CONFIG_TX_DESCR_NUM)
		desc_num = 0;

	priv->tx_currdescnum = desc_num;

	/* Start the transmission */
	writel(POLL_DATA, &dma_p->txpolldemand);

	return 0;
}

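/*
 * Receive path: if the current RX descriptor has been handed back by
 * the DMA engine, invalidate the received data in the D-cache and
 * return the frame length; otherwise report -EAGAIN.
 */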
static int _dw_eth_recv(struct dw_eth_dev *priv, uchar **packetp)
{
	u32 status, desc_num = priv->rx_currdescnum;
	struct dmamacdescr *desc_p = &RX_MAC_DES[desc_num];
	int length = -EAGAIN;
	unsigned int desc_start = (unsigned int)desc_p;
	unsigned int desc_end = desc_start +
		roundup(sizeof(*desc_p), ARCH_DMA_MINALIGN);
	unsigned int data_start = desc_p->dmamac_addr;
	unsigned int data_end;

	/* Invalidate entire buffer descriptor */
	invalidate_dcache_range(desc_start, desc_end);

	status = desc_p->txrx_status;

	/* Check if the owner is the CPU */
	if (!(status & DESC_RXSTS_OWNBYDMA)) {
		length = (status & DESC_RXSTS_FRMLENMSK) >>
			 DESC_RXSTS_FRMLENSHFT;

		/* Invalidate received data */
		data_end = data_start + roundup(length, ARCH_DMA_MINALIGN);
		invalidate_dcache_range(data_start, data_end);
		*packetp = (uchar *)(unsigned int)desc_p->dmamac_addr;
	}

	return length;
}

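/*
 * Return the just-processed RX descriptor to the DMA engine and
 * advance to the next slot in the ring.
 */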
static int _dw_free_pkt(struct dw_eth_dev *priv)
{
	u32 desc_num = priv->rx_currdescnum;
	struct dmamacdescr *desc_p = &RX_MAC_DES[desc_num];
	unsigned int desc_start = (unsigned int)desc_p;
	unsigned int desc_end = desc_start +
		roundup(sizeof(*desc_p), ARCH_DMA_MINALIGN);

	/*
	 * Make the current descriptor valid again and go to
	 * the next one
	 */
	desc_p->txrx_status |= DESC_RXSTS_OWNBYDMA;

	/* Flush only status field - others weren't changed */
	flush_dcache_range(desc_start, desc_end);

	/* Test the wrap-around condition. */
	if (++desc_num >= CONFIG_RX_DESCR_NUM)
		desc_num = 0;
	priv->rx_currdescnum = desc_num;

	return 0;
}

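/*
 * Connect to the PHY (forcing CONFIG_PHY_ADDR when defined, otherwise
 * letting phy_connect pick the address), restrict it to gigabit
 * features and the optional "max-speed" limit, then run the generic
 * PHY configuration.
 */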
static int dw_phy_init(struct dw_eth_dev *priv, void *dev)
{
	struct phy_device *phydev;
	int phy_addr = -1, ret;

#ifdef CONFIG_PHY_ADDR
	phy_addr = CONFIG_PHY_ADDR;
#endif

	phydev = phy_connect(priv->bus, phy_addr, dev, priv->interface);
	if (!phydev)
		return -ENODEV;

	phydev->supported &= PHY_GBIT_FEATURES;
	if (priv->max_speed) {
		ret = phy_set_supported(phydev, priv->max_speed);
		if (ret)
			return ret;
	}
	phydev->advertising = phydev->supported;

	priv->phydev = phydev;
	phy_config(phydev);

	return 0;
}

#ifndef CONFIG_DM_ETH
static int dw_eth_init(struct eth_device *dev, struct bd_info *bis)
{
	int ret;

	ret = designware_eth_init(dev->priv, dev->enetaddr);
	if (!ret)
		ret = designware_eth_enable(dev->priv);

	return ret;
}

static int dw_eth_send(struct eth_device *dev, void *packet, int length)
{
	return _dw_eth_send(dev->priv, packet, length);
}

static int dw_eth_recv(struct eth_device *dev)
{
	uchar *packet;
	int length;

	length = _dw_eth_recv(dev->priv, &packet);
	if (length == -EAGAIN)
		return 0;

	net_process_received_packet(packet, length);

	_dw_free_pkt(dev->priv);

	return 0;
}

static void dw_eth_halt(struct eth_device *dev)
{
	return _dw_eth_halt(dev->priv);
}

static int dw_write_hwaddr(struct eth_device *dev)
{
	return _dw_write_hwaddr(dev->priv, dev->enetaddr);
}

int designware_initialize(unsigned int base_addr, u32 interface)
{
	struct eth_device *dev;
	struct dw_eth_dev *priv;

	dev = (struct eth_device *) malloc(sizeof(struct eth_device));
	if (!dev)
		return -ENOMEM;

	/*
	 * Since the priv structure contains the descriptors which need a strict
	 * buswidth alignment, memalign is used to allocate memory
	 */
	priv = (struct dw_eth_dev *) memalign(ARCH_DMA_MINALIGN,
					      sizeof(struct dw_eth_dev));
	if (!priv) {
		free(dev);
		return -ENOMEM;
	}

#if !CONFIG_IS_ENABLED(TARGET_STARFIVE_VIC7100)
	if ((phys_addr_t)priv + sizeof(*priv) > (1ULL << 32)) {
		printf("designware: buffers are outside DMA memory\n");
		return -EINVAL;
	}
#endif

	memset(dev, 0, sizeof(struct eth_device));
	memset(priv, 0, sizeof(struct dw_eth_dev));

	sprintf(dev->name, "dwmac.%x", base_addr);
	dev->iobase = (int)base_addr;
	dev->priv = priv;

	priv->dev = dev;
	priv->mac_regs_p = (struct eth_mac_regs *)base_addr;
	priv->dma_regs_p = (struct eth_dma_regs *)(base_addr +
			DW_DMA_BASE_OFFSET);

	dev->init = dw_eth_init;
	dev->send = dw_eth_send;
	dev->recv = dw_eth_recv;
	dev->halt = dw_eth_halt;
	dev->write_hwaddr = dw_write_hwaddr;

	eth_register(dev);

	priv->interface = interface;

	dw_mdio_init(dev->name, priv->mac_regs_p);
	priv->bus = miiphy_get_dev_by_name(dev->name);

	return dw_phy_init(priv, dev);
}
#endif

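/*
 * Driver-model (DM_ETH) glue: the uclass eth_ops callbacks below are
 * thin wrappers around the common helpers above.
 */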
#ifdef CONFIG_DM_ETH
static int designware_eth_start(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct dw_eth_dev *priv = dev_get_priv(dev);
	int ret;

	ret = designware_eth_init(priv, pdata->enetaddr);
	if (ret)
		return ret;
	ret = designware_eth_enable(priv);
	if (ret)
		return ret;

	return 0;
}

int designware_eth_send(struct udevice *dev, void *packet, int length)
{
	struct dw_eth_dev *priv = dev_get_priv(dev);

	return _dw_eth_send(priv, packet, length);
}

int designware_eth_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct dw_eth_dev *priv = dev_get_priv(dev);

	return _dw_eth_recv(priv, packetp);
}

int designware_eth_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct dw_eth_dev *priv = dev_get_priv(dev);

	return _dw_free_pkt(priv);
}

void designware_eth_stop(struct udevice *dev)
{
	struct dw_eth_dev *priv = dev_get_priv(dev);

	return _dw_eth_halt(priv);
}

int designware_eth_write_hwaddr(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct dw_eth_dev *priv = dev_get_priv(dev);

	return _dw_write_hwaddr(priv, pdata->enetaddr);
}

static int designware_eth_bind(struct udevice *dev)
{
#ifdef CONFIG_DM_PCI
	static int num_cards;
	char name[20];

	/* Create a unique device name for PCI type devices */
	if (device_is_on_pci_bus(dev)) {
		sprintf(name, "eth_designware#%u", num_cards++);
		device_set_name(dev, name);
	}
#endif

	return 0;
}

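/*
 * Probe: enable the clocks and the optional phy-supply regulator,
 * release the reset lines, resolve the register base (from PCI BAR0
 * when the device sits on a PCI bus), then register the MDIO bus and
 * attach the PHY; on failure, unwind in reverse order.
 */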
int designware_eth_probe(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct dw_eth_dev *priv = dev_get_priv(dev);
	u32 iobase = pdata->iobase;
	unsigned int ioaddr;
	int ret, err;
	struct reset_ctl_bulk reset_bulk;
#ifdef CONFIG_CLK
	int i, clock_nb;

	priv->clock_count = 0;
	clock_nb = dev_count_phandle_with_args(dev, "clocks", "#clock-cells",
					       0);
	if (clock_nb > 0) {
		priv->clocks = devm_kcalloc(dev, clock_nb, sizeof(struct clk),
					    GFP_KERNEL);
		if (!priv->clocks)
			return -ENOMEM;

		for (i = 0; i < clock_nb; i++) {
			err = clk_get_by_index(dev, i, &priv->clocks[i]);
			if (err < 0)
				break;

			err = clk_enable(&priv->clocks[i]);
			if (err && err != -ENOSYS && err != -ENOTSUPP) {
				pr_err("failed to enable clock %d\n", i);
				clk_free(&priv->clocks[i]);
				goto clk_err;
			}
			priv->clock_count++;
		}
	} else if (clock_nb != -ENOENT) {
		pr_err("failed to get clock phandle(%d)\n", clock_nb);
		return clock_nb;
	}
#endif

#if defined(CONFIG_DM_REGULATOR)
	struct udevice *phy_supply;

	ret = device_get_supply_regulator(dev, "phy-supply",
					  &phy_supply);
	if (ret) {
		debug("%s: No phy supply\n", dev->name);
	} else {
		ret = regulator_set_enable(phy_supply, true);
		if (ret) {
			puts("Error enabling phy supply\n");
			return ret;
		}
	}
#endif

	ret = reset_get_bulk(dev, &reset_bulk);
	if (ret)
		dev_warn(dev, "Can't get reset: %d\n", ret);
	else
		reset_deassert_bulk(&reset_bulk);

#ifdef CONFIG_DM_PCI
	/*
	 * If we are on PCI bus, either directly attached to a PCI root port,
	 * or via a PCI bridge, fill in platdata before we probe the hardware.
	 */
	if (device_is_on_pci_bus(dev)) {
		dm_pci_read_config32(dev, PCI_BASE_ADDRESS_0, &iobase);
		iobase &= PCI_BASE_ADDRESS_MEM_MASK;
		iobase = dm_pci_mem_to_phys(dev, iobase);

		pdata->iobase = iobase;
		pdata->phy_interface = PHY_INTERFACE_MODE_RMII;
	}
#endif

	debug("%s, iobase=%x, priv=%p\n", __func__, iobase, priv);
	ioaddr = iobase;
	priv->mac_regs_p = (struct eth_mac_regs *)ioaddr;
	priv->dma_regs_p = (struct eth_dma_regs *)(ioaddr + DW_DMA_BASE_OFFSET);
	priv->interface = pdata->phy_interface;
	priv->max_speed = pdata->max_speed;

	ret = dw_mdio_init(dev->name, dev);
	if (ret) {
		err = ret;
		goto mdio_err;
	}
	priv->bus = miiphy_get_dev_by_name(dev->name);

	ret = dw_phy_init(priv, dev);
	debug("%s, ret=%d\n", __func__, ret);
	if (!ret)
		return 0;

	/* continue here for cleanup if no PHY found */
	err = ret;
	mdio_unregister(priv->bus);
	mdio_free(priv->bus);
mdio_err:

#ifdef CONFIG_CLK
clk_err:
	ret = clk_release_all(priv->clocks, priv->clock_count);
	if (ret)
		pr_err("failed to disable all clocks\n");

#endif
	return err;
}

static int designware_eth_remove(struct udevice *dev)
{
	struct dw_eth_dev *priv = dev_get_priv(dev);

	free(priv->phydev);
	mdio_unregister(priv->bus);
	mdio_free(priv->bus);

#ifdef CONFIG_CLK
	return clk_release_all(priv->clocks, priv->clock_count);
#else
	return 0;
#endif
}

const struct eth_ops designware_eth_ops = {
	.start			= designware_eth_start,
	.send			= designware_eth_send,
	.recv			= designware_eth_recv,
	.free_pkt		= designware_eth_free_pkt,
	.stop			= designware_eth_stop,
	.write_hwaddr		= designware_eth_write_hwaddr,
};

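/*
 * Parse the device-tree properties: register base, "phy-mode",
 * optional "max-speed" and, when DM_GPIO is enabled, the
 * "snps,reset-gpio" / "snps,reset-delays-us" PHY reset description.
 */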
int designware_eth_ofdata_to_platdata(struct udevice *dev)
{
	struct dw_eth_pdata *dw_pdata = dev_get_platdata(dev);
#if CONFIG_IS_ENABLED(DM_GPIO)
	struct dw_eth_dev *priv = dev_get_priv(dev);
#endif
	struct eth_pdata *pdata = &dw_pdata->eth_pdata;
	const char *phy_mode;
#if CONFIG_IS_ENABLED(DM_GPIO)
	int reset_flags = GPIOD_IS_OUT;
#endif
	int ret = 0;

	pdata->iobase = dev_read_addr(dev);
	pdata->phy_interface = -1;
	phy_mode = dev_read_string(dev, "phy-mode");
	if (phy_mode)
		pdata->phy_interface = phy_get_interface_by_name(phy_mode);
	if (pdata->phy_interface == -1) {
		debug("%s: Invalid PHY interface '%s'\n", __func__, phy_mode);
		return -EINVAL;
	}

	pdata->max_speed = dev_read_u32_default(dev, "max-speed", 0);

#if CONFIG_IS_ENABLED(DM_GPIO)
	if (dev_read_bool(dev, "snps,reset-active-low"))
		reset_flags |= GPIOD_ACTIVE_LOW;

	ret = gpio_request_by_name(dev, "snps,reset-gpio", 0,
				   &priv->reset_gpio, reset_flags);
	if (ret == 0) {
		ret = dev_read_u32_array(dev, "snps,reset-delays-us",
					 dw_pdata->reset_delays, 3);
	} else if (ret == -ENOENT) {
		ret = 0;
	}
#endif

	return ret;
}

static const struct udevice_id designware_eth_ids[] = {
	{ .compatible = "allwinner,sun7i-a20-gmac" },
	{ .compatible = "amlogic,meson6-dwmac" },
	{ .compatible = "amlogic,meson-gx-dwmac" },
	{ .compatible = "amlogic,meson-gxbb-dwmac" },
	{ .compatible = "amlogic,meson-axg-dwmac" },
	{ .compatible = "st,stm32-dwmac" },
	{ .compatible = "snps,arc-dwmac-3.70a" },
	{ }
};

U_BOOT_DRIVER(eth_designware) = {
	.name	= "eth_designware",
	.id	= UCLASS_ETH,
	.of_match = designware_eth_ids,
	.ofdata_to_platdata = designware_eth_ofdata_to_platdata,
	.bind	= designware_eth_bind,
	.probe	= designware_eth_probe,
	.remove	= designware_eth_remove,
	.ops	= &designware_eth_ops,
	.priv_auto_alloc_size = sizeof(struct dw_eth_dev),
	.platdata_auto_alloc_size = sizeof(struct dw_eth_pdata),
	.flags = DM_FLAG_ALLOC_PRIV_DMA,
};

static struct pci_device_id supported[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_QRK_EMAC) },
	{ }
};

U_BOOT_PCI_DEVICE(eth_designware, supported);
#endif