mtk_eth.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 MediaTek Inc.
 *
 * Author: Weijie Gao <weijie.gao@mediatek.com>
 * Author: Mark Lee <mark-mc.lee@mediatek.com>
 */

#include <common.h>
#include <dm.h>
#include <malloc.h>
#include <miiphy.h>
#include <regmap.h>
#include <reset.h>
#include <syscon.h>
#include <wait_bit.h>
#include <asm/gpio.h>
#include <asm/io.h>
#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/mdio.h>
#include <linux/mii.h>

#include "mtk_eth.h"

#define NUM_TX_DESC		24
#define NUM_RX_DESC		24
#define TX_TOTAL_BUF_SIZE	(NUM_TX_DESC * PKTSIZE_ALIGN)
#define RX_TOTAL_BUF_SIZE	(NUM_RX_DESC * PKTSIZE_ALIGN)
#define TOTAL_PKT_BUF_SIZE	(TX_TOTAL_BUF_SIZE + RX_TOTAL_BUF_SIZE)

#define MT7530_NUM_PHYS		5
#define MT7530_DFL_SMI_ADDR	31

#define MT7530_PHY_ADDR(base, addr) \
	(((base) + (addr)) & 0x1f)
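
/*
 * GDMA ingress configuration words used below. Both settings enable the
 * GDM checksum offload bits (GDM_ICS_EN, GDM_TCS_EN, GDM_UCS_EN) and CRC
 * stripping (STRP_CRC); they differ only in the destination-port fields:
 * GDMA_FWD_TO_CPU steers my-MAC, broadcast, multicast and unknown-unicast
 * frames to the PDMA (i.e. to the CPU rings), while GDMA_FWD_DISCARD drops
 * them and is used to silence the GMAC that is not in use.
 */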
#define GDMA_FWD_TO_CPU \
	(0x20000000 | \
	GDM_ICS_EN | \
	GDM_TCS_EN | \
	GDM_UCS_EN | \
	STRP_CRC | \
	(DP_PDMA << MYMAC_DP_S) | \
	(DP_PDMA << BC_DP_S) | \
	(DP_PDMA << MC_DP_S) | \
	(DP_PDMA << UN_DP_S))

#define GDMA_FWD_DISCARD \
	(0x20000000 | \
	GDM_ICS_EN | \
	GDM_TCS_EN | \
	GDM_UCS_EN | \
	STRP_CRC | \
	(DP_DISCARD << MYMAC_DP_S) | \
	(DP_DISCARD << BC_DP_S) | \
	(DP_DISCARD << MC_DP_S) | \
	(DP_DISCARD << UN_DP_S))
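
/*
 * PDMA hardware descriptors, kept in uncached memory and shared with the
 * DMA engine. Ownership is tracked through the DDONE bit: a TX descriptor
 * with DDONE set is free for the CPU to fill (the driver clears the bit
 * when queueing a frame and the hardware sets it again after
 * transmission), while an RX descriptor with DDONE set holds a received
 * frame waiting to be consumed.
 */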
struct pdma_rxd_info1 {
	u32 PDP0;
};

struct pdma_rxd_info2 {
	u32 PLEN1 : 14;
	u32 LS1 : 1;
	u32 UN_USED : 1;
	u32 PLEN0 : 14;
	u32 LS0 : 1;
	u32 DDONE : 1;
};

struct pdma_rxd_info3 {
	u32 PDP1;
};

struct pdma_rxd_info4 {
	u32 FOE_ENTRY : 14;
	u32 CRSN : 5;
	u32 SP : 3;
	u32 L4F : 1;
	u32 L4VLD : 1;
	u32 TACK : 1;
	u32 IP4F : 1;
	u32 IP4 : 1;
	u32 IP6 : 1;
	u32 UN_USED : 4;
};

struct pdma_rxdesc {
	struct pdma_rxd_info1 rxd_info1;
	struct pdma_rxd_info2 rxd_info2;
	struct pdma_rxd_info3 rxd_info3;
	struct pdma_rxd_info4 rxd_info4;
};

struct pdma_txd_info1 {
	u32 SDP0;
};

struct pdma_txd_info2 {
	u32 SDL1 : 14;
	u32 LS1 : 1;
	u32 BURST : 1;
	u32 SDL0 : 14;
	u32 LS0 : 1;
	u32 DDONE : 1;
};

struct pdma_txd_info3 {
	u32 SDP1;
};

struct pdma_txd_info4 {
	u32 VLAN_TAG : 16;
	u32 INS : 1;
	u32 RESV : 2;
	u32 UDF : 6;
	u32 FPORT : 3;
	u32 TSO : 1;
	u32 TUI_CO : 3;
};

struct pdma_txdesc {
	struct pdma_txd_info1 txd_info1;
	struct pdma_txd_info2 txd_info2;
	struct pdma_txd_info3 txd_info3;
	struct pdma_txd_info4 txd_info4;
};

enum mtk_switch {
	SW_NONE,
	SW_MT7530
};

enum mtk_soc {
	SOC_MT7623,
	SOC_MT7629
};

struct mtk_eth_priv {
	char pkt_pool[TOTAL_PKT_BUF_SIZE] __aligned(ARCH_DMA_MINALIGN);

	struct pdma_txdesc *tx_ring_noc;
	struct pdma_rxdesc *rx_ring_noc;

	int rx_dma_owner_idx0;
	int tx_cpu_owner_idx0;

	void __iomem *fe_base;
	void __iomem *gmac_base;
	void __iomem *ethsys_base;

	struct mii_dev *mdio_bus;
	int (*mii_read)(struct mtk_eth_priv *priv, u8 phy, u8 reg);
	int (*mii_write)(struct mtk_eth_priv *priv, u8 phy, u8 reg, u16 val);
	int (*mmd_read)(struct mtk_eth_priv *priv, u8 addr, u8 devad, u16 reg);
	int (*mmd_write)(struct mtk_eth_priv *priv, u8 addr, u8 devad, u16 reg,
			 u16 val);

	enum mtk_soc soc;
	int gmac_id;
	int force_mode;
	int speed;
	int duplex;

	struct phy_device *phydev;
	int phy_interface;
	int phy_addr;

	enum mtk_switch sw;
	int (*switch_init)(struct mtk_eth_priv *priv);
	u32 mt7530_smi_addr;
	u32 mt7530_phy_base;
	struct gpio_desc rst_gpio;
	int mcm;

	struct reset_ctl rst_fe;
	struct reset_ctl rst_mcm;
};

static void mtk_pdma_write(struct mtk_eth_priv *priv, u32 reg, u32 val)
{
	writel(val, priv->fe_base + PDMA_BASE + reg);
}

static void mtk_pdma_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr,
			 u32 set)
{
	clrsetbits_le32(priv->fe_base + PDMA_BASE + reg, clr, set);
}

static void mtk_gdma_write(struct mtk_eth_priv *priv, int no, u32 reg,
			   u32 val)
{
	u32 gdma_base;

	if (no == 1)
		gdma_base = GDMA2_BASE;
	else
		gdma_base = GDMA1_BASE;

	writel(val, priv->fe_base + gdma_base + reg);
}

static u32 mtk_gmac_read(struct mtk_eth_priv *priv, u32 reg)
{
	return readl(priv->gmac_base + reg);
}

static void mtk_gmac_write(struct mtk_eth_priv *priv, u32 reg, u32 val)
{
	writel(val, priv->gmac_base + reg);
}

static void mtk_gmac_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr, u32 set)
{
	clrsetbits_le32(priv->gmac_base + reg, clr, set);
}

static void mtk_ethsys_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr,
			   u32 set)
{
	clrsetbits_le32(priv->ethsys_base + reg, clr, set);
}
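
/*
 * MDIO access goes through the GMAC PHY Indirect Access Control register
 * (GMAC_PIAC_REG): a command word carrying the start code, opcode, PHY
 * address and register address (plus the data word for writes) is written
 * with PHY_ACS_ST set to start the transaction, the driver then polls
 * until the hardware clears PHY_ACS_ST and, for reads, picks the result
 * out of the data field of the same register.
 */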
/* Direct MDIO clause 22/45 access via SoC */
static int mtk_mii_rw(struct mtk_eth_priv *priv, u8 phy, u8 reg, u16 data,
		      u32 cmd, u32 st)
{
	int ret;
	u32 val;

	val = (st << MDIO_ST_S) |
	      ((cmd << MDIO_CMD_S) & MDIO_CMD_M) |
	      (((u32)phy << MDIO_PHY_ADDR_S) & MDIO_PHY_ADDR_M) |
	      (((u32)reg << MDIO_REG_ADDR_S) & MDIO_REG_ADDR_M);

	if (cmd == MDIO_CMD_WRITE)
		val |= data & MDIO_RW_DATA_M;

	mtk_gmac_write(priv, GMAC_PIAC_REG, val | PHY_ACS_ST);

	ret = wait_for_bit_le32(priv->gmac_base + GMAC_PIAC_REG,
				PHY_ACS_ST, 0, 5000, 0);
	if (ret) {
		pr_warn("MDIO access timeout\n");
		return ret;
	}

	if (cmd == MDIO_CMD_READ) {
		val = mtk_gmac_read(priv, GMAC_PIAC_REG);
		return val & MDIO_RW_DATA_M;
	}

	return 0;
}

/* Direct MDIO clause 22 read via SoC */
static int mtk_mii_read(struct mtk_eth_priv *priv, u8 phy, u8 reg)
{
	return mtk_mii_rw(priv, phy, reg, 0, MDIO_CMD_READ, MDIO_ST_C22);
}

/* Direct MDIO clause 22 write via SoC */
static int mtk_mii_write(struct mtk_eth_priv *priv, u8 phy, u8 reg, u16 data)
{
	return mtk_mii_rw(priv, phy, reg, data, MDIO_CMD_WRITE, MDIO_ST_C22);
}

/* Direct MDIO clause 45 read via SoC */
static int mtk_mmd_read(struct mtk_eth_priv *priv, u8 addr, u8 devad, u16 reg)
{
	int ret;

	ret = mtk_mii_rw(priv, addr, devad, reg, MDIO_CMD_ADDR, MDIO_ST_C45);
	if (ret)
		return ret;

	return mtk_mii_rw(priv, addr, devad, 0, MDIO_CMD_READ_C45,
			  MDIO_ST_C45);
}

/* Direct MDIO clause 45 write via SoC */
static int mtk_mmd_write(struct mtk_eth_priv *priv, u8 addr, u8 devad,
			 u16 reg, u16 val)
{
	int ret;

	ret = mtk_mii_rw(priv, addr, devad, reg, MDIO_CMD_ADDR, MDIO_ST_C45);
	if (ret)
		return ret;

	return mtk_mii_rw(priv, addr, devad, val, MDIO_CMD_WRITE,
			  MDIO_ST_C45);
}
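
/*
 * For devices that do not take clause 45 frames directly on the bus, MMD
 * registers are reached indirectly through the standard clause 22 MMD
 * access registers (registers 13 and 14): the device address is written
 * with the "address" function and the target register number is latched
 * through the address/data register, then the function is switched to
 * "data" and the register contents are read or written through the same
 * address/data register.
 */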
/* Indirect MDIO clause 45 read via MII registers */
static int mtk_mmd_ind_read(struct mtk_eth_priv *priv, u8 addr, u8 devad,
			    u16 reg)
{
	int ret;

	ret = priv->mii_write(priv, addr, MII_MMD_ACC_CTL_REG,
			      (MMD_ADDR << MMD_CMD_S) |
			      ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
	if (ret)
		return ret;

	ret = priv->mii_write(priv, addr, MII_MMD_ADDR_DATA_REG, reg);
	if (ret)
		return ret;

	ret = priv->mii_write(priv, addr, MII_MMD_ACC_CTL_REG,
			      (MMD_DATA << MMD_CMD_S) |
			      ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
	if (ret)
		return ret;

	return priv->mii_read(priv, addr, MII_MMD_ADDR_DATA_REG);
}

/* Indirect MDIO clause 45 write via MII registers */
static int mtk_mmd_ind_write(struct mtk_eth_priv *priv, u8 addr, u8 devad,
			     u16 reg, u16 val)
{
	int ret;

	ret = priv->mii_write(priv, addr, MII_MMD_ACC_CTL_REG,
			      (MMD_ADDR << MMD_CMD_S) |
			      ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
	if (ret)
		return ret;

	ret = priv->mii_write(priv, addr, MII_MMD_ADDR_DATA_REG, reg);
	if (ret)
		return ret;

	ret = priv->mii_write(priv, addr, MII_MMD_ACC_CTL_REG,
			      (MMD_DATA << MMD_CMD_S) |
			      ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
	if (ret)
		return ret;

	return priv->mii_write(priv, addr, MII_MMD_ADDR_DATA_REG, val);
}

static int mtk_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
{
	struct mtk_eth_priv *priv = bus->priv;

	if (devad < 0)
		return priv->mii_read(priv, addr, reg);
	else
		return priv->mmd_read(priv, addr, devad, reg);
}

static int mtk_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
			  u16 val)
{
	struct mtk_eth_priv *priv = bus->priv;

	if (devad < 0)
		return priv->mii_write(priv, addr, reg, val);
	else
		return priv->mmd_write(priv, addr, devad, reg, val);
}

static int mtk_mdio_register(struct udevice *dev)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	struct mii_dev *mdio_bus = mdio_alloc();
	int ret;

	if (!mdio_bus)
		return -ENOMEM;

	/* Assign MDIO access APIs according to the switch/phy */
	switch (priv->sw) {
	case SW_MT7530:
		priv->mii_read = mtk_mii_read;
		priv->mii_write = mtk_mii_write;
		priv->mmd_read = mtk_mmd_ind_read;
		priv->mmd_write = mtk_mmd_ind_write;
		break;
	default:
		priv->mii_read = mtk_mii_read;
		priv->mii_write = mtk_mii_write;
		priv->mmd_read = mtk_mmd_read;
		priv->mmd_write = mtk_mmd_write;
	}

	mdio_bus->read = mtk_mdio_read;
	mdio_bus->write = mtk_mdio_write;
	snprintf(mdio_bus->name, sizeof(mdio_bus->name), "%s", dev->name);
	mdio_bus->priv = (void *)priv;

	ret = mdio_register(mdio_bus);
	if (ret)
		return ret;

	priv->mdio_bus = mdio_bus;

	return 0;
}

/*
 * MT7530 Internal Register Address Bits
 * -------------------------------------------------------------------
 * | 15  14  13  12  11  10   9   8   7   6 | 5   4   3   2 | 1   0  |
 * |-----------------------------------------|---------------|--------|
 * |               Page Address              |  Reg Address  | Unused |
 * -------------------------------------------------------------------
 */
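
/*
 * Example: switch register 0x7804 is accessed as SMI page 0x1e0
 * (0x7804 >> 6, selected through register 0x1f) and register index 0x1
 * ((0x7804 >> 2) & 0xf); the 32-bit value is transferred as two 16-bit
 * halves, with the high half going through register 0x10.
 */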
static int mt7530_reg_read(struct mtk_eth_priv *priv, u32 reg, u32 *data)
{
	int ret, low_word, high_word;

	/* Write page address */
	ret = mtk_mii_write(priv, priv->mt7530_smi_addr, 0x1f, reg >> 6);
	if (ret)
		return ret;

	/* Read low word */
	low_word = mtk_mii_read(priv, priv->mt7530_smi_addr, (reg >> 2) & 0xf);
	if (low_word < 0)
		return low_word;

	/* Read high word */
	high_word = mtk_mii_read(priv, priv->mt7530_smi_addr, 0x10);
	if (high_word < 0)
		return high_word;

	if (data)
		*data = ((u32)high_word << 16) | (low_word & 0xffff);

	return 0;
}

static int mt7530_reg_write(struct mtk_eth_priv *priv, u32 reg, u32 data)
{
	int ret;

	/* Write page address */
	ret = mtk_mii_write(priv, priv->mt7530_smi_addr, 0x1f, reg >> 6);
	if (ret)
		return ret;

	/* Write low word */
	ret = mtk_mii_write(priv, priv->mt7530_smi_addr, (reg >> 2) & 0xf,
			    data & 0xffff);
	if (ret)
		return ret;

	/* Write high word */
	return mtk_mii_write(priv, priv->mt7530_smi_addr, 0x10, data >> 16);
}

static void mt7530_reg_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr,
			   u32 set)
{
	u32 val;

	mt7530_reg_read(priv, reg, &val);
	val &= ~clr;
	val |= set;
	mt7530_reg_write(priv, reg, val);
}
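
/*
 * MT7530 core (PLL/clock) registers are not part of the switch register
 * space handled above; they are reached through MMD device 0x1f of the
 * lowest internal PHY address, which is why this helper goes through the
 * indirect MMD write path.
 */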
static void mt7530_core_reg_write(struct mtk_eth_priv *priv, u32 reg, u32 val)
{
	u8 phy_addr = MT7530_PHY_ADDR(priv->mt7530_phy_base, 0);

	mtk_mmd_ind_write(priv, phy_addr, 0x1f, reg, val);
}

static int mt7530_pad_clk_setup(struct mtk_eth_priv *priv, int mode)
{
	u32 ncpo1, ssc_delta;

	switch (mode) {
	case PHY_INTERFACE_MODE_RGMII:
		ncpo1 = 0x0c80;
		ssc_delta = 0x87;
		break;
	default:
		printf("error: xMII mode %d not supported\n", mode);
		return -EINVAL;
	}

	/* Disable MT7530 core clock */
	mt7530_core_reg_write(priv, CORE_TRGMII_GSW_CLK_CG, 0);

	/* Disable MT7530 PLL */
	mt7530_core_reg_write(priv, CORE_GSWPLL_GRP1,
			      (2 << RG_GSWPLL_POSDIV_200M_S) |
			      (32 << RG_GSWPLL_FBKDIV_200M_S));

	/* For MT7530 core clock = 500Mhz */
	mt7530_core_reg_write(priv, CORE_GSWPLL_GRP2,
			      (1 << RG_GSWPLL_POSDIV_500M_S) |
			      (25 << RG_GSWPLL_FBKDIV_500M_S));

	/* Enable MT7530 PLL */
	mt7530_core_reg_write(priv, CORE_GSWPLL_GRP1,
			      (2 << RG_GSWPLL_POSDIV_200M_S) |
			      (32 << RG_GSWPLL_FBKDIV_200M_S) |
			      RG_GSWPLL_EN_PRE);

	udelay(20);

	mt7530_core_reg_write(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);

	/* Setup the MT7530 TRGMII Tx Clock */
	mt7530_core_reg_write(priv, CORE_PLL_GROUP5, ncpo1);
	mt7530_core_reg_write(priv, CORE_PLL_GROUP6, 0);
	mt7530_core_reg_write(priv, CORE_PLL_GROUP10, ssc_delta);
	mt7530_core_reg_write(priv, CORE_PLL_GROUP11, ssc_delta);
	mt7530_core_reg_write(priv, CORE_PLL_GROUP4, RG_SYSPLL_DDSFBK_EN |
			      RG_SYSPLL_BIAS_EN | RG_SYSPLL_BIAS_LPF_EN);

	mt7530_core_reg_write(priv, CORE_PLL_GROUP2,
			      RG_SYSPLL_EN_NORMAL | RG_SYSPLL_VODEN |
			      (1 << RG_SYSPLL_POSDIV_S));

	mt7530_core_reg_write(priv, CORE_PLL_GROUP7,
			      RG_LCDDS_PCW_NCPO_CHG | (3 << RG_LCCDS_C_S) |
			      RG_LCDDS_PWDB | RG_LCDDS_ISO_EN);

	/* Enable MT7530 core clock */
	mt7530_core_reg_write(priv, CORE_TRGMII_GSW_CLK_CG,
			      REG_GSWCK_EN | REG_TRGMIICK_EN);

	return 0;
}

static int mt7530_setup(struct mtk_eth_priv *priv)
{
	u16 phy_addr, phy_val;
	u32 val;
	int i;

	/* Select 250MHz clk for RGMII mode */
	mtk_ethsys_rmw(priv, ETHSYS_CLKCFG0_REG,
		       ETHSYS_TRGMII_CLK_SEL362_5, 0);

	/* Global reset switch */
	if (priv->mcm) {
		reset_assert(&priv->rst_mcm);
		udelay(1000);
		reset_deassert(&priv->rst_mcm);
		mdelay(1000);
	} else if (dm_gpio_is_valid(&priv->rst_gpio)) {
		dm_gpio_set_value(&priv->rst_gpio, 0);
		udelay(1000);
		dm_gpio_set_value(&priv->rst_gpio, 1);
		mdelay(1000);
	}

	/* Modify HWTRAP first to allow direct access to internal PHYs */
	mt7530_reg_read(priv, HWTRAP_REG, &val);
	val |= CHG_TRAP;
	val &= ~C_MDIO_BPS;
	mt7530_reg_write(priv, MHWTRAP_REG, val);

	/* Calculate the phy base address */
	val = ((val & SMI_ADDR_M) >> SMI_ADDR_S) << 3;
	priv->mt7530_phy_base = (val | 0x7) + 1;

	/* Turn off PHYs */
	for (i = 0; i < MT7530_NUM_PHYS; i++) {
		phy_addr = MT7530_PHY_ADDR(priv->mt7530_phy_base, i);
		phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
		phy_val |= BMCR_PDOWN;
		priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
	}

	/* Force MAC link down before reset */
	mt7530_reg_write(priv, PCMR_REG(5), FORCE_MODE);
	mt7530_reg_write(priv, PCMR_REG(6), FORCE_MODE);

	/* MT7530 reset */
	mt7530_reg_write(priv, SYS_CTRL_REG, SW_SYS_RST | SW_REG_RST);
	udelay(100);

	val = (1 << IPG_CFG_S) |
	      MAC_MODE | FORCE_MODE |
	      MAC_TX_EN | MAC_RX_EN |
	      BKOFF_EN | BACKPR_EN |
	      (SPEED_1000M << FORCE_SPD_S) |
	      FORCE_DPX | FORCE_LINK;

	/* MT7530 Port6: Forced 1000M/FD, FC disabled */
	mt7530_reg_write(priv, PCMR_REG(6), val);

	/* MT7530 Port5: Forced link down */
	mt7530_reg_write(priv, PCMR_REG(5), FORCE_MODE);

	/* MT7530 Port6: Set to RGMII */
	mt7530_reg_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_M, P6_INTF_MODE_RGMII);

	/* Hardware Trap: Enable Port6, Disable Port5 */
	mt7530_reg_read(priv, HWTRAP_REG, &val);
	val |= CHG_TRAP | LOOPDET_DIS | P5_INTF_DIS |
	       (P5_INTF_SEL_GMAC5 << P5_INTF_SEL_S) |
	       (P5_INTF_MODE_RGMII << P5_INTF_MODE_S);
	val &= ~(C_MDIO_BPS | P6_INTF_DIS);
	mt7530_reg_write(priv, MHWTRAP_REG, val);

	/* Setup switch core pll */
	mt7530_pad_clk_setup(priv, priv->phy_interface);

	/* Lower Tx Driving for TRGMII path */
	for (i = 0 ; i < NUM_TRGMII_CTRL ; i++)
		mt7530_reg_write(priv, MT7530_TRGMII_TD_ODT(i),
				 (8 << TD_DM_DRVP_S) | (8 << TD_DM_DRVN_S));

	for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
		mt7530_reg_rmw(priv, MT7530_TRGMII_RD(i), RD_TAP_M, 16);

	/* Turn on PHYs */
	for (i = 0; i < MT7530_NUM_PHYS; i++) {
		phy_addr = MT7530_PHY_ADDR(priv->mt7530_phy_base, i);
		phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
		phy_val &= ~BMCR_PDOWN;
		priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
	}
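
	/*
	 * Port matrix note for the loop below: PORT_MATRIX is a bitmap of
	 * allowed destination ports, so 0x40 (bit 6 set) lets a port
	 * forward only towards CPU port 6, while 0x3f (bits 0-5) lets the
	 * CPU port reach all user ports.
	 */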
	/* Set port isolation */
	for (i = 0; i < 8; i++) {
		/* Set port matrix mode */
		if (i != 6)
			mt7530_reg_write(priv, PCR_REG(i),
					 (0x40 << PORT_MATRIX_S));
		else
			mt7530_reg_write(priv, PCR_REG(i),
					 (0x3f << PORT_MATRIX_S));

		/* Set port mode to user port */
		mt7530_reg_write(priv, PVC_REG(i),
				 (0x8100 << STAG_VPID_S) |
				 (VLAN_ATTR_USER << VLAN_ATTR_S));
	}

	return 0;
}

static void mtk_phy_link_adjust(struct mtk_eth_priv *priv)
{
	u16 lcl_adv = 0, rmt_adv = 0;
	u8 flowctrl;
	u32 mcr;

	mcr = (1 << IPG_CFG_S) |
	      (MAC_RX_PKT_LEN_1536 << MAC_RX_PKT_LEN_S) |
	      MAC_MODE | FORCE_MODE |
	      MAC_TX_EN | MAC_RX_EN |
	      BKOFF_EN | BACKPR_EN;

	switch (priv->phydev->speed) {
	case SPEED_10:
		mcr |= (SPEED_10M << FORCE_SPD_S);
		break;
	case SPEED_100:
		mcr |= (SPEED_100M << FORCE_SPD_S);
		break;
	case SPEED_1000:
		mcr |= (SPEED_1000M << FORCE_SPD_S);
		break;
	}

	if (priv->phydev->link)
		mcr |= FORCE_LINK;

	if (priv->phydev->duplex) {
		mcr |= FORCE_DPX;

		if (priv->phydev->pause)
			rmt_adv = LPA_PAUSE_CAP;
		if (priv->phydev->asym_pause)
			rmt_adv |= LPA_PAUSE_ASYM;

		if (priv->phydev->advertising & ADVERTISED_Pause)
			lcl_adv |= ADVERTISE_PAUSE_CAP;
		if (priv->phydev->advertising & ADVERTISED_Asym_Pause)
			lcl_adv |= ADVERTISE_PAUSE_ASYM;

		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);

		if (flowctrl & FLOW_CTRL_TX)
			mcr |= FORCE_TX_FC;
		if (flowctrl & FLOW_CTRL_RX)
			mcr |= FORCE_RX_FC;

		debug("rx pause %s, tx pause %s\n",
		      flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled",
		      flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled");
	}

	mtk_gmac_write(priv, GMAC_PORT_MCR(priv->gmac_id), mcr);
}

static int mtk_phy_start(struct mtk_eth_priv *priv)
{
	struct phy_device *phydev = priv->phydev;
	int ret;

	ret = phy_startup(phydev);
	if (ret) {
		debug("Could not initialize PHY %s\n", phydev->dev->name);
		return ret;
	}

	if (!phydev->link) {
		debug("%s: link down.\n", phydev->dev->name);
		return 0;
	}

	mtk_phy_link_adjust(priv);

	debug("Speed: %d, %s duplex%s\n", phydev->speed,
	      (phydev->duplex) ? "full" : "half",
	      (phydev->port == PORT_FIBRE) ? ", fiber mode" : "");

	return 0;
}

static int mtk_phy_probe(struct udevice *dev)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	struct phy_device *phydev;

	phydev = phy_connect(priv->mdio_bus, priv->phy_addr, dev,
			     priv->phy_interface);
	if (!phydev)
		return -ENODEV;

	phydev->supported &= PHY_GBIT_FEATURES;
	phydev->advertising = phydev->supported;

	priv->phydev = phydev;
	phy_config(phydev);

	return 0;
}
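
/*
 * Program the GMAC for the selected xMII mode. When the device tree
 * provides a fixed-link (force mode), the port MAC control register is
 * written here with the static speed/duplex settings; otherwise it is
 * left to mtk_phy_link_adjust(), which applies the autonegotiation
 * results after the PHY reports link up.
 */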
static void mtk_mac_init(struct mtk_eth_priv *priv)
{
	int i, ge_mode = 0;
	u32 mcr;

	switch (priv->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_SGMII:
		ge_mode = GE_MODE_RGMII;
		break;
	case PHY_INTERFACE_MODE_MII:
	case PHY_INTERFACE_MODE_GMII:
		ge_mode = GE_MODE_MII;
		break;
	case PHY_INTERFACE_MODE_RMII:
		ge_mode = GE_MODE_RMII;
		break;
	default:
		break;
	}

	/* set the gmac to the right mode */
	mtk_ethsys_rmw(priv, ETHSYS_SYSCFG0_REG,
		       SYSCFG0_GE_MODE_M << SYSCFG0_GE_MODE_S(priv->gmac_id),
		       ge_mode << SYSCFG0_GE_MODE_S(priv->gmac_id));

	if (priv->force_mode) {
		mcr = (1 << IPG_CFG_S) |
		      (MAC_RX_PKT_LEN_1536 << MAC_RX_PKT_LEN_S) |
		      MAC_MODE | FORCE_MODE |
		      MAC_TX_EN | MAC_RX_EN |
		      BKOFF_EN | BACKPR_EN |
		      FORCE_LINK;

		switch (priv->speed) {
		case SPEED_10:
			mcr |= SPEED_10M << FORCE_SPD_S;
			break;
		case SPEED_100:
			mcr |= SPEED_100M << FORCE_SPD_S;
			break;
		case SPEED_1000:
			mcr |= SPEED_1000M << FORCE_SPD_S;
			break;
		}

		if (priv->duplex)
			mcr |= FORCE_DPX;

		mtk_gmac_write(priv, GMAC_PORT_MCR(priv->gmac_id), mcr);
	}

	if (priv->soc == SOC_MT7623) {
		/* Lower Tx Driving for TRGMII path */
		for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
			mtk_gmac_write(priv, GMAC_TRGMII_TD_ODT(i),
				       (8 << TD_DM_DRVP_S) |
				       (8 << TD_DM_DRVN_S));

		mtk_gmac_rmw(priv, GMAC_TRGMII_RCK_CTRL, 0,
			     RX_RST | RXC_DQSISEL);
		mtk_gmac_rmw(priv, GMAC_TRGMII_RCK_CTRL, RX_RST, 0);
	}
}
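
/*
 * Set up the PDMA rings: the packet pool is split into NUM_TX_DESC TX
 * buffers followed by NUM_RX_DESC RX buffers, each PKTSIZE_ALIGN bytes.
 * TX descriptors start with DDONE set (owned by the CPU) and RX
 * descriptors with DDONE clear; programming RX_CRX_IDX to NUM_RX_DESC - 1
 * hands the whole RX ring to the DMA engine.
 */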
static void mtk_eth_fifo_init(struct mtk_eth_priv *priv)
{
	char *pkt_base = priv->pkt_pool;
	int i;

	mtk_pdma_rmw(priv, PDMA_GLO_CFG_REG, 0xffff0000, 0);
	udelay(500);

	memset(priv->tx_ring_noc, 0, NUM_TX_DESC * sizeof(struct pdma_txdesc));
	memset(priv->rx_ring_noc, 0, NUM_RX_DESC * sizeof(struct pdma_rxdesc));
	memset(priv->pkt_pool, 0, TOTAL_PKT_BUF_SIZE);

	flush_dcache_range((u32)pkt_base, (u32)(pkt_base + TOTAL_PKT_BUF_SIZE));

	priv->rx_dma_owner_idx0 = 0;
	priv->tx_cpu_owner_idx0 = 0;

	for (i = 0; i < NUM_TX_DESC; i++) {
		priv->tx_ring_noc[i].txd_info2.LS0 = 1;
		priv->tx_ring_noc[i].txd_info2.DDONE = 1;
		priv->tx_ring_noc[i].txd_info4.FPORT = priv->gmac_id + 1;

		priv->tx_ring_noc[i].txd_info1.SDP0 = virt_to_phys(pkt_base);
		pkt_base += PKTSIZE_ALIGN;
	}

	for (i = 0; i < NUM_RX_DESC; i++) {
		priv->rx_ring_noc[i].rxd_info2.PLEN0 = PKTSIZE_ALIGN;
		priv->rx_ring_noc[i].rxd_info1.PDP0 = virt_to_phys(pkt_base);
		pkt_base += PKTSIZE_ALIGN;
	}

	mtk_pdma_write(priv, TX_BASE_PTR_REG(0),
		       virt_to_phys(priv->tx_ring_noc));
	mtk_pdma_write(priv, TX_MAX_CNT_REG(0), NUM_TX_DESC);
	mtk_pdma_write(priv, TX_CTX_IDX_REG(0), priv->tx_cpu_owner_idx0);

	mtk_pdma_write(priv, RX_BASE_PTR_REG(0),
		       virt_to_phys(priv->rx_ring_noc));
	mtk_pdma_write(priv, RX_MAX_CNT_REG(0), NUM_RX_DESC);
	mtk_pdma_write(priv, RX_CRX_IDX_REG(0), NUM_RX_DESC - 1);

	mtk_pdma_write(priv, PDMA_RST_IDX_REG, RST_DTX_IDX0 | RST_DRX_IDX0);
}

static int mtk_eth_start(struct udevice *dev)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	int ret;

	/* Reset FE */
	reset_assert(&priv->rst_fe);
	udelay(1000);
	reset_deassert(&priv->rst_fe);
	mdelay(10);

	/* Packets forward to PDMA */
	mtk_gdma_write(priv, priv->gmac_id, GDMA_IG_CTRL_REG, GDMA_FWD_TO_CPU);

	if (priv->gmac_id == 0)
		mtk_gdma_write(priv, 1, GDMA_IG_CTRL_REG, GDMA_FWD_DISCARD);
	else
		mtk_gdma_write(priv, 0, GDMA_IG_CTRL_REG, GDMA_FWD_DISCARD);

	udelay(500);

	mtk_eth_fifo_init(priv);

	/* Start PHY */
	if (priv->sw == SW_NONE) {
		ret = mtk_phy_start(priv);
		if (ret)
			return ret;
	}

	mtk_pdma_rmw(priv, PDMA_GLO_CFG_REG, 0,
		     TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN);
	udelay(500);

	return 0;
}

static void mtk_eth_stop(struct udevice *dev)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);

	mtk_pdma_rmw(priv, PDMA_GLO_CFG_REG,
		     TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN, 0);
	udelay(500);

	wait_for_bit_le32(priv->fe_base + PDMA_BASE + PDMA_GLO_CFG_REG,
			  RX_DMA_BUSY | TX_DMA_BUSY, 0, 5000, 0);
}

static int mtk_eth_write_hwaddr(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	unsigned char *mac = pdata->enetaddr;
	u32 macaddr_lsb, macaddr_msb;

	macaddr_msb = ((u32)mac[0] << 8) | (u32)mac[1];
	macaddr_lsb = ((u32)mac[2] << 24) | ((u32)mac[3] << 16) |
		      ((u32)mac[4] << 8) | (u32)mac[5];

	mtk_gdma_write(priv, priv->gmac_id, GDMA_MAC_MSB_REG, macaddr_msb);
	mtk_gdma_write(priv, priv->gmac_id, GDMA_MAC_LSB_REG, macaddr_lsb);

	return 0;
}

static int mtk_eth_send(struct udevice *dev, void *packet, int length)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	u32 idx = priv->tx_cpu_owner_idx0;
	void *pkt_base;

	if (!priv->tx_ring_noc[idx].txd_info2.DDONE) {
		debug("mtk-eth: TX DMA descriptor ring is full\n");
		return -EPERM;
	}

	pkt_base = (void *)phys_to_virt(priv->tx_ring_noc[idx].txd_info1.SDP0);
	memcpy(pkt_base, packet, length);
	flush_dcache_range((u32)pkt_base, (u32)pkt_base +
			   roundup(length, ARCH_DMA_MINALIGN));

	priv->tx_ring_noc[idx].txd_info2.SDL0 = length;
	priv->tx_ring_noc[idx].txd_info2.DDONE = 0;

	priv->tx_cpu_owner_idx0 = (priv->tx_cpu_owner_idx0 + 1) % NUM_TX_DESC;
	mtk_pdma_write(priv, TX_CTX_IDX_REG(0), priv->tx_cpu_owner_idx0);

	return 0;
}

static int mtk_eth_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	u32 idx = priv->rx_dma_owner_idx0;
	uchar *pkt_base;
	u32 length;

	if (!priv->rx_ring_noc[idx].rxd_info2.DDONE) {
		debug("mtk-eth: RX DMA descriptor ring is empty\n");
		return -EAGAIN;
	}

	length = priv->rx_ring_noc[idx].rxd_info2.PLEN0;
	pkt_base = (void *)phys_to_virt(priv->rx_ring_noc[idx].rxd_info1.PDP0);
	invalidate_dcache_range((u32)pkt_base, (u32)pkt_base +
				roundup(length, ARCH_DMA_MINALIGN));

	if (packetp)
		*packetp = pkt_base;

	return length;
}
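
/*
 * Return an RX descriptor to the hardware after its payload has been
 * consumed: DDONE and LS0 are cleared, the buffer length is reset to
 * PKTSIZE_ALIGN and the CPU RX index register is advanced so the DMA
 * engine may reuse the slot.
 */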
static int mtk_eth_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	u32 idx = priv->rx_dma_owner_idx0;

	priv->rx_ring_noc[idx].rxd_info2.DDONE = 0;
	priv->rx_ring_noc[idx].rxd_info2.LS0 = 0;
	priv->rx_ring_noc[idx].rxd_info2.PLEN0 = PKTSIZE_ALIGN;
	mtk_pdma_write(priv, RX_CRX_IDX_REG(0), idx);

	priv->rx_dma_owner_idx0 = (priv->rx_dma_owner_idx0 + 1) % NUM_RX_DESC;

	return 0;
}

static int mtk_eth_probe(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	u32 iobase = pdata->iobase;
	int ret;

	/* Frame Engine Register Base */
	priv->fe_base = (void *)iobase;

	/* GMAC Register Base */
	priv->gmac_base = (void *)(iobase + GMAC_BASE);

	/* MDIO register */
	ret = mtk_mdio_register(dev);
	if (ret)
		return ret;

	/* Prepare for tx/rx rings */
	priv->tx_ring_noc = (struct pdma_txdesc *)
		noncached_alloc(sizeof(struct pdma_txdesc) * NUM_TX_DESC,
				ARCH_DMA_MINALIGN);
	priv->rx_ring_noc = (struct pdma_rxdesc *)
		noncached_alloc(sizeof(struct pdma_rxdesc) * NUM_RX_DESC,
				ARCH_DMA_MINALIGN);

	/* Set MAC mode */
	mtk_mac_init(priv);

	/* Probe phy if switch is not specified */
	if (priv->sw == SW_NONE)
		return mtk_phy_probe(dev);

	/* Initialize switch */
	return priv->switch_init(priv);
}

static int mtk_eth_remove(struct udevice *dev)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);

	/* MDIO unregister */
	mdio_unregister(priv->mdio_bus);
	mdio_free(priv->mdio_bus);

	/* Stop possibly started DMA */
	mtk_eth_stop(dev);

	return 0;
}
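
/*
 * Device tree properties consumed below (the exact node layout is
 * board-specific; this only summarizes what the parser expects):
 * "mediatek,ethsys" (phandle to the ethsys syscon), "mediatek,gmac-id"
 * (optional, defaults to 0), "phy-mode" (required), an optional
 * "fixed-link" subnode with "speed" and "full-duplex" for forced-mode
 * operation, and either "mediatek,switch" = "mt7530" (with "mediatek,mcm"
 * plus an "mcm" reset, or a "reset-gpios" line) or a "phy-handle"
 * pointing at a PHY node whose "reg" gives the MDIO address. An "fe"
 * reset control is always required.
 */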
static int mtk_eth_ofdata_to_platdata(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	struct ofnode_phandle_args args;
	struct regmap *regmap;
	const char *str;
	ofnode subnode;
	int ret;

	priv->soc = dev_get_driver_data(dev);

	pdata->iobase = devfdt_get_addr(dev);

	/* get corresponding ethsys phandle */
	ret = dev_read_phandle_with_args(dev, "mediatek,ethsys", NULL, 0, 0,
					 &args);
	if (ret)
		return ret;

	regmap = syscon_node_to_regmap(args.node);
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

	priv->ethsys_base = regmap_get_range(regmap, 0);
	if (!priv->ethsys_base) {
		dev_err(dev, "Unable to find ethsys\n");
		return -ENODEV;
	}

	/* Reset controllers */
	ret = reset_get_by_name(dev, "fe", &priv->rst_fe);
	if (ret) {
		printf("error: Unable to get reset ctrl for frame engine\n");
		return ret;
	}

	priv->gmac_id = dev_read_u32_default(dev, "mediatek,gmac-id", 0);

	/* Interface mode is required */
	str = dev_read_string(dev, "phy-mode");
	if (str) {
		pdata->phy_interface = phy_get_interface_by_name(str);
		priv->phy_interface = pdata->phy_interface;
	} else {
		printf("error: phy-mode is not set\n");
		return -EINVAL;
	}

	/* Force mode or autoneg */
	subnode = ofnode_find_subnode(dev_ofnode(dev), "fixed-link");
	if (ofnode_valid(subnode)) {
		priv->force_mode = 1;
		priv->speed = ofnode_read_u32_default(subnode, "speed", 0);
		priv->duplex = ofnode_read_bool(subnode, "full-duplex");

		if (priv->speed != SPEED_10 && priv->speed != SPEED_100 &&
		    priv->speed != SPEED_1000) {
			printf("error: no valid speed set in fixed-link\n");
			return -EINVAL;
		}
	}

	/* check for switch first, otherwise phy will be used */
	priv->sw = SW_NONE;
	priv->switch_init = NULL;
	str = dev_read_string(dev, "mediatek,switch");

	if (str) {
		if (!strcmp(str, "mt7530")) {
			priv->sw = SW_MT7530;
			priv->switch_init = mt7530_setup;
			priv->mt7530_smi_addr = MT7530_DFL_SMI_ADDR;
		} else {
			printf("error: unsupported switch\n");
			return -EINVAL;
		}

		priv->mcm = dev_read_bool(dev, "mediatek,mcm");
		if (priv->mcm) {
			ret = reset_get_by_name(dev, "mcm", &priv->rst_mcm);
			if (ret) {
				printf("error: no reset ctrl for mcm\n");
				return ret;
			}
		} else {
			gpio_request_by_name(dev, "reset-gpios", 0,
					     &priv->rst_gpio, GPIOD_IS_OUT);
		}
	} else {
		ret = dev_read_phandle_with_args(dev, "phy-handle", NULL, 0,
						 0, &args);
		if (ret) {
			printf("error: phy-handle is not specified\n");
			return ret;
		}

		priv->phy_addr = ofnode_read_s32_default(args.node, "reg", -1);
		if (priv->phy_addr < 0) {
			printf("error: phy address is not specified\n");
			return -ENODEV;
		}
	}

	return 0;
}

static const struct udevice_id mtk_eth_ids[] = {
	{ .compatible = "mediatek,mt7629-eth", .data = SOC_MT7629 },
	{ .compatible = "mediatek,mt7623-eth", .data = SOC_MT7623 },
	{}
};

static const struct eth_ops mtk_eth_ops = {
	.start = mtk_eth_start,
	.stop = mtk_eth_stop,
	.send = mtk_eth_send,
	.recv = mtk_eth_recv,
	.free_pkt = mtk_eth_free_pkt,
	.write_hwaddr = mtk_eth_write_hwaddr,
};

U_BOOT_DRIVER(mtk_eth) = {
	.name = "mtk-eth",
	.id = UCLASS_ETH,
	.of_match = mtk_eth_ids,
	.ofdata_to_platdata = mtk_eth_ofdata_to_platdata,
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
	.probe = mtk_eth_probe,
	.remove = mtk_eth_remove,
	.ops = &mtk_eth_ops,
	.priv_auto_alloc_size = sizeof(struct mtk_eth_priv),
	.flags = DM_FLAG_ALLOC_PRIV_DMA,
};