// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2005-2006 Atmel Corporation
 */
#include <common.h>
#include <clk.h>
#include <cpu_func.h>
#include <dm.h>
#include <log.h>
#include <linux/delay.h>

/*
 * The u-boot networking stack is a little weird. It seems like the
 * networking core allocates receive buffers up front without any
 * regard to the hardware that's supposed to actually receive those
 * packets.
 *
 * The MACB receives packets into 128-byte receive buffers, so the
 * buffers allocated by the core aren't very practical to use. We'll
 * allocate our own, but we need one such buffer in case a packet
 * wraps around the DMA ring so that we have to copy it.
 *
 * Therefore, define CONFIG_SYS_RX_ETH_BUFFER to 1 in the board-specific
 * configuration header. This way, the core allocates one RX buffer
 * and one TX buffer, each of which can hold an ethernet packet of
 * maximum size.
 *
 * For some reason, the networking core unconditionally specifies a
 * 32-byte packet "alignment" (which really should be called
 * "padding"). MACB shouldn't need that, but we'll refrain from any
 * core modifications here...
 */
#include <net.h>
#ifndef CONFIG_DM_ETH
#include <netdev.h>
#endif
#include <malloc.h>
#include <miiphy.h>

#include <linux/mii.h>
#include <asm/io.h>
#include <linux/dma-mapping.h>
#include <asm/arch/clk.h>
#include <linux/errno.h>

#include "macb.h"

DECLARE_GLOBAL_DATA_PTR;

/*
 * These buffer sizes must be power of 2 and divisible
 * by RX_BUFFER_MULTIPLE
 */
#define MACB_RX_BUFFER_SIZE		128
#define GEM_RX_BUFFER_SIZE		2048
#define RX_BUFFER_MULTIPLE		64

#define MACB_RX_RING_SIZE		32
#define MACB_TX_RING_SIZE		16

#define MACB_TX_TIMEOUT			1000
#define MACB_AUTONEG_TIMEOUT		5000000

#ifdef CONFIG_MACB_ZYNQ
/* INCR4 AHB bursts */
#define MACB_ZYNQ_GEM_DMACR_BLENGTH	0x00000004
/* Use full configured addressable space (8 Kb) */
#define MACB_ZYNQ_GEM_DMACR_RXSIZE	0x00000300
/* Use full configured addressable space (4 Kb) */
#define MACB_ZYNQ_GEM_DMACR_TXSIZE	0x00000400
/* Set RXBUF with use of 128 byte */
#define MACB_ZYNQ_GEM_DMACR_RXBUF	0x00020000
#define MACB_ZYNQ_GEM_DMACR_INIT \
				(MACB_ZYNQ_GEM_DMACR_BLENGTH | \
				 MACB_ZYNQ_GEM_DMACR_RXSIZE | \
				 MACB_ZYNQ_GEM_DMACR_TXSIZE | \
				 MACB_ZYNQ_GEM_DMACR_RXBUF)
#endif

struct macb_dma_desc {
	u32	addr;
	u32	ctrl;
};
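
/*
 * Descriptor bit usage in this driver (the field encodings come from
 * macb.h): on RX, the 'addr' word carries the buffer address plus the
 * RX_USED (ownership) and RX_WRAP bits, while 'ctrl' reports the frame
 * length and the SOF/EOF markers; on TX, 'ctrl' carries the frame length
 * plus TX_LAST, TX_WRAP and the TX_USED completion/status bits.
 */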

#define DMA_DESC_BYTES(n)	(n * sizeof(struct macb_dma_desc))
#define MACB_TX_DMA_DESC_SIZE	(DMA_DESC_BYTES(MACB_TX_RING_SIZE))
#define MACB_RX_DMA_DESC_SIZE	(DMA_DESC_BYTES(MACB_RX_RING_SIZE))
#define MACB_TX_DUMMY_DMA_DESC_SIZE	(DMA_DESC_BYTES(1))

#define RXBUF_FRMLEN_MASK	0x00000fff
#define TXBUF_FRMLEN_MASK	0x000007ff

struct macb_device {
	void			*regs;

	bool			is_big_endian;

	const struct macb_config *config;

	unsigned int		rx_tail;
	unsigned int		tx_head;
	unsigned int		tx_tail;
	unsigned int		next_rx_tail;
	bool			wrapped;

	void			*rx_buffer;
	void			*tx_buffer;
	struct macb_dma_desc	*rx_ring;
	struct macb_dma_desc	*tx_ring;
	size_t			rx_buffer_size;

	unsigned long		rx_buffer_dma;
	unsigned long		rx_ring_dma;
	unsigned long		tx_ring_dma;

	struct macb_dma_desc	*dummy_desc;
	unsigned long		dummy_desc_dma;

	const struct device	*dev;
#ifndef CONFIG_DM_ETH
	struct eth_device	netdev;
#endif
	unsigned short		phy_addr;
	struct mii_dev		*bus;
#ifdef CONFIG_PHYLIB
	struct phy_device	*phydev;
#endif

#ifdef CONFIG_DM_ETH
#ifdef CONFIG_CLK
	unsigned long		pclk_rate;
#endif
	phy_interface_t		phy_interface;
#endif
};

struct macb_config {
	unsigned int		dma_burst_length;

	int			(*clk_init)(struct udevice *dev, ulong rate);
};

#ifndef CONFIG_DM_ETH
#define to_macb(_nd) container_of(_nd, struct macb_device, netdev)
#endif
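
/*
 * The module ID field of the MID register distinguishes the gigabit-capable
 * Cadence GEM core from the older 10/100-only MACB core; module ID values
 * of 0x2 and above are treated as GEM here.
 */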

static int macb_is_gem(struct macb_device *macb)
{
	return MACB_BFEXT(IDNUM, macb_readl(macb, MID)) >= 0x2;
}

#ifndef cpu_is_sama5d2
#define cpu_is_sama5d2() 0
#endif

#ifndef cpu_is_sama5d4
#define cpu_is_sama5d4() 0
#endif

static int gem_is_gigabit_capable(struct macb_device *macb)
{
	/*
	 * The GEM controllers embedded in SAMA5D2 and SAMA5D4 are
	 * configured to support only 10/100.
	 */
	return macb_is_gem(macb) && !cpu_is_sama5d2() && !cpu_is_sama5d4();
}
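
/*
 * MDIO access goes through the MAN register: the frame written below
 * encodes a standard Clause 22 transaction (start-of-frame, read/write
 * opcode, PHY and register addresses, turnaround code and, for writes,
 * the data), and completion is detected by polling the IDLE bit in NSR.
 * The MPE bit in NCR must be set while the management port is in use.
 */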

static void macb_mdio_write(struct macb_device *macb, u8 phy_adr, u8 reg,
			    u16 value)
{
	unsigned long netctl;
	unsigned long netstat;
	unsigned long frame;

	netctl = macb_readl(macb, NCR);
	netctl |= MACB_BIT(MPE);
	macb_writel(macb, NCR, netctl);

	frame = (MACB_BF(SOF, 1)
		 | MACB_BF(RW, 1)
		 | MACB_BF(PHYA, phy_adr)
		 | MACB_BF(REGA, reg)
		 | MACB_BF(CODE, 2)
		 | MACB_BF(DATA, value));
	macb_writel(macb, MAN, frame);

	do {
		netstat = macb_readl(macb, NSR);
	} while (!(netstat & MACB_BIT(IDLE)));

	netctl = macb_readl(macb, NCR);
	netctl &= ~MACB_BIT(MPE);
	macb_writel(macb, NCR, netctl);
}

static u16 macb_mdio_read(struct macb_device *macb, u8 phy_adr, u8 reg)
{
	unsigned long netctl;
	unsigned long netstat;
	unsigned long frame;

	netctl = macb_readl(macb, NCR);
	netctl |= MACB_BIT(MPE);
	macb_writel(macb, NCR, netctl);

	frame = (MACB_BF(SOF, 1)
		 | MACB_BF(RW, 2)
		 | MACB_BF(PHYA, phy_adr)
		 | MACB_BF(REGA, reg)
		 | MACB_BF(CODE, 2));
	macb_writel(macb, MAN, frame);

	do {
		netstat = macb_readl(macb, NSR);
	} while (!(netstat & MACB_BIT(IDLE)));

	frame = macb_readl(macb, MAN);

	netctl = macb_readl(macb, NCR);
	netctl &= ~MACB_BIT(MPE);
	macb_writel(macb, NCR, netctl);

	return MACB_BFEXT(DATA, frame);
}

void __weak arch_get_mdio_control(const char *name)
{
	return;
}

#if defined(CONFIG_CMD_MII) || defined(CONFIG_PHYLIB)

int macb_miiphy_read(struct mii_dev *bus, int phy_adr, int devad, int reg)
{
	u16 value = 0;
#ifdef CONFIG_DM_ETH
	struct udevice *dev = eth_get_dev_by_name(bus->name);
	struct macb_device *macb = dev_get_priv(dev);
#else
	struct eth_device *dev = eth_get_dev_by_name(bus->name);
	struct macb_device *macb = to_macb(dev);
#endif

	arch_get_mdio_control(bus->name);
	value = macb_mdio_read(macb, phy_adr, reg);

	return value;
}

int macb_miiphy_write(struct mii_dev *bus, int phy_adr, int devad, int reg,
		      u16 value)
{
#ifdef CONFIG_DM_ETH
	struct udevice *dev = eth_get_dev_by_name(bus->name);
	struct macb_device *macb = dev_get_priv(dev);
#else
	struct eth_device *dev = eth_get_dev_by_name(bus->name);
	struct macb_device *macb = to_macb(dev);
#endif

	arch_get_mdio_control(bus->name);
	macb_mdio_write(macb, phy_adr, reg, value);

	return 0;
}
#endif

#define RX	1
#define TX	0
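
/*
 * The descriptor rings and RX buffers live in normal (cached) memory, so
 * the helpers below flush dirty cache lines before the controller reads a
 * region and invalidate them before the CPU reads data the controller has
 * written; the ranges are rounded up to PKTALIGN to keep the maintenance
 * operations cache-line aligned.
 */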

static inline void macb_invalidate_ring_desc(struct macb_device *macb, bool rx)
{
	if (rx)
		invalidate_dcache_range(macb->rx_ring_dma,
			ALIGN(macb->rx_ring_dma + MACB_RX_DMA_DESC_SIZE,
			      PKTALIGN));
	else
		invalidate_dcache_range(macb->tx_ring_dma,
			ALIGN(macb->tx_ring_dma + MACB_TX_DMA_DESC_SIZE,
			      PKTALIGN));
}

static inline void macb_flush_ring_desc(struct macb_device *macb, bool rx)
{
	if (rx)
		flush_dcache_range(macb->rx_ring_dma, macb->rx_ring_dma +
				   ALIGN(MACB_RX_DMA_DESC_SIZE, PKTALIGN));
	else
		flush_dcache_range(macb->tx_ring_dma, macb->tx_ring_dma +
				   ALIGN(MACB_TX_DMA_DESC_SIZE, PKTALIGN));
}

static inline void macb_flush_rx_buffer(struct macb_device *macb)
{
	flush_dcache_range(macb->rx_buffer_dma, macb->rx_buffer_dma +
			   ALIGN(macb->rx_buffer_size * MACB_RX_RING_SIZE,
				 PKTALIGN));
}

static inline void macb_invalidate_rx_buffer(struct macb_device *macb)
{
	invalidate_dcache_range(macb->rx_buffer_dma, macb->rx_buffer_dma +
				ALIGN(macb->rx_buffer_size * MACB_RX_RING_SIZE,
				      PKTALIGN));
}

#if defined(CONFIG_CMD_NET)

static int _macb_send(struct macb_device *macb, const char *name, void *packet,
		      int length)
{
	unsigned long paddr, ctrl;
	unsigned int tx_head = macb->tx_head;
	int i;

	paddr = dma_map_single(packet, length, DMA_TO_DEVICE);

	ctrl = length & TXBUF_FRMLEN_MASK;
	ctrl |= MACB_BIT(TX_LAST);
	if (tx_head == (MACB_TX_RING_SIZE - 1)) {
		ctrl |= MACB_BIT(TX_WRAP);
		macb->tx_head = 0;
	} else {
		macb->tx_head++;
	}

	macb->tx_ring[tx_head].ctrl = ctrl;
	macb->tx_ring[tx_head].addr = paddr;
	barrier();
	macb_flush_ring_desc(macb, TX);
	macb_writel(macb, NCR, MACB_BIT(TE) | MACB_BIT(RE) | MACB_BIT(TSTART));

	/*
	 * I guess this is necessary because the networking core may
	 * re-use the transmit buffer as soon as we return...
	 */
	for (i = 0; i <= MACB_TX_TIMEOUT; i++) {
		barrier();
		macb_invalidate_ring_desc(macb, TX);
		ctrl = macb->tx_ring[tx_head].ctrl;
		if (ctrl & MACB_BIT(TX_USED))
			break;
		udelay(1);
	}

	dma_unmap_single(paddr, length, DMA_TO_DEVICE);

	if (i <= MACB_TX_TIMEOUT) {
		if (ctrl & MACB_BIT(TX_UNDERRUN))
			printf("%s: TX underrun\n", name);
		if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
			printf("%s: TX buffers exhausted in mid frame\n", name);
	} else {
		printf("%s: TX timeout\n", name);
	}

	/* No one cares anyway */
	return 0;
}

static void reclaim_rx_buffers(struct macb_device *macb,
			       unsigned int new_tail)
{
	unsigned int i;

	i = macb->rx_tail;

	macb_invalidate_ring_desc(macb, RX);
	while (i > new_tail) {
		macb->rx_ring[i].addr &= ~MACB_BIT(RX_USED);
		i++;
		/* wrap around; valid indices are 0..MACB_RX_RING_SIZE - 1 */
		if (i >= MACB_RX_RING_SIZE)
			i = 0;
	}

	while (i < new_tail) {
		macb->rx_ring[i].addr &= ~MACB_BIT(RX_USED);
		i++;
	}

	barrier();
	macb_flush_ring_desc(macb, RX);
	macb->rx_tail = new_tail;
}
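
/*
 * Receive path bookkeeping: rx_tail is the first descriptor that has not
 * yet been handed back to the controller, while next_rx_tail tracks how
 * far we have scanned for a complete frame. A frame whose descriptors
 * wrap past the end of the ring is copied into net_rx_packets[0] so the
 * caller always sees one contiguous buffer.
 */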

static int _macb_recv(struct macb_device *macb, uchar **packetp)
{
	unsigned int next_rx_tail = macb->next_rx_tail;
	void *buffer;
	int length;
	u32 status;

	macb->wrapped = false;
	for (;;) {
		macb_invalidate_ring_desc(macb, RX);

		if (!(macb->rx_ring[next_rx_tail].addr & MACB_BIT(RX_USED)))
			return -EAGAIN;

		status = macb->rx_ring[next_rx_tail].ctrl;
		if (status & MACB_BIT(RX_SOF)) {
			if (next_rx_tail != macb->rx_tail)
				reclaim_rx_buffers(macb, next_rx_tail);
			macb->wrapped = false;
		}

		if (status & MACB_BIT(RX_EOF)) {
			buffer = macb->rx_buffer +
				macb->rx_buffer_size * macb->rx_tail;
			length = status & RXBUF_FRMLEN_MASK;

			macb_invalidate_rx_buffer(macb);
			if (macb->wrapped) {
				unsigned int headlen, taillen;

				headlen = macb->rx_buffer_size *
					(MACB_RX_RING_SIZE - macb->rx_tail);
				taillen = length - headlen;
				memcpy((void *)net_rx_packets[0],
				       buffer, headlen);
				memcpy((void *)net_rx_packets[0] + headlen,
				       macb->rx_buffer, taillen);
				*packetp = (void *)net_rx_packets[0];
			} else {
				*packetp = buffer;
			}

			if (++next_rx_tail >= MACB_RX_RING_SIZE)
				next_rx_tail = 0;
			macb->next_rx_tail = next_rx_tail;
			return length;
		} else {
			if (++next_rx_tail >= MACB_RX_RING_SIZE) {
				macb->wrapped = true;
				next_rx_tail = 0;
			}
		}
		barrier();
	}
}

static void macb_phy_reset(struct macb_device *macb, const char *name)
{
	int i;
	u16 status, adv;

	adv = ADVERTISE_CSMA | ADVERTISE_ALL;
	macb_mdio_write(macb, macb->phy_addr, MII_ADVERTISE, adv);
	printf("%s: Starting autonegotiation...\n", name);
	macb_mdio_write(macb, macb->phy_addr, MII_BMCR, (BMCR_ANENABLE
							 | BMCR_ANRESTART));

	for (i = 0; i < MACB_AUTONEG_TIMEOUT / 100; i++) {
		status = macb_mdio_read(macb, macb->phy_addr, MII_BMSR);
		if (status & BMSR_ANEGCOMPLETE)
			break;
		udelay(100);
	}

	if (status & BMSR_ANEGCOMPLETE)
		printf("%s: Autonegotiation complete\n", name);
	else
		printf("%s: Autonegotiation timed out (status=0x%04x)\n",
		       name, status);
}

static int macb_phy_find(struct macb_device *macb, const char *name)
{
	int i;
	u16 phy_id;

	/* Search for PHY... */
	for (i = 0; i < 32; i++) {
		macb->phy_addr = i;
		phy_id = macb_mdio_read(macb, macb->phy_addr, MII_PHYSID1);
		if (phy_id != 0xffff) {
			printf("%s: PHY present at %d\n", name, i);
			return 0;
		}
	}

	/* PHY isn't up to snuff */
	printf("%s: PHY not found\n", name);

	return -ENODEV;
}

/**
 * macb_linkspd_cb - Link speed change callback
 * @dev/@regs:	MACB udevice (DM version) or base register address of the
 *		MACB device (non-DM version)
 * @speed:	Link speed
 *
 * Return: 0 on success, negative errno on failure.
 */
#ifdef CONFIG_DM_ETH
static int macb_sifive_clk_init(struct udevice *dev, ulong rate)
{
	fdt_addr_t addr;
	void *gemgxl_regs;

	addr = dev_read_addr_index(dev, 1);
	if (addr == FDT_ADDR_T_NONE)
		return -ENODEV;

	gemgxl_regs = (void __iomem *)addr;
	if (!gemgxl_regs)
		return -ENODEV;

	/*
	 * SiFive GEMGXL TX clock operation mode:
	 *
	 * 0 = GMII mode. Use 125 MHz gemgxlclk from PRCI in TX logic
	 *     and output clock on GMII output signal GTX_CLK
	 * 1 = MII mode. Use MII input signal TX_CLK in TX logic
	 */
	writel(rate != 125000000, gemgxl_regs);
	return 0;
}

int __weak macb_linkspd_cb(struct udevice *dev, unsigned int speed)
{
#ifdef CONFIG_CLK
	struct macb_device *macb = dev_get_priv(dev);
	struct clk tx_clk;
	ulong rate;
	int ret;

	switch (speed) {
	case _10BASET:
		rate = 2500000;		/* 2.5 MHz */
		break;
	case _100BASET:
		rate = 25000000;	/* 25 MHz */
		break;
	case _1000BASET:
		rate = 125000000;	/* 125 MHz */
		break;
	default:
		/* does not change anything */
		return 0;
	}

	if (macb->config->clk_init)
		return macb->config->clk_init(dev, rate);

	/*
	 * "tx_clk" is an optional clock source for MACB.
	 * Ignore if it does not exist in DT.
	 */
	ret = clk_get_by_name(dev, "tx_clk", &tx_clk);
	if (ret)
		return 0;

	if (tx_clk.dev) {
		ret = clk_set_rate(&tx_clk, rate);
		if (ret)
			return ret;
	}
#endif

	return 0;
}
#else
int __weak macb_linkspd_cb(void *regs, unsigned int speed)
{
	return 0;
}
#endif

#ifdef CONFIG_DM_ETH
static int macb_phy_init(struct udevice *dev, const char *name)
#else
static int macb_phy_init(struct macb_device *macb, const char *name)
#endif
{
#ifdef CONFIG_DM_ETH
	struct macb_device *macb = dev_get_priv(dev);
#endif
	u32 ncfgr;
	u16 phy_id, status, adv, lpa;
	int media, speed, duplex;
	int ret;
	int i;

	arch_get_mdio_control(name);
	/* Auto-detect phy_addr */
	ret = macb_phy_find(macb, name);
	if (ret)
		return ret;

	/* Check if the PHY is up to snuff... */
	phy_id = macb_mdio_read(macb, macb->phy_addr, MII_PHYSID1);
	if (phy_id == 0xffff) {
		printf("%s: No PHY present\n", name);
		return -ENODEV;
	}

#ifdef CONFIG_PHYLIB
#ifdef CONFIG_DM_ETH
	macb->phydev = phy_connect(macb->bus, macb->phy_addr, dev,
				   macb->phy_interface);
#else
	/* need to consider other phy interface mode */
	macb->phydev = phy_connect(macb->bus, macb->phy_addr, &macb->netdev,
				   PHY_INTERFACE_MODE_RGMII);
#endif
	if (!macb->phydev) {
		printf("phy_connect failed\n");
		return -ENODEV;
	}

	phy_config(macb->phydev);
#endif

	status = macb_mdio_read(macb, macb->phy_addr, MII_BMSR);
	if (!(status & BMSR_LSTATUS)) {
		/* Try to re-negotiate if we don't have link already. */
		macb_phy_reset(macb, name);

		for (i = 0; i < MACB_AUTONEG_TIMEOUT / 100; i++) {
			status = macb_mdio_read(macb, macb->phy_addr, MII_BMSR);
			if (status & BMSR_LSTATUS) {
				/*
				 * Delay a bit after the link is established,
				 * so that the next xfer does not fail
				 */
				mdelay(10);
				break;
			}
			udelay(100);
		}
	}

	if (!(status & BMSR_LSTATUS)) {
		printf("%s: link down (status: 0x%04x)\n",
		       name, status);
		return -ENETDOWN;
	}

	/* First check for GMAC and that it is GiB capable */
	if (gem_is_gigabit_capable(macb)) {
		lpa = macb_mdio_read(macb, macb->phy_addr, MII_STAT1000);

		if (lpa & (LPA_1000FULL | LPA_1000HALF | LPA_1000XFULL |
			   LPA_1000XHALF)) {
			duplex = ((lpa & (LPA_1000FULL | LPA_1000XFULL)) ?
				  1 : 0);

			printf("%s: link up, 1000Mbps %s-duplex (lpa: 0x%04x)\n",
			       name,
			       duplex ? "full" : "half",
			       lpa);

			ncfgr = macb_readl(macb, NCFGR);
			ncfgr &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
			ncfgr |= GEM_BIT(GBE);

			if (duplex)
				ncfgr |= MACB_BIT(FD);

			macb_writel(macb, NCFGR, ncfgr);

#ifdef CONFIG_DM_ETH
			ret = macb_linkspd_cb(dev, _1000BASET);
#else
			ret = macb_linkspd_cb(macb->regs, _1000BASET);
#endif
			if (ret)
				return ret;

			return 0;
		}
	}

	/* fall back for EMAC checking */
	adv = macb_mdio_read(macb, macb->phy_addr, MII_ADVERTISE);
	lpa = macb_mdio_read(macb, macb->phy_addr, MII_LPA);
	media = mii_nway_result(lpa & adv);
	speed = (media & (ADVERTISE_100FULL | ADVERTISE_100HALF)
		 ? 1 : 0);
	duplex = (media & ADVERTISE_FULL) ? 1 : 0;
	printf("%s: link up, %sMbps %s-duplex (lpa: 0x%04x)\n",
	       name,
	       speed ? "100" : "10",
	       duplex ? "full" : "half",
	       lpa);

	ncfgr = macb_readl(macb, NCFGR);
	ncfgr &= ~(MACB_BIT(SPD) | MACB_BIT(FD) | GEM_BIT(GBE));
	if (speed) {
		ncfgr |= MACB_BIT(SPD);
#ifdef CONFIG_DM_ETH
		ret = macb_linkspd_cb(dev, _100BASET);
#else
		ret = macb_linkspd_cb(macb->regs, _100BASET);
#endif
	} else {
#ifdef CONFIG_DM_ETH
		ret = macb_linkspd_cb(dev, _10BASET);
#else
		ret = macb_linkspd_cb(macb->regs, _10BASET);
#endif
	}

	if (ret)
		return ret;

	if (duplex)
		ncfgr |= MACB_BIT(FD);

	macb_writel(macb, NCFGR, ncfgr);

	return 0;
}
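
/*
 * GEM variants can implement several priority TX queues. Only queue 0 is
 * used here, so every additional queue reported by DCFG6 is pointed at a
 * single dummy descriptor (marked as used) to keep the controller from
 * fetching descriptors from uninitialized memory.
 */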

static int gmac_init_multi_queues(struct macb_device *macb)
{
	int i, num_queues = 1;
	u32 queue_mask;

	/* bit 0 is never set but queue 0 always exists */
	queue_mask = gem_readl(macb, DCFG6) & 0xff;
	queue_mask |= 0x1;

	for (i = 1; i < MACB_MAX_QUEUES; i++)
		if (queue_mask & (1 << i))
			num_queues++;

	macb->dummy_desc->ctrl = MACB_BIT(TX_USED);
	macb->dummy_desc->addr = 0;
	flush_dcache_range(macb->dummy_desc_dma, macb->dummy_desc_dma +
			   ALIGN(MACB_TX_DUMMY_DMA_DESC_SIZE, PKTALIGN));

	for (i = 1; i < num_queues; i++)
		gem_writel_queue_TBQP(macb, macb->dummy_desc_dma, i - 1);

	return 0;
}
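
/*
 * Note that the RXBS field of DMACFG expresses the receive buffer size in
 * units of RX_BUFFER_MULTIPLE (64 bytes), which is why rx_buffer_size is
 * divided down before being programmed.
 */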

static void gmac_configure_dma(struct macb_device *macb)
{
	u32 buffer_size;
	u32 dmacfg;

	buffer_size = macb->rx_buffer_size / RX_BUFFER_MULTIPLE;
	dmacfg = gem_readl(macb, DMACFG) & ~GEM_BF(RXBS, -1L);
	dmacfg |= GEM_BF(RXBS, buffer_size);

	if (macb->config->dma_burst_length)
		dmacfg = GEM_BFINS(FBLDO,
				   macb->config->dma_burst_length, dmacfg);

	dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
	dmacfg &= ~GEM_BIT(ENDIA_PKT);

	if (macb->is_big_endian)
		dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */
	else
		dmacfg &= ~GEM_BIT(ENDIA_DESC);

	dmacfg &= ~GEM_BIT(ADDR64);
	gem_writel(macb, DMACFG, dmacfg);
}
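
/*
 * Bring the controller up: build the RX ring (one fixed-size buffer per
 * descriptor, last entry marked with RX_WRAP), mark every TX descriptor
 * as used/free, program the ring base registers, select the PHY interface
 * mode and finally run PHY autonegotiation before enabling the
 * transmitter and receiver.
 */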

#ifdef CONFIG_DM_ETH
static int _macb_init(struct udevice *dev, const char *name)
#else
static int _macb_init(struct macb_device *macb, const char *name)
#endif
{
#ifdef CONFIG_DM_ETH
	struct macb_device *macb = dev_get_priv(dev);
#endif
	unsigned long paddr;
	int ret;
	int i;

	/*
	 * macb_halt should have been called at some point before now,
	 * so we'll assume the controller is idle.
	 */

	/* initialize DMA descriptors */
	paddr = macb->rx_buffer_dma;
	for (i = 0; i < MACB_RX_RING_SIZE; i++) {
		if (i == (MACB_RX_RING_SIZE - 1))
			paddr |= MACB_BIT(RX_WRAP);
		macb->rx_ring[i].addr = paddr;
		macb->rx_ring[i].ctrl = 0;
		paddr += macb->rx_buffer_size;
	}
	macb_flush_ring_desc(macb, RX);
	macb_flush_rx_buffer(macb);

	for (i = 0; i < MACB_TX_RING_SIZE; i++) {
		macb->tx_ring[i].addr = 0;
		if (i == (MACB_TX_RING_SIZE - 1))
			macb->tx_ring[i].ctrl = MACB_BIT(TX_USED) |
				MACB_BIT(TX_WRAP);
		else
			macb->tx_ring[i].ctrl = MACB_BIT(TX_USED);
	}
	macb_flush_ring_desc(macb, TX);

	macb->rx_tail = 0;
	macb->tx_head = 0;
	macb->tx_tail = 0;
	macb->next_rx_tail = 0;

#ifdef CONFIG_MACB_ZYNQ
	gem_writel(macb, DMACFG, MACB_ZYNQ_GEM_DMACR_INIT);
#endif

	macb_writel(macb, RBQP, macb->rx_ring_dma);
	macb_writel(macb, TBQP, macb->tx_ring_dma);

	if (macb_is_gem(macb)) {
		/* Initialize DMA properties */
		gmac_configure_dma(macb);
		/* Check the multi queue and initialize the queue for tx */
		gmac_init_multi_queues(macb);

		/*
		 * When the GMAC IP has the GE feature, this bit selects the
		 * interface between RGMII and GMII. Without the GE feature,
		 * it selects between RMII and MII.
		 */
#ifdef CONFIG_DM_ETH
		if ((macb->phy_interface == PHY_INTERFACE_MODE_RMII) ||
		    (macb->phy_interface == PHY_INTERFACE_MODE_RGMII))
			gem_writel(macb, USRIO, GEM_BIT(RGMII));
		else
			gem_writel(macb, USRIO, 0);

		if (macb->phy_interface == PHY_INTERFACE_MODE_SGMII) {
			unsigned int ncfgr = macb_readl(macb, NCFGR);

			ncfgr |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
			macb_writel(macb, NCFGR, ncfgr);
		}
#else
#if defined(CONFIG_RGMII) || defined(CONFIG_RMII)
		gem_writel(macb, USRIO, GEM_BIT(RGMII));
#else
		gem_writel(macb, USRIO, 0);
#endif
#endif
	} else {
		/* choose RMII or MII mode. This depends on the board */
#ifdef CONFIG_DM_ETH
#ifdef CONFIG_AT91FAMILY
		if (macb->phy_interface == PHY_INTERFACE_MODE_RMII) {
			macb_writel(macb, USRIO,
				    MACB_BIT(RMII) | MACB_BIT(CLKEN));
		} else {
			macb_writel(macb, USRIO, MACB_BIT(CLKEN));
		}
#else
		if (macb->phy_interface == PHY_INTERFACE_MODE_RMII)
			macb_writel(macb, USRIO, 0);
		else
			macb_writel(macb, USRIO, MACB_BIT(MII));
#endif
#else
#ifdef CONFIG_RMII
#ifdef CONFIG_AT91FAMILY
		macb_writel(macb, USRIO, MACB_BIT(RMII) | MACB_BIT(CLKEN));
#else
		macb_writel(macb, USRIO, 0);
#endif
#else
#ifdef CONFIG_AT91FAMILY
		macb_writel(macb, USRIO, MACB_BIT(CLKEN));
#else
		macb_writel(macb, USRIO, MACB_BIT(MII));
#endif
#endif /* CONFIG_RMII */
#endif
	}

#ifdef CONFIG_DM_ETH
	ret = macb_phy_init(dev, name);
#else
	ret = macb_phy_init(macb, name);
#endif
	if (ret)
		return ret;

	/* Enable TX and RX */
	macb_writel(macb, NCR, MACB_BIT(TE) | MACB_BIT(RE));

	return 0;
}

static void _macb_halt(struct macb_device *macb)
{
	u32 ncr, tsr;

	/* Halt the controller and wait for any ongoing transmission to end. */
	ncr = macb_readl(macb, NCR);
	ncr |= MACB_BIT(THALT);
	macb_writel(macb, NCR, ncr);

	do {
		tsr = macb_readl(macb, TSR);
	} while (tsr & MACB_BIT(TGO));

	/* Disable TX and RX, and clear statistics */
	macb_writel(macb, NCR, MACB_BIT(CLRSTAT));
}

static int _macb_write_hwaddr(struct macb_device *macb, unsigned char *enetaddr)
{
	u32 hwaddr_bottom;
	u16 hwaddr_top;

	/* set hardware address */
	hwaddr_bottom = enetaddr[0] | enetaddr[1] << 8 |
			enetaddr[2] << 16 | enetaddr[3] << 24;
	macb_writel(macb, SA1B, hwaddr_bottom);
	hwaddr_top = enetaddr[4] | enetaddr[5] << 8;
	macb_writel(macb, SA1T, hwaddr_top);

	return 0;
}
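
/*
 * Pick an MDC divider so that the management clock derived from pclk stays
 * within the range PHYs expect (IEEE 802.3 specifies a nominal maximum of
 * 2.5 MHz for MDC); GEM provides a wider set of divider values than the
 * original MACB.
 */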

static u32 macb_mdc_clk_div(int id, struct macb_device *macb)
{
	u32 config;
#if defined(CONFIG_DM_ETH) && defined(CONFIG_CLK)
	unsigned long macb_hz = macb->pclk_rate;
#else
	unsigned long macb_hz = get_macb_pclk_rate(id);
#endif

	if (macb_hz < 20000000)
		config = MACB_BF(CLK, MACB_CLK_DIV8);
	else if (macb_hz < 40000000)
		config = MACB_BF(CLK, MACB_CLK_DIV16);
	else if (macb_hz < 80000000)
		config = MACB_BF(CLK, MACB_CLK_DIV32);
	else
		config = MACB_BF(CLK, MACB_CLK_DIV64);

	return config;
}

static u32 gem_mdc_clk_div(int id, struct macb_device *macb)
{
	u32 config;
#if defined(CONFIG_DM_ETH) && defined(CONFIG_CLK)
	unsigned long macb_hz = macb->pclk_rate;
#else
	unsigned long macb_hz = get_macb_pclk_rate(id);
#endif

	if (macb_hz < 20000000)
		config = GEM_BF(CLK, GEM_CLK_DIV8);
	else if (macb_hz < 40000000)
		config = GEM_BF(CLK, GEM_CLK_DIV16);
	else if (macb_hz < 80000000)
		config = GEM_BF(CLK, GEM_CLK_DIV32);
	else if (macb_hz < 120000000)
		config = GEM_BF(CLK, GEM_CLK_DIV48);
	else if (macb_hz < 160000000)
		config = GEM_BF(CLK, GEM_CLK_DIV64);
	else if (macb_hz < 240000000)
		config = GEM_BF(CLK, GEM_CLK_DIV96);
	else if (macb_hz < 320000000)
		config = GEM_BF(CLK, GEM_CLK_DIV128);
	else
		config = GEM_BF(CLK, GEM_CLK_DIV224);

	return config;
}

/*
 * Get the DMA bus width field of the network configuration register that we
 * should program. We find the width from decoding the design configuration
 * register to find the maximum supported data bus width.
 */
static u32 macb_dbw(struct macb_device *macb)
{
	switch (GEM_BFEXT(DBWDEF, gem_readl(macb, DCFG1))) {
	case 4:
		return GEM_BF(DBW, GEM_DBW128);
	case 2:
		return GEM_BF(DBW, GEM_DBW64);
	case 1:
	default:
		return GEM_BF(DBW, GEM_DBW32);
	}
}

static void _macb_eth_initialize(struct macb_device *macb)
{
	int id = 0;	/* This is not used by functions we call */
	u32 ncfgr;

	if (macb_is_gem(macb))
		macb->rx_buffer_size = GEM_RX_BUFFER_SIZE;
	else
		macb->rx_buffer_size = MACB_RX_BUFFER_SIZE;

	/* TODO: we need to check that rx/tx_ring_dma is dcache line aligned */
	macb->rx_buffer = dma_alloc_coherent(macb->rx_buffer_size *
					     MACB_RX_RING_SIZE,
					     &macb->rx_buffer_dma);
	macb->rx_ring = dma_alloc_coherent(MACB_RX_DMA_DESC_SIZE,
					   &macb->rx_ring_dma);
	macb->tx_ring = dma_alloc_coherent(MACB_TX_DMA_DESC_SIZE,
					   &macb->tx_ring_dma);
	macb->dummy_desc = dma_alloc_coherent(MACB_TX_DUMMY_DMA_DESC_SIZE,
					      &macb->dummy_desc_dma);

	/*
	 * Do some basic initialization so that we at least can talk
	 * to the PHY
	 */
	if (macb_is_gem(macb)) {
		ncfgr = gem_mdc_clk_div(id, macb);
		ncfgr |= macb_dbw(macb);
	} else {
		ncfgr = macb_mdc_clk_div(id, macb);
	}

	macb_writel(macb, NCFGR, ncfgr);
}

#ifndef CONFIG_DM_ETH
static int macb_send(struct eth_device *netdev, void *packet, int length)
{
	struct macb_device *macb = to_macb(netdev);

	return _macb_send(macb, netdev->name, packet, length);
}

static int macb_recv(struct eth_device *netdev)
{
	struct macb_device *macb = to_macb(netdev);
	uchar *packet;
	int length;

	macb->wrapped = false;
	for (;;) {
		macb->next_rx_tail = macb->rx_tail;
		length = _macb_recv(macb, &packet);
		if (length >= 0) {
			net_process_received_packet(packet, length);
			reclaim_rx_buffers(macb, macb->next_rx_tail);
		} else {
			return length;
		}
	}
}

static int macb_init(struct eth_device *netdev, struct bd_info *bd)
{
	struct macb_device *macb = to_macb(netdev);

	return _macb_init(macb, netdev->name);
}

static void macb_halt(struct eth_device *netdev)
{
	struct macb_device *macb = to_macb(netdev);

	return _macb_halt(macb);
}

static int macb_write_hwaddr(struct eth_device *netdev)
{
	struct macb_device *macb = to_macb(netdev);

	return _macb_write_hwaddr(macb, netdev->enetaddr);
}

int macb_eth_initialize(int id, void *regs, unsigned int phy_addr)
{
	struct macb_device *macb;
	struct eth_device *netdev;

	macb = malloc(sizeof(struct macb_device));
	if (!macb) {
		printf("Error: Failed to allocate memory for MACB%d\n", id);
		return -1;
	}
	memset(macb, 0, sizeof(struct macb_device));

	netdev = &macb->netdev;

	macb->regs = regs;
	macb->phy_addr = phy_addr;

	if (macb_is_gem(macb))
		sprintf(netdev->name, "gmac%d", id);
	else
		sprintf(netdev->name, "macb%d", id);

	netdev->init = macb_init;
	netdev->halt = macb_halt;
	netdev->send = macb_send;
	netdev->recv = macb_recv;
	netdev->write_hwaddr = macb_write_hwaddr;

	_macb_eth_initialize(macb);

	eth_register(netdev);

#if defined(CONFIG_CMD_MII) || defined(CONFIG_PHYLIB)
	int retval;
	struct mii_dev *mdiodev = mdio_alloc();

	if (!mdiodev)
		return -ENOMEM;
	strncpy(mdiodev->name, netdev->name, MDIO_NAME_LEN);
	mdiodev->read = macb_miiphy_read;
	mdiodev->write = macb_miiphy_write;

	retval = mdio_register(mdiodev);
	if (retval < 0)
		return retval;
	macb->bus = miiphy_get_dev_by_name(netdev->name);
#endif
	return 0;
}
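
/*
 * A minimal usage sketch for the non-DM path (illustrative only; the
 * controller base address and PHY address below are board-specific
 * assumptions): legacy boards register the controller from their
 * board_eth_init() hook, e.g.
 *
 *	int board_eth_init(struct bd_info *bis)
 *	{
 *		return macb_eth_initialize(0, (void *)0xfffc4000, 0);
 *	}
 */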
#endif /* !CONFIG_DM_ETH */

#ifdef CONFIG_DM_ETH

static int macb_start(struct udevice *dev)
{
	return _macb_init(dev, dev->name);
}

static int macb_send(struct udevice *dev, void *packet, int length)
{
	struct macb_device *macb = dev_get_priv(dev);

	return _macb_send(macb, dev->name, packet, length);
}

static int macb_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct macb_device *macb = dev_get_priv(dev);

	macb->next_rx_tail = macb->rx_tail;
	macb->wrapped = false;

	return _macb_recv(macb, packetp);
}

static int macb_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct macb_device *macb = dev_get_priv(dev);

	reclaim_rx_buffers(macb, macb->next_rx_tail);

	return 0;
}

static void macb_stop(struct udevice *dev)
{
	struct macb_device *macb = dev_get_priv(dev);

	_macb_halt(macb);
}

static int macb_write_hwaddr(struct udevice *dev)
{
	struct eth_pdata *plat = dev_get_platdata(dev);
	struct macb_device *macb = dev_get_priv(dev);

	return _macb_write_hwaddr(macb, plat->enetaddr);
}

static const struct eth_ops macb_eth_ops = {
	.start		= macb_start,
	.send		= macb_send,
	.recv		= macb_recv,
	.stop		= macb_stop,
	.free_pkt	= macb_free_pkt,
	.write_hwaddr	= macb_write_hwaddr,
};

#ifdef CONFIG_CLK
static int macb_enable_clk(struct udevice *dev)
{
	struct macb_device *macb = dev_get_priv(dev);
	struct clk clk;
	ulong clk_rate;
	int ret;

	ret = clk_get_by_index(dev, 0, &clk);
	if (ret)
		return -EINVAL;

	/*
	 * If the clock driver doesn't support enable or disable,
	 * clk_enable() returns -ENOSYS; don't treat that as a failure.
	 */
	ret = clk_enable(&clk);
	if (ret && ret != -ENOSYS)
		return ret;

	clk_rate = clk_get_rate(&clk);
	if (!clk_rate)
		return -EINVAL;

	macb->pclk_rate = clk_rate;

	return 0;
}
#endif

static const struct macb_config default_gem_config = {
	.dma_burst_length = 16,
	.clk_init = NULL,
};

static int macb_eth_probe(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct macb_device *macb = dev_get_priv(dev);
	const char *phy_mode;
	int ret;

	phy_mode = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "phy-mode",
			       NULL);
	if (phy_mode)
		macb->phy_interface = phy_get_interface_by_name(phy_mode);
	if (macb->phy_interface == -1) {
		debug("%s: Invalid PHY interface '%s'\n", __func__, phy_mode);
		return -EINVAL;
	}

	macb->regs = (void *)pdata->iobase;

	macb->is_big_endian = (cpu_to_be32(0x12345678) == 0x12345678);

	macb->config = (struct macb_config *)dev_get_driver_data(dev);
	if (!macb->config)
		macb->config = &default_gem_config;

#ifdef CONFIG_CLK
	ret = macb_enable_clk(dev);
	if (ret)
		return ret;
#endif

	_macb_eth_initialize(macb);

#if defined(CONFIG_CMD_MII) || defined(CONFIG_PHYLIB)
	macb->bus = mdio_alloc();
	if (!macb->bus)
		return -ENOMEM;
	strncpy(macb->bus->name, dev->name, MDIO_NAME_LEN);
	macb->bus->read = macb_miiphy_read;
	macb->bus->write = macb_miiphy_write;

	ret = mdio_register(macb->bus);
	if (ret < 0)
		return ret;
	macb->bus = miiphy_get_dev_by_name(dev->name);
#endif

	return 0;
}

static int macb_eth_remove(struct udevice *dev)
{
	struct macb_device *macb = dev_get_priv(dev);

#ifdef CONFIG_PHYLIB
	free(macb->phydev);
#endif
	mdio_unregister(macb->bus);
	mdio_free(macb->bus);

	return 0;
}

/**
 * macb_late_eth_ofdata_to_platdata
 * @dev:	udevice struct
 *
 * Return: 0 on success, negative errno on failure.
 */
int __weak macb_late_eth_ofdata_to_platdata(struct udevice *dev)
{
	return 0;
}

static int macb_eth_ofdata_to_platdata(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);

	pdata->iobase = (phys_addr_t)dev_remap_addr(dev);
	if (!pdata->iobase)
		return -EINVAL;

	return macb_late_eth_ofdata_to_platdata(dev);
}

static const struct macb_config sama5d4_config = {
	.dma_burst_length = 4,
	.clk_init = NULL,
};

static const struct macb_config sifive_config = {
	.dma_burst_length = 16,
	.clk_init = macb_sifive_clk_init,
};

static const struct udevice_id macb_eth_ids[] = {
	{ .compatible = "cdns,macb" },
	{ .compatible = "cdns,at91sam9260-macb" },
	{ .compatible = "cdns,sam9x60-macb" },
	{ .compatible = "atmel,sama5d2-gem" },
	{ .compatible = "atmel,sama5d3-gem" },
	{ .compatible = "atmel,sama5d4-gem", .data = (ulong)&sama5d4_config },
	{ .compatible = "cdns,zynq-gem" },
	{ .compatible = "sifive,fu540-c000-gem",
	  .data = (ulong)&sifive_config },
	{ }
};

U_BOOT_DRIVER(eth_macb) = {
	.name	= "eth_macb",
	.id	= UCLASS_ETH,
	.of_match = macb_eth_ids,
	.ofdata_to_platdata = macb_eth_ofdata_to_platdata,
	.probe	= macb_eth_probe,
	.remove	= macb_eth_remove,
	.ops	= &macb_eth_ops,
	.priv_auto_alloc_size = sizeof(struct macb_device),
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
};
#endif /* CONFIG_DM_ETH */
#endif /* CONFIG_CMD_NET */