dwc_eth_qos.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016, NVIDIA CORPORATION.
 *
 * Portions based on U-Boot's rtl8169.c.
 */

/*
 * This driver supports the Synopsys DesignWare Ethernet QOS (Quality Of
 * Service) IP block. The IP supports multiple options for bus type, clocking/
 * reset structure, and feature list.
 *
 * The driver is written such that generic core logic is kept separate from
 * configuration-specific logic. Code that interacts with configuration-
 * specific resources is split out into separate functions to avoid polluting
 * common code. If/when this driver is enhanced to support multiple
 * configurations, the core code should be adapted to call all configuration-
 * specific functions through function pointers, with the definition of those
 * function pointers being supplied by struct udevice_id eqos_ids[]'s .data
 * field.
 *
 * The following configurations are currently supported:
 * tegra186:
 *    NVIDIA's Tegra186 chip. This configuration uses an AXI master/DMA bus,
 *    an AHB slave/register bus, contains the DMA, MTL, and MAC sub-blocks,
 *    and supports a single RGMII PHY. This configuration also has SW control
 *    over all clock and reset signals to the HW block.
 */

#define LOG_CATEGORY UCLASS_ETH

#include <common.h>
#include <clk.h>
#include <cpu_func.h>
#include <dm.h>
#include <errno.h>
#include <log.h>
#include <malloc.h>
#include <memalign.h>
#include <miiphy.h>
#include <net.h>
#include <netdev.h>
#include <phy.h>
#include <reset.h>
#include <wait_bit.h>
#include <asm/cache.h>
#include <asm/gpio.h>
#include <asm/io.h>
#include <eth_phy.h>
#ifdef CONFIG_ARCH_IMX8M
#include <asm/arch/clock.h>
#include <asm/mach-imx/sys_proto.h>
#endif
#include <linux/bitops.h>
#include <linux/delay.h>

/* Core registers */

#define EQOS_MAC_REGS_BASE 0x000
struct eqos_mac_regs {
	uint32_t configuration;				/* 0x000 */
	uint32_t unused_004[(0x070 - 0x004) / 4];	/* 0x004 */
	uint32_t q0_tx_flow_ctrl;			/* 0x070 */
	uint32_t unused_070[(0x090 - 0x074) / 4];	/* 0x074 */
	uint32_t rx_flow_ctrl;				/* 0x090 */
	uint32_t unused_094;				/* 0x094 */
	uint32_t txq_prty_map0;				/* 0x098 */
	uint32_t unused_09c;				/* 0x09c */
	uint32_t rxq_ctrl0;				/* 0x0a0 */
	uint32_t unused_0a4;				/* 0x0a4 */
	uint32_t rxq_ctrl2;				/* 0x0a8 */
	uint32_t unused_0ac[(0x0dc - 0x0ac) / 4];	/* 0x0ac */
	uint32_t us_tic_counter;			/* 0x0dc */
	uint32_t unused_0e0[(0x11c - 0x0e0) / 4];	/* 0x0e0 */
	uint32_t hw_feature0;				/* 0x11c */
	uint32_t hw_feature1;				/* 0x120 */
	uint32_t hw_feature2;				/* 0x124 */
	uint32_t unused_128[(0x200 - 0x128) / 4];	/* 0x128 */
	uint32_t mdio_address;				/* 0x200 */
	uint32_t mdio_data;				/* 0x204 */
	uint32_t unused_208[(0x300 - 0x208) / 4];	/* 0x208 */
	uint32_t address0_high;				/* 0x300 */
	uint32_t address0_low;				/* 0x304 */
};

#define EQOS_MAC_CONFIGURATION_GPSLCE BIT(23)
#define EQOS_MAC_CONFIGURATION_CST BIT(21)
#define EQOS_MAC_CONFIGURATION_ACS BIT(20)
#define EQOS_MAC_CONFIGURATION_WD BIT(19)
#define EQOS_MAC_CONFIGURATION_JD BIT(17)
#define EQOS_MAC_CONFIGURATION_JE BIT(16)
#define EQOS_MAC_CONFIGURATION_PS BIT(15)
#define EQOS_MAC_CONFIGURATION_FES BIT(14)
#define EQOS_MAC_CONFIGURATION_DM BIT(13)
#define EQOS_MAC_CONFIGURATION_LM BIT(12)
#define EQOS_MAC_CONFIGURATION_TE BIT(1)
#define EQOS_MAC_CONFIGURATION_RE BIT(0)

#define EQOS_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT 16
#define EQOS_MAC_Q0_TX_FLOW_CTRL_PT_MASK 0xffff
#define EQOS_MAC_Q0_TX_FLOW_CTRL_TFE BIT(1)

#define EQOS_MAC_RX_FLOW_CTRL_RFE BIT(0)

#define EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_SHIFT 0
#define EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_MASK 0xff

#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT 0
#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK 3
#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_NOT_ENABLED 0
#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB 2
#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_AV 1

#define EQOS_MAC_RXQ_CTRL2_PSRQ0_SHIFT 0
#define EQOS_MAC_RXQ_CTRL2_PSRQ0_MASK 0xff

#define EQOS_MAC_HW_FEATURE0_MMCSEL_SHIFT 8
#define EQOS_MAC_HW_FEATURE0_HDSEL_SHIFT 2
#define EQOS_MAC_HW_FEATURE0_GMIISEL_SHIFT 1
#define EQOS_MAC_HW_FEATURE0_MIISEL_SHIFT 0

#define EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_SHIFT 6
#define EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_MASK 0x1f
#define EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_SHIFT 0
#define EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_MASK 0x1f

#define EQOS_MAC_HW_FEATURE3_ASP_SHIFT 28
#define EQOS_MAC_HW_FEATURE3_ASP_MASK 0x3

#define EQOS_MAC_MDIO_ADDRESS_PA_SHIFT 21
#define EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT 16
#define EQOS_MAC_MDIO_ADDRESS_CR_SHIFT 8
#define EQOS_MAC_MDIO_ADDRESS_CR_20_35 2
#define EQOS_MAC_MDIO_ADDRESS_CR_250_300 5
#define EQOS_MAC_MDIO_ADDRESS_SKAP BIT(4)
#define EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT 2
#define EQOS_MAC_MDIO_ADDRESS_GOC_READ 3
#define EQOS_MAC_MDIO_ADDRESS_GOC_WRITE 1
#define EQOS_MAC_MDIO_ADDRESS_C45E BIT(1)
#define EQOS_MAC_MDIO_ADDRESS_GB BIT(0)

#define EQOS_MAC_MDIO_DATA_GD_MASK 0xffff
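
/*
 * An MDIO transaction is composed in the MDIO_ADDRESS register: the PHY
 * address (PA), register address (RDA) and CSR clock-range selector (CR)
 * fields are ORed together with a read/write command (GOC) and the GB
 * "busy" bit. Software then polls GB until the MAC clears it; for reads,
 * the 16-bit result is taken from MDIO_DATA afterwards. See
 * eqos_mdio_read()/eqos_mdio_write() below.
 */
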
#define EQOS_MTL_REGS_BASE 0xd00
struct eqos_mtl_regs {
	uint32_t txq0_operation_mode;			/* 0xd00 */
	uint32_t unused_d04;				/* 0xd04 */
	uint32_t txq0_debug;				/* 0xd08 */
	uint32_t unused_d0c[(0xd18 - 0xd0c) / 4];	/* 0xd0c */
	uint32_t txq0_quantum_weight;			/* 0xd18 */
	uint32_t unused_d1c[(0xd30 - 0xd1c) / 4];	/* 0xd1c */
	uint32_t rxq0_operation_mode;			/* 0xd30 */
	uint32_t unused_d34;				/* 0xd34 */
	uint32_t rxq0_debug;				/* 0xd38 */
};

#define EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT 16
#define EQOS_MTL_TXQ0_OPERATION_MODE_TQS_MASK 0x1ff
#define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_SHIFT 2
#define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_MASK 3
#define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_ENABLED 2
#define EQOS_MTL_TXQ0_OPERATION_MODE_TSF BIT(1)
#define EQOS_MTL_TXQ0_OPERATION_MODE_FTQ BIT(0)

#define EQOS_MTL_TXQ0_DEBUG_TXQSTS BIT(4)
#define EQOS_MTL_TXQ0_DEBUG_TRCSTS_SHIFT 1
#define EQOS_MTL_TXQ0_DEBUG_TRCSTS_MASK 3

#define EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT 20
#define EQOS_MTL_RXQ0_OPERATION_MODE_RQS_MASK 0x3ff
#define EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT 14
#define EQOS_MTL_RXQ0_OPERATION_MODE_RFD_MASK 0x3f
#define EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT 8
#define EQOS_MTL_RXQ0_OPERATION_MODE_RFA_MASK 0x3f
#define EQOS_MTL_RXQ0_OPERATION_MODE_EHFC BIT(7)
#define EQOS_MTL_RXQ0_OPERATION_MODE_RSF BIT(5)

#define EQOS_MTL_RXQ0_DEBUG_PRXQ_SHIFT 16
#define EQOS_MTL_RXQ0_DEBUG_PRXQ_MASK 0x7fff
#define EQOS_MTL_RXQ0_DEBUG_RXQSTS_SHIFT 4
#define EQOS_MTL_RXQ0_DEBUG_RXQSTS_MASK 3

#define EQOS_DMA_REGS_BASE 0x1000
struct eqos_dma_regs {
	uint32_t mode;					/* 0x1000 */
	uint32_t sysbus_mode;				/* 0x1004 */
	uint32_t unused_1008[(0x1100 - 0x1008) / 4];	/* 0x1008 */
	uint32_t ch0_control;				/* 0x1100 */
	uint32_t ch0_tx_control;			/* 0x1104 */
	uint32_t ch0_rx_control;			/* 0x1108 */
	uint32_t unused_110c;				/* 0x110c */
	uint32_t ch0_txdesc_list_haddress;		/* 0x1110 */
	uint32_t ch0_txdesc_list_address;		/* 0x1114 */
	uint32_t ch0_rxdesc_list_haddress;		/* 0x1118 */
	uint32_t ch0_rxdesc_list_address;		/* 0x111c */
	uint32_t ch0_txdesc_tail_pointer;		/* 0x1120 */
	uint32_t unused_1124;				/* 0x1124 */
	uint32_t ch0_rxdesc_tail_pointer;		/* 0x1128 */
	uint32_t ch0_txdesc_ring_length;		/* 0x112c */
	uint32_t ch0_rxdesc_ring_length;		/* 0x1130 */
};

#define EQOS_DMA_MODE_SWR BIT(0)

#define EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT 16
#define EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_MASK 0xf
#define EQOS_DMA_SYSBUS_MODE_EAME BIT(11)
#define EQOS_DMA_SYSBUS_MODE_BLEN16 BIT(3)
#define EQOS_DMA_SYSBUS_MODE_BLEN8 BIT(2)
#define EQOS_DMA_SYSBUS_MODE_BLEN4 BIT(1)

#define EQOS_DMA_CH0_CONTROL_DSL_SHIFT 18
#define EQOS_DMA_CH0_CONTROL_PBLX8 BIT(16)

#define EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT 16
#define EQOS_DMA_CH0_TX_CONTROL_TXPBL_MASK 0x3f
#define EQOS_DMA_CH0_TX_CONTROL_OSP BIT(4)
#define EQOS_DMA_CH0_TX_CONTROL_ST BIT(0)

#define EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT 16
#define EQOS_DMA_CH0_RX_CONTROL_RXPBL_MASK 0x3f
#define EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT 1
#define EQOS_DMA_CH0_RX_CONTROL_RBSZ_MASK 0x3fff
#define EQOS_DMA_CH0_RX_CONTROL_SR BIT(0)

/* These registers are Tegra186-specific */
#define EQOS_TEGRA186_REGS_BASE 0x8800
struct eqos_tegra186_regs {
	uint32_t sdmemcomppadctrl;			/* 0x8800 */
	uint32_t auto_cal_config;			/* 0x8804 */
	uint32_t unused_8808;				/* 0x8808 */
	uint32_t auto_cal_status;			/* 0x880c */
};

#define EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD BIT(31)

#define EQOS_AUTO_CAL_CONFIG_START BIT(31)
#define EQOS_AUTO_CAL_CONFIG_ENABLE BIT(29)

#define EQOS_AUTO_CAL_STATUS_ACTIVE BIT(31)

/* Descriptors */
#define EQOS_DESCRIPTORS_TX 4
#define EQOS_DESCRIPTORS_RX 4
#define EQOS_DESCRIPTORS_NUM (EQOS_DESCRIPTORS_TX + EQOS_DESCRIPTORS_RX)
#define EQOS_BUFFER_ALIGN ARCH_DMA_MINALIGN
#define EQOS_MAX_PACKET_SIZE ALIGN(1568, ARCH_DMA_MINALIGN)
#define EQOS_RX_BUFFER_SIZE (EQOS_DESCRIPTORS_RX * EQOS_MAX_PACKET_SIZE)

struct eqos_desc {
	u32 des0;
	u32 des1;
	u32 des2;
	u32 des3;
};

#define EQOS_DESC3_OWN BIT(31)
#define EQOS_DESC3_FD BIT(29)
#define EQOS_DESC3_LD BIT(28)
#define EQOS_DESC3_BUF1V BIT(24)
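
/*
 * des0 holds the DMA buffer address, des2 the buffer length, and des3 the
 * control/status word of each descriptor. Ownership is handed to the DMA
 * engine by setting EQOS_DESC3_OWN last, behind a memory barrier, so the
 * engine never observes a half-written descriptor (see eqos_send() and
 * eqos_free_pkt() below).
 */
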
#define EQOS_AXI_WIDTH_32 4
#define EQOS_AXI_WIDTH_64 8
#define EQOS_AXI_WIDTH_128 16

struct eqos_config {
	bool reg_access_always_ok;
	int mdio_wait;
	int swr_wait;
	int config_mac;
	int config_mac_mdio;
	unsigned int axi_bus_width;
	phy_interface_t (*interface)(struct udevice *dev);
	struct eqos_ops *ops;
};

struct eqos_ops {
	void (*eqos_inval_desc)(void *desc);
	void (*eqos_flush_desc)(void *desc);
	void (*eqos_inval_buffer)(void *buf, size_t size);
	void (*eqos_flush_buffer)(void *buf, size_t size);
	int (*eqos_probe_resources)(struct udevice *dev);
	int (*eqos_remove_resources)(struct udevice *dev);
	int (*eqos_stop_resets)(struct udevice *dev);
	int (*eqos_start_resets)(struct udevice *dev);
	int (*eqos_stop_clks)(struct udevice *dev);
	int (*eqos_start_clks)(struct udevice *dev);
	int (*eqos_calibrate_pads)(struct udevice *dev);
	int (*eqos_disable_calibration)(struct udevice *dev);
	int (*eqos_set_tx_clk_speed)(struct udevice *dev);
	ulong (*eqos_get_tick_clk_rate)(struct udevice *dev);
};
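
/*
 * All configuration-specific behaviour (cache maintenance, clocks, resets,
 * pad calibration, TX clock programming) is reached through this ops table.
 * The core code below calls only these hooks, so supporting a new SoC means
 * supplying a new struct eqos_ops rather than modifying the common paths,
 * per the design note at the top of this file.
 */
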
struct eqos_priv {
	struct udevice *dev;
	const struct eqos_config *config;
	fdt_addr_t regs;
	struct eqos_mac_regs *mac_regs;
	struct eqos_mtl_regs *mtl_regs;
	struct eqos_dma_regs *dma_regs;
	struct eqos_tegra186_regs *tegra186_regs;
	struct reset_ctl reset_ctl;
	struct gpio_desc phy_reset_gpio;
	struct clk clk_master_bus;
	struct clk clk_rx;
	struct clk clk_ptp_ref;
	struct clk clk_tx;
	struct clk clk_ck;
	struct clk clk_slave_bus;
	struct mii_dev *mii;
	struct phy_device *phy;
	u32 max_speed;
	void *descs;
	int tx_desc_idx, rx_desc_idx;
	unsigned int desc_size;
	void *tx_dma_buf;
	void *rx_dma_buf;
	void *rx_pkt;
	bool started;
	bool reg_access_ok;
	bool clk_ck_enabled;
	struct reset_ctl_bulk reset_bulk;
	struct clk_bulk clk_bulk;
	struct clk rmii_rtx;
};

/*
 * TX and RX descriptors are 16 bytes. This causes problems with the cache
 * maintenance on CPUs where the cache-line size exceeds the size of these
 * descriptors. What will happen is that when the driver receives a packet
 * it will be immediately requeued for the hardware to reuse. The CPU will
 * therefore need to flush the cache-line containing the descriptor, which
 * will cause all other descriptors in the same cache-line to be flushed
 * along with it. If one of those descriptors had been written to by the
 * device those changes (and the associated packet) will be lost.
 *
 * To work around this, we make use of non-cached memory if available. If
 * descriptors are mapped uncached there's no need to manually flush them
 * or invalidate them.
 *
 * Note that this only applies to descriptors. The packet data buffers do
 * not have the same constraints since they are 1536 bytes large, so they
 * are unlikely to share cache-lines.
 */
static void *eqos_alloc_descs(struct eqos_priv *eqos, unsigned int num)
{
	eqos->desc_size = ALIGN(sizeof(struct eqos_desc),
				(unsigned int)ARCH_DMA_MINALIGN);

	return memalign(eqos->desc_size, num * eqos->desc_size);
}

static void eqos_free_descs(void *descs)
{
	free(descs);
}

static struct eqos_desc *eqos_get_desc(struct eqos_priv *eqos,
				       unsigned int num, bool rx)
{
	return eqos->descs +
	       ((rx ? EQOS_DESCRIPTORS_TX : 0) + num) * eqos->desc_size;
}
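
/*
 * The descriptors live in one contiguous allocation: the EQOS_DESCRIPTORS_TX
 * TX descriptors come first, followed by the RX descriptors, each slot padded
 * out to eqos->desc_size (a whole multiple of the cache-line size, per
 * eqos_alloc_descs() above), which is what the indexing above relies on.
 */
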
static void eqos_inval_desc_generic(void *desc)
{
	unsigned long start = (unsigned long)desc;
	unsigned long end = ALIGN(start + sizeof(struct eqos_desc),
				  ARCH_DMA_MINALIGN);

	invalidate_dcache_range(start, end);
}

static void eqos_flush_desc_generic(void *desc)
{
	unsigned long start = (unsigned long)desc;
	unsigned long end = ALIGN(start + sizeof(struct eqos_desc),
				  ARCH_DMA_MINALIGN);

	flush_dcache_range(start, end);
}

static void eqos_inval_buffer_tegra186(void *buf, size_t size)
{
	unsigned long start = (unsigned long)buf & ~(ARCH_DMA_MINALIGN - 1);
	unsigned long end = ALIGN(start + size, ARCH_DMA_MINALIGN);

	invalidate_dcache_range(start, end);
}

static void eqos_inval_buffer_generic(void *buf, size_t size)
{
	unsigned long start = rounddown((unsigned long)buf, ARCH_DMA_MINALIGN);
	unsigned long end = roundup((unsigned long)buf + size,
				    ARCH_DMA_MINALIGN);

	invalidate_dcache_range(start, end);
}

static void eqos_flush_buffer_tegra186(void *buf, size_t size)
{
	flush_cache((unsigned long)buf, size);
}

static void eqos_flush_buffer_generic(void *buf, size_t size)
{
	unsigned long start = rounddown((unsigned long)buf, ARCH_DMA_MINALIGN);
	unsigned long end = roundup((unsigned long)buf + size,
				    ARCH_DMA_MINALIGN);

	flush_dcache_range(start, end);
}

static int eqos_mdio_wait_idle(struct eqos_priv *eqos)
{
	return wait_for_bit_le32(&eqos->mac_regs->mdio_address,
				 EQOS_MAC_MDIO_ADDRESS_GB, false,
				 1000000, true);
}
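
/*
 * Clause 22 MDIO read: wait for the bus to go idle, program the PA/RDA/CR
 * fields plus a read command (GOC_READ) and the GB busy bit into
 * MDIO_ADDRESS, poll GB again until the transaction completes, then fetch
 * the 16-bit result from MDIO_DATA.
 */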
static int eqos_mdio_read(struct mii_dev *bus, int mdio_addr, int mdio_devad,
			  int mdio_reg)
{
	struct eqos_priv *eqos = bus->priv;
	u32 val;
	int ret;

	debug("%s(dev=%p, addr=%x, reg=%d):\n", __func__, eqos->dev, mdio_addr,
	      mdio_reg);

	ret = eqos_mdio_wait_idle(eqos);
	if (ret) {
		pr_err("MDIO not idle at entry");
		return ret;
	}

	val = readl(&eqos->mac_regs->mdio_address);
	val &= EQOS_MAC_MDIO_ADDRESS_SKAP |
	       EQOS_MAC_MDIO_ADDRESS_C45E;
	val |= (mdio_addr << EQOS_MAC_MDIO_ADDRESS_PA_SHIFT) |
	       (mdio_reg << EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT) |
	       (eqos->config->config_mac_mdio <<
		EQOS_MAC_MDIO_ADDRESS_CR_SHIFT) |
	       (EQOS_MAC_MDIO_ADDRESS_GOC_READ <<
		EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT) |
	       EQOS_MAC_MDIO_ADDRESS_GB;
	writel(val, &eqos->mac_regs->mdio_address);

	udelay(eqos->config->mdio_wait);

	ret = eqos_mdio_wait_idle(eqos);
	if (ret) {
		pr_err("MDIO read didn't complete");
		return ret;
	}

	val = readl(&eqos->mac_regs->mdio_data);
	val &= EQOS_MAC_MDIO_DATA_GD_MASK;

	debug("%s: val=%x\n", __func__, val);

	return val;
}

static int eqos_mdio_write(struct mii_dev *bus, int mdio_addr, int mdio_devad,
			   int mdio_reg, u16 mdio_val)
{
	struct eqos_priv *eqos = bus->priv;
	u32 val;
	int ret;

	debug("%s(dev=%p, addr=%x, reg=%d, val=%x):\n", __func__, eqos->dev,
	      mdio_addr, mdio_reg, mdio_val);

	ret = eqos_mdio_wait_idle(eqos);
	if (ret) {
		pr_err("MDIO not idle at entry");
		return ret;
	}

	writel(mdio_val, &eqos->mac_regs->mdio_data);

	val = readl(&eqos->mac_regs->mdio_address);
	val &= EQOS_MAC_MDIO_ADDRESS_SKAP |
	       EQOS_MAC_MDIO_ADDRESS_C45E;
	val |= (mdio_addr << EQOS_MAC_MDIO_ADDRESS_PA_SHIFT) |
	       (mdio_reg << EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT) |
	       (eqos->config->config_mac_mdio <<
		EQOS_MAC_MDIO_ADDRESS_CR_SHIFT) |
	       (EQOS_MAC_MDIO_ADDRESS_GOC_WRITE <<
		EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT) |
	       EQOS_MAC_MDIO_ADDRESS_GB;
	writel(val, &eqos->mac_regs->mdio_address);

	udelay(eqos->config->mdio_wait);

	ret = eqos_mdio_wait_idle(eqos);
	if (ret) {
		pr_err("MDIO write didn't complete");
		return ret;
	}

	return 0;
}

static int eqos_start_clks_tegra186(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	ret = clk_enable(&eqos->clk_slave_bus);
	if (ret < 0) {
		pr_err("clk_enable(clk_slave_bus) failed: %d", ret);
		goto err;
	}

	ret = clk_enable(&eqos->clk_master_bus);
	if (ret < 0) {
		pr_err("clk_enable(clk_master_bus) failed: %d", ret);
		goto err_disable_clk_slave_bus;
	}

	ret = clk_enable(&eqos->clk_rx);
	if (ret < 0) {
		pr_err("clk_enable(clk_rx) failed: %d", ret);
		goto err_disable_clk_master_bus;
	}

	ret = clk_enable(&eqos->clk_ptp_ref);
	if (ret < 0) {
		pr_err("clk_enable(clk_ptp_ref) failed: %d", ret);
		goto err_disable_clk_rx;
	}

	ret = clk_set_rate(&eqos->clk_ptp_ref, 125 * 1000 * 1000);
	if (ret < 0) {
		pr_err("clk_set_rate(clk_ptp_ref) failed: %d", ret);
		goto err_disable_clk_ptp_ref;
	}

	ret = clk_enable(&eqos->clk_tx);
	if (ret < 0) {
		pr_err("clk_enable(clk_tx) failed: %d", ret);
		goto err_disable_clk_ptp_ref;
	}
#endif

	debug("%s: OK\n", __func__);
	return 0;

#ifdef CONFIG_CLK
err_disable_clk_ptp_ref:
	clk_disable(&eqos->clk_ptp_ref);
err_disable_clk_rx:
	clk_disable(&eqos->clk_rx);
err_disable_clk_master_bus:
	clk_disable(&eqos->clk_master_bus);
err_disable_clk_slave_bus:
	clk_disable(&eqos->clk_slave_bus);
err:
	debug("%s: FAILED: %d\n", __func__, ret);
	return ret;
#endif
}

static int eqos_start_clks_stm32(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	ret = clk_enable(&eqos->clk_master_bus);
	if (ret < 0) {
		pr_err("clk_enable(clk_master_bus) failed: %d", ret);
		goto err;
	}

	ret = clk_enable(&eqos->clk_rx);
	if (ret < 0) {
		pr_err("clk_enable(clk_rx) failed: %d", ret);
		goto err_disable_clk_master_bus;
	}

	ret = clk_enable(&eqos->clk_tx);
	if (ret < 0) {
		pr_err("clk_enable(clk_tx) failed: %d", ret);
		goto err_disable_clk_rx;
	}

	if (clk_valid(&eqos->clk_ck) && !eqos->clk_ck_enabled) {
		ret = clk_enable(&eqos->clk_ck);
		if (ret < 0) {
			pr_err("clk_enable(clk_ck) failed: %d", ret);
			goto err_disable_clk_tx;
		}
		eqos->clk_ck_enabled = true;
	}
#endif

	debug("%s: OK\n", __func__);
	return 0;

#ifdef CONFIG_CLK
err_disable_clk_tx:
	clk_disable(&eqos->clk_tx);
err_disable_clk_rx:
	clk_disable(&eqos->clk_rx);
err_disable_clk_master_bus:
	clk_disable(&eqos->clk_master_bus);
err:
	debug("%s: FAILED: %d\n", __func__, ret);
	return ret;
#endif
}

static int eqos_start_clks_jh7110(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	ret = clk_enable_bulk(&eqos->clk_bulk);
	if (ret < 0)
		pr_err("clk_enable_bulk failed: %d", ret);

	return ret;
}

static int eqos_stop_clks_tegra186(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clk_disable(&eqos->clk_tx);
	clk_disable(&eqos->clk_ptp_ref);
	clk_disable(&eqos->clk_rx);
	clk_disable(&eqos->clk_master_bus);
	clk_disable(&eqos->clk_slave_bus);
#endif

	debug("%s: OK\n", __func__);
	return 0;
}

static int eqos_stop_clks_stm32(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clk_disable(&eqos->clk_tx);
	clk_disable(&eqos->clk_rx);
	clk_disable(&eqos->clk_master_bus);
#endif

	debug("%s: OK\n", __func__);
	return 0;
}

static int eqos_stop_clks_jh7110(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	clk_disable_bulk(&eqos->clk_bulk);

	return 0;
}

static int eqos_start_resets_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 1);
	if (ret < 0) {
		pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d", ret);
		return ret;
	}

	udelay(2);

	ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 0);
	if (ret < 0) {
		pr_err("dm_gpio_set_value(phy_reset, deassert) failed: %d", ret);
		return ret;
	}

	ret = reset_assert(&eqos->reset_ctl);
	if (ret < 0) {
		pr_err("reset_assert() failed: %d", ret);
		return ret;
	}

	udelay(2);

	ret = reset_deassert(&eqos->reset_ctl);
	if (ret < 0) {
		pr_err("reset_deassert() failed: %d", ret);
		return ret;
	}

	debug("%s: OK\n", __func__);
	return 0;
}

static int eqos_start_resets_jh7110(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	ret = reset_deassert_bulk(&eqos->reset_bulk);
	if (ret < 0) {
		pr_err("reset_deassert_bulk() failed: %d", ret);
		return ret;
	}

	debug("%s: OK\n", __func__);
	return 0;
}

static int eqos_stop_resets_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	reset_assert(&eqos->reset_ctl);
	dm_gpio_set_value(&eqos->phy_reset_gpio, 1);

	return 0;
}

static int eqos_stop_resets_jh7110(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	reset_assert_bulk(&eqos->reset_bulk);

	return 0;
}

static int eqos_calibrate_pads_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	setbits_le32(&eqos->tegra186_regs->sdmemcomppadctrl,
		     EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD);

	udelay(1);

	setbits_le32(&eqos->tegra186_regs->auto_cal_config,
		     EQOS_AUTO_CAL_CONFIG_START | EQOS_AUTO_CAL_CONFIG_ENABLE);

	ret = wait_for_bit_le32(&eqos->tegra186_regs->auto_cal_status,
				EQOS_AUTO_CAL_STATUS_ACTIVE, true, 10, false);
	if (ret) {
		pr_err("calibrate didn't start");
		goto failed;
	}

	ret = wait_for_bit_le32(&eqos->tegra186_regs->auto_cal_status,
				EQOS_AUTO_CAL_STATUS_ACTIVE, false, 10, false);
	if (ret) {
		pr_err("calibrate didn't finish");
		goto failed;
	}

	ret = 0;

failed:
	clrbits_le32(&eqos->tegra186_regs->sdmemcomppadctrl,
		     EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD);

	debug("%s: returns %d\n", __func__, ret);
	return ret;
}

static int eqos_disable_calibration_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clrbits_le32(&eqos->tegra186_regs->auto_cal_config,
		     EQOS_AUTO_CAL_CONFIG_ENABLE);

	return 0;
}

static ulong eqos_get_tick_clk_rate_tegra186(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);

	return clk_get_rate(&eqos->clk_slave_bus);
#else
	return 0;
#endif
}

static ulong eqos_get_tick_clk_rate_stm32(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);

	return clk_get_rate(&eqos->clk_master_bus);
#else
	return 0;
#endif
}

static ulong eqos_get_tick_clk_rate_jh7110(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	return clk_get_rate(&eqos->clk_tx);
}

__weak int jh7110_eqos_txclk_set_rate(struct udevice *dev,
				      unsigned long rate)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	return clk_set_rate(&eqos->clk_tx, rate);
}

__weak u32 imx_get_eqos_csr_clk(void)
{
	return 100 * 1000000;
}

__weak int imx_eqos_txclk_set_rate(unsigned long rate)
{
	return 0;
}

static ulong eqos_get_tick_clk_rate_imx(struct udevice *dev)
{
	return imx_get_eqos_csr_clk();
}
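
/*
 * The rate returned by the eqos_get_tick_clk_rate_*() hooks above is the
 * CSR clock feeding the MAC; eqos_start() programs us_tic_counter with
 * (rate / 1000000) - 1 so the MAC can derive its internal 1 us tick.
 */
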
static int eqos_set_full_duplex(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	setbits_le32(&eqos->mac_regs->configuration, EQOS_MAC_CONFIGURATION_DM);

	return 0;
}

static int eqos_set_half_duplex(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clrbits_le32(&eqos->mac_regs->configuration, EQOS_MAC_CONFIGURATION_DM);

	/* WAR: Flush TX queue when switching to half-duplex */
	setbits_le32(&eqos->mtl_regs->txq0_operation_mode,
		     EQOS_MTL_TXQ0_OPERATION_MODE_FTQ);

	return 0;
}

static int eqos_set_gmii_speed(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clrbits_le32(&eqos->mac_regs->configuration,
		     EQOS_MAC_CONFIGURATION_PS | EQOS_MAC_CONFIGURATION_FES);

	return 0;
}

static int eqos_set_mii_speed_100(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	setbits_le32(&eqos->mac_regs->configuration,
		     EQOS_MAC_CONFIGURATION_PS | EQOS_MAC_CONFIGURATION_FES);

	return 0;
}

static int eqos_set_mii_speed_10(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clrsetbits_le32(&eqos->mac_regs->configuration,
			EQOS_MAC_CONFIGURATION_FES, EQOS_MAC_CONFIGURATION_PS);

	return 0;
}
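
/*
 * Summary of the MAC_CONFIGURATION speed encoding used by the three helpers
 * above: PS=0/FES=0 selects 1000 Mbps (GMII), PS=1/FES=1 selects 100 Mbps
 * (MII), and PS=1/FES=0 selects 10 Mbps.
 */
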
static int eqos_set_tx_clk_speed_tegra186(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);
	ulong rate;
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	switch (eqos->phy->speed) {
	case SPEED_1000:
		rate = 125 * 1000 * 1000;
		break;
	case SPEED_100:
		rate = 25 * 1000 * 1000;
		break;
	case SPEED_10:
		rate = 2.5 * 1000 * 1000;
		break;
	default:
		pr_err("invalid speed %d", eqos->phy->speed);
		return -EINVAL;
	}

	ret = clk_set_rate(&eqos->clk_tx, rate);
	if (ret < 0) {
		pr_err("clk_set_rate(tx_clk, %lu) failed: %d", rate, ret);
		return ret;
	}
#endif

	return 0;
}

static int eqos_set_tx_clk_speed_imx(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	ulong rate;
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	switch (eqos->phy->speed) {
	case SPEED_1000:
		rate = 125 * 1000 * 1000;
		break;
	case SPEED_100:
		rate = 25 * 1000 * 1000;
		break;
	case SPEED_10:
		rate = 2.5 * 1000 * 1000;
		break;
	default:
		pr_err("invalid speed %d", eqos->phy->speed);
		return -EINVAL;
	}

	ret = imx_eqos_txclk_set_rate(rate);
	if (ret < 0) {
		pr_err("imx (tx_clk, %lu) failed: %d", rate, ret);
		return ret;
	}

	return 0;
}

static int eqos_set_tx_clk_speed_jh7110(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	ulong rate;
	int ret;

	switch (eqos->phy->speed) {
	case SPEED_1000:
		rate = 125 * 1000 * 1000;
		break;
	case SPEED_100:
		rate = 25 * 1000 * 1000;
		break;
	case SPEED_10:
		rate = 2.5 * 1000 * 1000;
		break;
	default:
		pr_err("invalid speed %d", eqos->phy->speed);
		return -EINVAL;
	}

	ret = jh7110_eqos_txclk_set_rate(dev, rate);
	if (ret < 0) {
		pr_err("jh7110 (tx_clk, %lu) failed: %d", rate, ret);
		return ret;
	}

#if CONFIG_IS_ENABLED(TARGET_STARFIVE_VISIONFIVE2)
	clk_set_rate(&eqos->rmii_rtx, rate);
#endif

	return 0;
}

static int eqos_adjust_link(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;
	bool en_calibration;

	debug("%s(dev=%p):\n", __func__, dev);

	if (eqos->phy->duplex)
		ret = eqos_set_full_duplex(dev);
	else
		ret = eqos_set_half_duplex(dev);
	if (ret < 0) {
		pr_err("eqos_set_*_duplex() failed: %d", ret);
		return ret;
	}

	switch (eqos->phy->speed) {
	case SPEED_1000:
		en_calibration = true;
		ret = eqos_set_gmii_speed(dev);
		break;
	case SPEED_100:
		en_calibration = true;
		ret = eqos_set_mii_speed_100(dev);
		break;
	case SPEED_10:
		en_calibration = false;
		ret = eqos_set_mii_speed_10(dev);
		break;
	default:
		pr_err("invalid speed %d", eqos->phy->speed);
		return -EINVAL;
	}
	if (ret < 0) {
		pr_err("eqos_set_*mii_speed*() failed: %d", ret);
		return ret;
	}

	if (en_calibration) {
		ret = eqos->config->ops->eqos_calibrate_pads(dev);
		if (ret < 0) {
			pr_err("eqos_calibrate_pads() failed: %d", ret);
			return ret;
		}
	} else {
		ret = eqos->config->ops->eqos_disable_calibration(dev);
		if (ret < 0) {
			pr_err("eqos_disable_calibration() failed: %d", ret);
			return ret;
		}
	}

	ret = eqos->config->ops->eqos_set_tx_clk_speed(dev);
	if (ret < 0) {
		pr_err("eqos_set_tx_clk_speed() failed: %d", ret);
		return ret;
	}

	return 0;
}

static int eqos_write_hwaddr(struct udevice *dev)
{
	struct eth_pdata *plat = dev_get_plat(dev);
	struct eqos_priv *eqos = dev_get_priv(dev);
	uint32_t val;

	/*
	 * This function may be called before start() or after stop(). At that
	 * time, on at least some configurations of the EQoS HW, all clocks to
	 * the EQoS HW block will be stopped, and a reset signal applied. If
	 * any register access is attempted in this state, bus timeouts or CPU
	 * hangs may occur. This check prevents that.
	 *
	 * A simple solution to this problem would be to not implement
	 * write_hwaddr(), since start() always writes the MAC address into HW
	 * anyway. However, it is desirable to implement write_hwaddr() to
	 * support the case of SW that runs subsequent to U-Boot which expects
	 * the MAC address to already be programmed into the EQoS registers,
	 * which must happen irrespective of whether the U-Boot user (or
	 * scripts) actually made use of the EQoS device, and hence
	 * irrespective of whether start() was ever called.
	 *
	 * Note that this requirement by subsequent SW is not valid for
	 * Tegra186, and is likely not valid for any non-PCI instantiation of
	 * the EQoS HW block. This function is implemented solely as
	 * future-proofing with the expectation the driver will eventually be
	 * ported to some system where the expectation above is true.
	 */
	if (!eqos->config->reg_access_always_ok && !eqos->reg_access_ok)
		return 0;

	/* Update the MAC address */
	val = (plat->enetaddr[5] << 8) |
	      (plat->enetaddr[4]);
	writel(val, &eqos->mac_regs->address0_high);
	val = (plat->enetaddr[3] << 24) |
	      (plat->enetaddr[2] << 16) |
	      (plat->enetaddr[1] << 8) |
	      (plat->enetaddr[0]);
	writel(val, &eqos->mac_regs->address0_low);

	return 0;
}

static int eqos_read_rom_hwaddr(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);

#ifdef CONFIG_ARCH_IMX8M
	imx_get_mac_from_fuse(dev_seq(dev), pdata->enetaddr);
#endif

	return !is_valid_ethaddr(pdata->enetaddr);
}

static int eqos_start(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret, i;
	ulong rate;
	u32 val, tx_fifo_sz, rx_fifo_sz, tqs, rqs, pbl;
	ulong last_rx_desc;
	ulong desc_pad;

	debug("%s(dev=%p):\n", __func__, dev);

	eqos->tx_desc_idx = 0;
	eqos->rx_desc_idx = 0;

	ret = eqos->config->ops->eqos_start_clks(dev);
	if (ret < 0) {
		pr_err("eqos_start_clks() failed: %d", ret);
		goto err;
	}

	ret = eqos->config->ops->eqos_start_resets(dev);
	if (ret < 0) {
		pr_err("eqos_start_resets() failed: %d", ret);
		goto err_stop_clks;
	}

	udelay(10);

	eqos->reg_access_ok = true;

	ret = wait_for_bit_le32(&eqos->dma_regs->mode,
				EQOS_DMA_MODE_SWR, false,
				eqos->config->swr_wait, false);
	if (ret) {
		pr_err("EQOS_DMA_MODE_SWR stuck");
		goto err_stop_resets;
	}

	ret = eqos->config->ops->eqos_calibrate_pads(dev);
	if (ret < 0) {
		pr_err("eqos_calibrate_pads() failed: %d", ret);
		goto err_stop_resets;
	}

	rate = eqos->config->ops->eqos_get_tick_clk_rate(dev);
	val = (rate / 1000000) - 1;
	writel(val, &eqos->mac_regs->us_tic_counter);

	/*
	 * If the PHY was already connected and configured, there is no
	 * need to reconnect/reconfigure it again.
	 */
	if (!eqos->phy) {
		int addr = -1;
#ifdef CONFIG_DM_ETH_PHY
		addr = eth_phy_get_addr(dev);
#endif
#ifdef DWC_NET_PHYADDR
		addr = DWC_NET_PHYADDR;
#endif
		eqos->phy = phy_connect(eqos->mii, addr, dev,
					eqos->config->interface(dev));
		if (!eqos->phy) {
			pr_err("phy_connect() failed");
			goto err_stop_resets;
		}

		if (eqos->max_speed) {
			ret = phy_set_supported(eqos->phy, eqos->max_speed);
			if (ret) {
				pr_err("phy_set_supported() failed: %d", ret);
				goto err_shutdown_phy;
			}
		}

		ret = phy_config(eqos->phy);
		if (ret < 0) {
			pr_err("phy_config() failed: %d", ret);
			goto err_shutdown_phy;
		}
	}

	ret = phy_startup(eqos->phy);
	if (ret < 0) {
		pr_err("phy_startup() failed: %d", ret);
		goto err_shutdown_phy;
	}

	if (!eqos->phy->link) {
		pr_err("No link");
		goto err_shutdown_phy;
	}

	ret = eqos_adjust_link(dev);
	if (ret < 0) {
		pr_err("eqos_adjust_link() failed: %d", ret);
		goto err_shutdown_phy;
	}

	/* Configure MTL */

	/* Enable Store and Forward mode for TX */
	/* Program Tx operating mode */
	setbits_le32(&eqos->mtl_regs->txq0_operation_mode,
		     EQOS_MTL_TXQ0_OPERATION_MODE_TSF |
		     (EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_ENABLED <<
		      EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_SHIFT));

	/* Transmit Queue weight */
	writel(0x10, &eqos->mtl_regs->txq0_quantum_weight);

	/* Enable Store and Forward mode for RX, since no jumbo frames */
	setbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
		     EQOS_MTL_RXQ0_OPERATION_MODE_RSF);

	/* Transmit/Receive queue FIFO size; use all RAM for 1 queue */
	val = readl(&eqos->mac_regs->hw_feature1);
	tx_fifo_sz = (val >> EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_SHIFT) &
		     EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_MASK;
	rx_fifo_sz = (val >> EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_SHIFT) &
		     EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_MASK;

	/*
	 * r/tx_fifo_sz is encoded as log2(n / 128). Undo that by shifting.
	 * r/tqs is encoded as (n / 256) - 1.
	 */
	tqs = (128 << tx_fifo_sz) / 256 - 1;
	rqs = (128 << rx_fifo_sz) / 256 - 1;
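
	/*
	 * Worked example: a hw_feature1 FIFO-size field value of 7 encodes
	 * 128 << 7 = 16384 bytes of FIFO, giving tqs/rqs of
	 * (16384 / 256) - 1 = 63.
	 */
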
	clrsetbits_le32(&eqos->mtl_regs->txq0_operation_mode,
			EQOS_MTL_TXQ0_OPERATION_MODE_TQS_MASK <<
			EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT,
			tqs << EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT);
	clrsetbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
			EQOS_MTL_RXQ0_OPERATION_MODE_RQS_MASK <<
			EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT,
			rqs << EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT);

	/* Flow control is used only if each channel gets 4 KB or more FIFO */
	if (rqs >= ((4096 / 256) - 1)) {
		u32 rfd, rfa;

		setbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
			     EQOS_MTL_RXQ0_OPERATION_MODE_EHFC);

		/*
		 * Set Threshold for Activating Flow Control space for min 2
		 * frames, i.e. (1500 * 1) = 1500 bytes.
		 *
		 * Set Threshold for Deactivating Flow Control for space of
		 * min 1 frame (frame size 1500 bytes) in receive FIFO.
		 */
		if (rqs == ((4096 / 256) - 1)) {
			/*
			 * This violates the above formula because of FIFO size
			 * limit; therefore overflow may occur in spite of this.
			 */
			rfd = 0x3; /* Full-3K */
			rfa = 0x1; /* Full-1.5K */
		} else if (rqs == ((8192 / 256) - 1)) {
			rfd = 0x6; /* Full-4K */
			rfa = 0xa; /* Full-6K */
		} else if (rqs == ((16384 / 256) - 1)) {
			rfd = 0x6; /* Full-4K */
			rfa = 0x12; /* Full-10K */
		} else {
			rfd = 0x6; /* Full-4K */
			rfa = 0x1E; /* Full-16K */
		}

		clrsetbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
				(EQOS_MTL_RXQ0_OPERATION_MODE_RFD_MASK <<
				 EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT) |
				(EQOS_MTL_RXQ0_OPERATION_MODE_RFA_MASK <<
				 EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT),
				(rfd <<
				 EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT) |
				(rfa <<
				 EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT));
	}

	/* Configure MAC */
	clrsetbits_le32(&eqos->mac_regs->rxq_ctrl0,
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK <<
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT,
			eqos->config->config_mac <<
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT);

	/* Multicast and Broadcast Queue Enable */
	setbits_le32(&eqos->mac_regs->unused_0a4, 0x00100000);
	/* Enable promiscuous mode */
	setbits_le32(&eqos->mac_regs->unused_004[1], 0x1);

	/* Set TX flow control parameters */
	/* Set Pause Time */
	setbits_le32(&eqos->mac_regs->q0_tx_flow_ctrl,
		     0xffff << EQOS_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT);
	/* Assign priority for TX flow control */
	clrbits_le32(&eqos->mac_regs->txq_prty_map0,
		     EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_MASK <<
		     EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_SHIFT);
	/* Assign priority for RX flow control */
	clrbits_le32(&eqos->mac_regs->rxq_ctrl2,
		     EQOS_MAC_RXQ_CTRL2_PSRQ0_MASK <<
		     EQOS_MAC_RXQ_CTRL2_PSRQ0_SHIFT);
	/* Enable flow control */
	setbits_le32(&eqos->mac_regs->q0_tx_flow_ctrl,
		     EQOS_MAC_Q0_TX_FLOW_CTRL_TFE);
	setbits_le32(&eqos->mac_regs->rx_flow_ctrl,
		     EQOS_MAC_RX_FLOW_CTRL_RFE);

	clrsetbits_le32(&eqos->mac_regs->configuration,
			EQOS_MAC_CONFIGURATION_GPSLCE |
			EQOS_MAC_CONFIGURATION_WD |
			EQOS_MAC_CONFIGURATION_JD |
			EQOS_MAC_CONFIGURATION_JE,
			EQOS_MAC_CONFIGURATION_CST |
			EQOS_MAC_CONFIGURATION_ACS);

	eqos_write_hwaddr(dev);

	/* Configure DMA */

	/* Enable OSP mode */
	setbits_le32(&eqos->dma_regs->ch0_tx_control,
		     EQOS_DMA_CH0_TX_CONTROL_OSP);

	/* RX buffer size. Must be a multiple of bus width */
	clrsetbits_le32(&eqos->dma_regs->ch0_rx_control,
			EQOS_DMA_CH0_RX_CONTROL_RBSZ_MASK <<
			EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT,
			EQOS_MAX_PACKET_SIZE <<
			EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT);

	desc_pad = (eqos->desc_size - sizeof(struct eqos_desc)) /
		   eqos->config->axi_bus_width;

	setbits_le32(&eqos->dma_regs->ch0_control,
		     EQOS_DMA_CH0_CONTROL_PBLX8 |
		     (desc_pad << EQOS_DMA_CH0_CONTROL_DSL_SHIFT));

	/*
	 * Burst length must be < 1/2 FIFO size.
	 * FIFO size in tqs is encoded as (n / 256) - 1.
	 * Each burst is n * 8 (PBLX8) * 16 (AXI width) == 128 bytes.
	 * Half of n * 256 is n * 128, so pbl == tqs, modulo the -1.
	 */
	pbl = tqs + 1;
	if (pbl > 32)
		pbl = 32;
	clrsetbits_le32(&eqos->dma_regs->ch0_tx_control,
			EQOS_DMA_CH0_TX_CONTROL_TXPBL_MASK <<
			EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT,
			pbl << EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT);
	clrsetbits_le32(&eqos->dma_regs->ch0_rx_control,
			EQOS_DMA_CH0_RX_CONTROL_RXPBL_MASK <<
			EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT,
			8 << EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT);

	/* DMA performance configuration */
	val = (2 << EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT) |
	      EQOS_DMA_SYSBUS_MODE_EAME | EQOS_DMA_SYSBUS_MODE_BLEN16 |
	      EQOS_DMA_SYSBUS_MODE_BLEN8 | EQOS_DMA_SYSBUS_MODE_BLEN4;
	writel(val, &eqos->dma_regs->sysbus_mode);

	/* Set up descriptors */
	memset(eqos->descs, 0, eqos->desc_size * EQOS_DESCRIPTORS_NUM);

	for (i = 0; i < EQOS_DESCRIPTORS_TX; i++) {
		struct eqos_desc *tx_desc = eqos_get_desc(eqos, i, false);

		eqos->config->ops->eqos_flush_desc(tx_desc);
	}

	for (i = 0; i < EQOS_DESCRIPTORS_RX; i++) {
		struct eqos_desc *rx_desc = eqos_get_desc(eqos, i, true);

		rx_desc->des0 = (u32)(ulong)(eqos->rx_dma_buf +
					     (i * EQOS_MAX_PACKET_SIZE));
		rx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_BUF1V;
		mb();
		eqos->config->ops->eqos_flush_desc(rx_desc);
		eqos->config->ops->eqos_inval_buffer(eqos->rx_dma_buf +
						     (i * EQOS_MAX_PACKET_SIZE),
						     EQOS_MAX_PACKET_SIZE);
	}

	writel(0, &eqos->dma_regs->ch0_txdesc_list_haddress);
	writel((ulong)eqos_get_desc(eqos, 0, false),
	       &eqos->dma_regs->ch0_txdesc_list_address);
	writel(EQOS_DESCRIPTORS_TX - 1,
	       &eqos->dma_regs->ch0_txdesc_ring_length);
	writel(0, &eqos->dma_regs->ch0_rxdesc_list_haddress);
	writel((ulong)eqos_get_desc(eqos, 0, true),
	       &eqos->dma_regs->ch0_rxdesc_list_address);
	writel(EQOS_DESCRIPTORS_RX - 1,
	       &eqos->dma_regs->ch0_rxdesc_ring_length);

	/* Enable everything */
	setbits_le32(&eqos->dma_regs->ch0_tx_control,
		     EQOS_DMA_CH0_TX_CONTROL_ST);
	setbits_le32(&eqos->dma_regs->ch0_rx_control,
		     EQOS_DMA_CH0_RX_CONTROL_SR);
	setbits_le32(&eqos->mac_regs->configuration,
		     EQOS_MAC_CONFIGURATION_TE | EQOS_MAC_CONFIGURATION_RE);

	/* TX tail pointer not written until we need to TX a packet */
	/*
	 * Point RX tail pointer at last descriptor. Ideally, we'd point at the
	 * first descriptor, implying all descriptors were available. However,
	 * that's not distinguishable from none of the descriptors being
	 * available.
	 */
	last_rx_desc = (ulong)eqos_get_desc(eqos, EQOS_DESCRIPTORS_RX - 1, true);
	writel(last_rx_desc, &eqos->dma_regs->ch0_rxdesc_tail_pointer);

	eqos->started = true;

	debug("%s: OK\n", __func__);
	return 0;

err_shutdown_phy:
	phy_shutdown(eqos->phy);
err_stop_resets:
	eqos->config->ops->eqos_stop_resets(dev);
err_stop_clks:
	eqos->config->ops->eqos_stop_clks(dev);
err:
	pr_err("FAILED: %d", ret);
	return ret;
}

static void eqos_stop(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int i;

	debug("%s(dev=%p):\n", __func__, dev);

	if (!eqos->started)
		return;
	eqos->started = false;
	eqos->reg_access_ok = false;

	/* Disable TX DMA */
	clrbits_le32(&eqos->dma_regs->ch0_tx_control,
		     EQOS_DMA_CH0_TX_CONTROL_ST);

	/* Wait for all TX packets to drain out of MTL */
	for (i = 0; i < 1000000; i++) {
		u32 val = readl(&eqos->mtl_regs->txq0_debug);
		u32 trcsts = (val >> EQOS_MTL_TXQ0_DEBUG_TRCSTS_SHIFT) &
			     EQOS_MTL_TXQ0_DEBUG_TRCSTS_MASK;
		u32 txqsts = val & EQOS_MTL_TXQ0_DEBUG_TXQSTS;

		if ((trcsts != 1) && (!txqsts))
			break;
	}

	/* Turn off MAC TX and RX */
	clrbits_le32(&eqos->mac_regs->configuration,
		     EQOS_MAC_CONFIGURATION_TE | EQOS_MAC_CONFIGURATION_RE);

	/* Wait for all RX packets to drain out of MTL */
	for (i = 0; i < 1000000; i++) {
		u32 val = readl(&eqos->mtl_regs->rxq0_debug);
		u32 prxq = (val >> EQOS_MTL_RXQ0_DEBUG_PRXQ_SHIFT) &
			   EQOS_MTL_RXQ0_DEBUG_PRXQ_MASK;
		u32 rxqsts = (val >> EQOS_MTL_RXQ0_DEBUG_RXQSTS_SHIFT) &
			     EQOS_MTL_RXQ0_DEBUG_RXQSTS_MASK;

		if ((!prxq) && (!rxqsts))
			break;
	}

	/* Turn off RX DMA */
	clrbits_le32(&eqos->dma_regs->ch0_rx_control,
		     EQOS_DMA_CH0_RX_CONTROL_SR);

	if (eqos->phy)
		phy_shutdown(eqos->phy);
	eqos->config->ops->eqos_stop_resets(dev);
	eqos->config->ops->eqos_stop_clks(dev);

	debug("%s: OK\n", __func__);
}
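
/*
 * TX path: the packet is copied into the single bounce buffer, the buffer is
 * flushed, the next descriptor is filled and handed to the DMA engine (OWN
 * set last, behind a barrier), the channel tail pointer is advanced, and the
 * descriptor is then polled until the engine clears OWN or the timeout of
 * 1,000,000 polls with udelay(1) (roughly one second) expires.
 */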
static int eqos_send(struct udevice *dev, void *packet, int length)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	struct eqos_desc *tx_desc;
	int i;

	debug("%s(dev=%p, packet=%p, length=%d):\n", __func__, dev, packet,
	      length);

	memcpy(eqos->tx_dma_buf, packet, length);
	eqos->config->ops->eqos_flush_buffer(eqos->tx_dma_buf, length);

	tx_desc = eqos_get_desc(eqos, eqos->tx_desc_idx, false);
	eqos->tx_desc_idx++;
	eqos->tx_desc_idx %= EQOS_DESCRIPTORS_TX;

	tx_desc->des0 = (ulong)eqos->tx_dma_buf;
	tx_desc->des1 = 0;
	tx_desc->des2 = length;
	/*
	 * Make sure that if HW sees the _OWN write below, it will see all the
	 * writes to the rest of the descriptor too.
	 */
	mb();
	tx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_FD | EQOS_DESC3_LD | length;
	eqos->config->ops->eqos_flush_desc(tx_desc);

	writel((ulong)eqos_get_desc(eqos, eqos->tx_desc_idx, false),
	       &eqos->dma_regs->ch0_txdesc_tail_pointer);

	for (i = 0; i < 1000000; i++) {
		eqos->config->ops->eqos_inval_desc(tx_desc);
		if (!(readl(&tx_desc->des3) & EQOS_DESC3_OWN))
			return 0;
		udelay(1);
	}

	debug("%s: TX timeout\n", __func__);
	return -ETIMEDOUT;
}
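
/*
 * Check the current RX descriptor: if the hardware still owns it, no
 * packet has arrived (-EAGAIN). Otherwise hand the caller a pointer into
 * the per-descriptor slice of the RX DMA buffer, after invalidating the
 * cache over the received bytes. The descriptor itself is recycled later
 * by eqos_free_pkt().
 */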
static int eqos_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	struct eqos_desc *rx_desc;
	int length;

	debug("%s(dev=%p, flags=%x):\n", __func__, dev, flags);

	rx_desc = eqos_get_desc(eqos, eqos->rx_desc_idx, true);
	eqos->config->ops->eqos_inval_desc(rx_desc);
	if (rx_desc->des3 & EQOS_DESC3_OWN) {
		debug("%s: RX packet not available\n", __func__);
		return -EAGAIN;
	}

	*packetp = eqos->rx_dma_buf +
		(eqos->rx_desc_idx * EQOS_MAX_PACKET_SIZE);
	length = rx_desc->des3 & 0x7fff;
	debug("%s: *packetp=%p, length=%d\n", __func__, *packetp, length);

	eqos->config->ops->eqos_inval_buffer(*packetp, length);

	return length;
}
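
/*
 * Return a received packet's descriptor to the hardware. The buffer
 * address is first cleared and the descriptor flushed, so the DMA never
 * observes a half-written descriptor; only the final write hands OWN
 * (plus BUF1V, buffer 1 address valid) back to the hardware. Advancing
 * the tail pointer to this descriptor marks it available for reception
 * again.
 */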
static int eqos_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	uchar *packet_expected;
	struct eqos_desc *rx_desc;

	debug("%s(packet=%p, length=%d)\n", __func__, packet, length);

	packet_expected = eqos->rx_dma_buf +
		(eqos->rx_desc_idx * EQOS_MAX_PACKET_SIZE);
	if (packet != packet_expected) {
		debug("%s: Unexpected packet (expected %p)\n", __func__,
		      packet_expected);
		return -EINVAL;
	}

	eqos->config->ops->eqos_inval_buffer(packet, length);

	rx_desc = eqos_get_desc(eqos, eqos->rx_desc_idx, true);

	rx_desc->des0 = 0;
	mb();
	eqos->config->ops->eqos_flush_desc(rx_desc);
	eqos->config->ops->eqos_inval_buffer(packet, length);
	rx_desc->des0 = (u32)(ulong)packet;
	rx_desc->des1 = 0;
	rx_desc->des2 = 0;
	/*
	 * Make sure that if HW sees the _OWN write below, it will see all the
	 * writes to the rest of the descriptor too.
	 */
	mb();
	rx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_BUF1V;
	eqos->config->ops->eqos_flush_desc(rx_desc);

	writel((ulong)rx_desc, &eqos->dma_regs->ch0_rxdesc_tail_pointer);

	eqos->rx_desc_idx++;
	eqos->rx_desc_idx %= EQOS_DESCRIPTORS_RX;

	return 0;
}
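
/*
 * Allocate the DMA resources shared by all SoC variants: the TX/RX
 * descriptor rings, one aligned TX bounce buffer, one contiguous RX
 * buffer carved into EQOS_DESCRIPTORS_RX slices, and a spare packet-sized
 * buffer. The error paths unwind in strict reverse order of allocation.
 */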
static int eqos_probe_resources_core(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	eqos->descs = eqos_alloc_descs(eqos, EQOS_DESCRIPTORS_NUM);
	if (!eqos->descs) {
		debug("%s: eqos_alloc_descs() failed\n", __func__);
		ret = -ENOMEM;
		goto err;
	}

	eqos->tx_dma_buf = memalign(EQOS_BUFFER_ALIGN, EQOS_MAX_PACKET_SIZE);
	if (!eqos->tx_dma_buf) {
		debug("%s: memalign(tx_dma_buf) failed\n", __func__);
		ret = -ENOMEM;
		goto err_free_descs;
	}
	debug("%s: tx_dma_buf=%p\n", __func__, eqos->tx_dma_buf);

	eqos->rx_dma_buf = memalign(EQOS_BUFFER_ALIGN, EQOS_RX_BUFFER_SIZE);
	if (!eqos->rx_dma_buf) {
		debug("%s: memalign(rx_dma_buf) failed\n", __func__);
		ret = -ENOMEM;
		goto err_free_tx_dma_buf;
	}
	debug("%s: rx_dma_buf=%p\n", __func__, eqos->rx_dma_buf);

	eqos->rx_pkt = malloc(EQOS_MAX_PACKET_SIZE);
	if (!eqos->rx_pkt) {
		debug("%s: malloc(rx_pkt) failed\n", __func__);
		ret = -ENOMEM;
		goto err_free_rx_dma_buf;
	}
	debug("%s: rx_pkt=%p\n", __func__, eqos->rx_pkt);

	eqos->config->ops->eqos_inval_buffer(eqos->rx_dma_buf,
			EQOS_MAX_PACKET_SIZE * EQOS_DESCRIPTORS_RX);

	debug("%s: OK\n", __func__);
	return 0;

err_free_rx_dma_buf:
	free(eqos->rx_dma_buf);
err_free_tx_dma_buf:
	free(eqos->tx_dma_buf);
err_free_descs:
	eqos_free_descs(eqos->descs);
err:
	debug("%s: returns %d\n", __func__, ret);
	return ret;
}
static int eqos_remove_resources_core(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	free(eqos->rx_pkt);
	free(eqos->rx_dma_buf);
	free(eqos->tx_dma_buf);
	eqos_free_descs(eqos->descs);

	debug("%s: OK\n", __func__);
	return 0;
}
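
/*
 * Tegra186 needs one named reset line, a PHY reset GPIO, and five named
 * clocks from the device tree. Each failure path releases exactly what
 * was acquired before it.
 */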
static int eqos_probe_resources_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	ret = reset_get_by_name(dev, "eqos", &eqos->reset_ctl);
	if (ret) {
		pr_err("reset_get_by_name(rst) failed: %d", ret);
		return ret;
	}

	ret = gpio_request_by_name(dev, "phy-reset-gpios", 0,
				   &eqos->phy_reset_gpio,
				   GPIOD_IS_OUT | GPIOD_IS_OUT_ACTIVE);
	if (ret) {
		pr_err("gpio_request_by_name(phy reset) failed: %d", ret);
		goto err_free_reset_eqos;
	}

	ret = clk_get_by_name(dev, "slave_bus", &eqos->clk_slave_bus);
	if (ret) {
		pr_err("clk_get_by_name(slave_bus) failed: %d", ret);
		goto err_free_gpio_phy_reset;
	}

	ret = clk_get_by_name(dev, "master_bus", &eqos->clk_master_bus);
	if (ret) {
		pr_err("clk_get_by_name(master_bus) failed: %d", ret);
		goto err_free_clk_slave_bus;
	}

	ret = clk_get_by_name(dev, "rx", &eqos->clk_rx);
	if (ret) {
		pr_err("clk_get_by_name(rx) failed: %d", ret);
		goto err_free_clk_master_bus;
	}

	ret = clk_get_by_name(dev, "ptp_ref", &eqos->clk_ptp_ref);
	if (ret) {
		pr_err("clk_get_by_name(ptp_ref) failed: %d", ret);
		goto err_free_clk_rx;
	}

	ret = clk_get_by_name(dev, "tx", &eqos->clk_tx);
	if (ret) {
		pr_err("clk_get_by_name(tx) failed: %d", ret);
		goto err_free_clk_ptp_ref;
	}

	debug("%s: OK\n", __func__);
	return 0;

err_free_clk_ptp_ref:
	clk_free(&eqos->clk_ptp_ref);
err_free_clk_rx:
	clk_free(&eqos->clk_rx);
err_free_clk_master_bus:
	clk_free(&eqos->clk_master_bus);
err_free_clk_slave_bus:
	clk_free(&eqos->clk_slave_bus);
err_free_gpio_phy_reset:
	dm_gpio_free(dev, &eqos->phy_reset_gpio);
err_free_reset_eqos:
	reset_free(&eqos->reset_ctl);

	debug("%s: returns %d\n", __func__, ret);
	return ret;
}
/*
 * Board-specific Ethernet interface initialization; boards may override
 * this weak stub.
 */
__weak int board_interface_eth_init(struct udevice *dev,
				    phy_interface_t interface_type)
{
	return 0;
}
static int eqos_probe_resources_stm32(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;
	phy_interface_t interface;

	debug("%s(dev=%p):\n", __func__, dev);

	interface = eqos->config->interface(dev);
	if (interface == PHY_INTERFACE_MODE_NONE) {
		pr_err("Invalid PHY interface\n");
		return -EINVAL;
	}

	ret = board_interface_eth_init(dev, interface);
	if (ret)
		return -EINVAL;

	eqos->max_speed = dev_read_u32_default(dev, "max-speed", 0);

	ret = clk_get_by_name(dev, "stmmaceth", &eqos->clk_master_bus);
	if (ret) {
		pr_err("clk_get_by_name(master_bus) failed: %d", ret);
		goto err_probe;
	}

	ret = clk_get_by_name(dev, "mac-clk-rx", &eqos->clk_rx);
	if (ret) {
		pr_err("clk_get_by_name(rx) failed: %d", ret);
		goto err_free_clk_master_bus;
	}

	ret = clk_get_by_name(dev, "mac-clk-tx", &eqos->clk_tx);
	if (ret) {
		pr_err("clk_get_by_name(tx) failed: %d", ret);
		goto err_free_clk_rx;
	}

	/* Get the ETH_CLK clock (optional) */
	ret = clk_get_by_name(dev, "eth-ck", &eqos->clk_ck);
	if (ret)
		pr_warn("No PHY clock provided: %d", ret);

	debug("%s: OK\n", __func__);
	return 0;

err_free_clk_rx:
	clk_free(&eqos->clk_rx);
err_free_clk_master_bus:
	clk_free(&eqos->clk_master_bus);
err_probe:
	debug("%s: returns %d\n", __func__, ret);
	return ret;
}
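
/*
 * The PHY interface mode comes from the standard "phy-mode" device tree
 * property; PHY_INTERFACE_MODE_NONE is returned when the property is
 * absent or unrecognized, which callers treat as an error.
 */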
static phy_interface_t eqos_get_interface_stm32(struct udevice *dev)
{
	const char *phy_mode;
	phy_interface_t interface = PHY_INTERFACE_MODE_NONE;

	debug("%s(dev=%p):\n", __func__, dev);

	phy_mode = dev_read_prop(dev, "phy-mode", NULL);
	if (phy_mode)
		interface = phy_get_interface_by_name(phy_mode);

	return interface;
}

static phy_interface_t eqos_get_interface_tegra186(struct udevice *dev)
{
	return PHY_INTERFACE_MODE_MII;
}
static int eqos_probe_resources_imx(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	phy_interface_t interface;

	debug("%s(dev=%p):\n", __func__, dev);

	interface = eqos->config->interface(dev);
	if (interface == PHY_INTERFACE_MODE_NONE) {
		pr_err("Invalid PHY interface\n");
		return -EINVAL;
	}

	debug("%s: OK\n", __func__);
	return 0;
}

static phy_interface_t eqos_get_interface_imx(struct udevice *dev)
{
	const char *phy_mode;
	phy_interface_t interface = PHY_INTERFACE_MODE_NONE;

	debug("%s(dev=%p):\n", __func__, dev);

	phy_mode = dev_read_prop(dev, "phy-mode", NULL);
	if (phy_mode)
		interface = phy_get_interface_by_name(phy_mode);

	return interface;
}
static int eqos_probe_resources_jh7110(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	phy_interface_t interface;
	int ret;

	interface = eqos->config->interface(dev);
	if (interface == PHY_INTERFACE_MODE_NONE) {
		pr_err("Invalid PHY interface\n");
		return -EINVAL;
	}

	ret = reset_get_bulk(dev, &eqos->reset_bulk);
	if (ret) {
		pr_err("Can't get reset: %d\n", ret);
		return ret;
	}

	ret = clk_get_by_name(dev, "gtx", &eqos->clk_tx);
	if (ret) {
		pr_err("clk_get_by_name(gtx) failed: %d", ret);
		goto err_free_reset_eqos;
	}

#if CONFIG_IS_ENABLED(TARGET_STARFIVE_VISIONFIVE2)
	ret = clk_get_by_name(dev, "rmii_rtx", &eqos->rmii_rtx);
	if (ret) {
		pr_err("clk_get_by_name(rmii_rtx) failed: %d", ret);
		goto err_free_clk_gtx;
	}
#endif

	ret = clk_get_bulk(dev, &eqos->clk_bulk);
	if (ret) {
		pr_err("clk_get_bulk failed: %d", ret);
		goto err_free_clk_gtx;
	}

	debug("%s: OK\n", __func__);
	return 0;

err_free_clk_gtx:
	clk_free(&eqos->clk_tx);
err_free_reset_eqos:
	reset_release_bulk(&eqos->reset_bulk);
	return ret;
}
static phy_interface_t eqos_get_interface_jh7110(struct udevice *dev)
{
	const char *phy_mode;
	phy_interface_t interface = PHY_INTERFACE_MODE_NONE;

	phy_mode = dev_read_prop(dev, "phy-mode", NULL);
	if (phy_mode)
		interface = phy_get_interface_by_name(phy_mode);

	return interface;
}
static int eqos_remove_resources_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

#ifdef CONFIG_CLK
	clk_free(&eqos->clk_tx);
	clk_free(&eqos->clk_ptp_ref);
	clk_free(&eqos->clk_rx);
	clk_free(&eqos->clk_slave_bus);
	clk_free(&eqos->clk_master_bus);
#endif
	dm_gpio_free(dev, &eqos->phy_reset_gpio);
	reset_free(&eqos->reset_ctl);

	debug("%s: OK\n", __func__);
	return 0;
}
static int eqos_remove_resources_stm32(struct udevice *dev)
{
	/*
	 * Declared outside the #ifdef so the GPIO cleanup below still
	 * compiles when CONFIG_CLK is disabled.
	 */
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

#ifdef CONFIG_CLK
	clk_free(&eqos->clk_tx);
	clk_free(&eqos->clk_rx);
	clk_free(&eqos->clk_master_bus);
	if (clk_valid(&eqos->clk_ck))
		clk_free(&eqos->clk_ck);
#endif

	if (dm_gpio_is_valid(&eqos->phy_reset_gpio))
		dm_gpio_free(dev, &eqos->phy_reset_gpio);

	debug("%s: OK\n", __func__);
	return 0;
}
static int eqos_remove_resources_jh7110(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	reset_release_bulk(&eqos->reset_bulk);
	clk_release_bulk(&eqos->clk_bulk);

	return 0;
}
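
/*
 * Common probe: map the MAC/MTL/DMA register blocks at fixed offsets from
 * the single "reg" address, allocate the core DMA resources, let the
 * SoC-specific ops claim clocks/resets, and register an MDIO bus backed
 * by this MAC unless CONFIG_DM_ETH_PHY already supplied a shared one.
 */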
static int eqos_probe(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	eqos->dev = dev;
	eqos->config = (void *)dev_get_driver_data(dev);

	eqos->regs = dev_read_addr(dev);
	if (eqos->regs == FDT_ADDR_T_NONE) {
		pr_err("dev_read_addr() failed");
		return -ENODEV;
	}
	eqos->mac_regs = (void *)(eqos->regs + EQOS_MAC_REGS_BASE);
	eqos->mtl_regs = (void *)(eqos->regs + EQOS_MTL_REGS_BASE);
	eqos->dma_regs = (void *)(eqos->regs + EQOS_DMA_REGS_BASE);
	eqos->tegra186_regs = (void *)(eqos->regs + EQOS_TEGRA186_REGS_BASE);

	ret = eqos_probe_resources_core(dev);
	if (ret < 0) {
		pr_err("eqos_probe_resources_core() failed: %d", ret);
		return ret;
	}

	ret = eqos->config->ops->eqos_probe_resources(dev);
	if (ret < 0) {
		pr_err("eqos_probe_resources() failed: %d", ret);
		goto err_remove_resources_core;
	}

#ifdef CONFIG_DM_ETH_PHY
	eqos->mii = eth_phy_get_mdio_bus(dev);
#endif
	if (!eqos->mii) {
		eqos->mii = mdio_alloc();
		if (!eqos->mii) {
			pr_err("mdio_alloc() failed");
			ret = -ENOMEM;
			goto err_remove_resources;
		}
		eqos->mii->read = eqos_mdio_read;
		eqos->mii->write = eqos_mdio_write;
		eqos->mii->priv = eqos;
		strcpy(eqos->mii->name, dev->name);

		ret = mdio_register(eqos->mii);
		if (ret < 0) {
			pr_err("mdio_register() failed: %d", ret);
			goto err_free_mdio;
		}
	}

#ifdef CONFIG_DM_ETH_PHY
	eth_phy_set_mdio_bus(dev, eqos->mii);
#endif

	debug("%s: OK\n", __func__);
	return 0;

err_free_mdio:
	mdio_free(eqos->mii);
err_remove_resources:
	eqos->config->ops->eqos_remove_resources(dev);
err_remove_resources_core:
	eqos_remove_resources_core(dev);

	debug("%s: returns %d\n", __func__, ret);
	return ret;
}
static int eqos_remove(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	mdio_unregister(eqos->mii);
	mdio_free(eqos->mii);
	eqos->config->ops->eqos_remove_resources(dev);
	eqos_remove_resources_core(dev);

	debug("%s: OK\n", __func__);
	return 0;
}
static int eqos_null_ops(struct udevice *dev)
{
	return 0;
}
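
/*
 * UCLASS_ETH callbacks shared by every variant; per-SoC differences are
 * confined to the eqos_ops/eqos_config tables below, with eqos_null_ops
 * standing in where a variant needs no work for a given hook.
 */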
static const struct eth_ops eqos_ops = {
	.start = eqos_start,
	.stop = eqos_stop,
	.send = eqos_send,
	.recv = eqos_recv,
	.free_pkt = eqos_free_pkt,
	.write_hwaddr = eqos_write_hwaddr,
	.read_rom_hwaddr = eqos_read_rom_hwaddr,
};
static struct eqos_ops eqos_tegra186_ops = {
	.eqos_inval_desc = eqos_inval_desc_generic,
	.eqos_flush_desc = eqos_flush_desc_generic,
	.eqos_inval_buffer = eqos_inval_buffer_tegra186,
	.eqos_flush_buffer = eqos_flush_buffer_tegra186,
	.eqos_probe_resources = eqos_probe_resources_tegra186,
	.eqos_remove_resources = eqos_remove_resources_tegra186,
	.eqos_stop_resets = eqos_stop_resets_tegra186,
	.eqos_start_resets = eqos_start_resets_tegra186,
	.eqos_stop_clks = eqos_stop_clks_tegra186,
	.eqos_start_clks = eqos_start_clks_tegra186,
	.eqos_calibrate_pads = eqos_calibrate_pads_tegra186,
	.eqos_disable_calibration = eqos_disable_calibration_tegra186,
	.eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_tegra186,
	.eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_tegra186
};

static const struct eqos_config __maybe_unused eqos_tegra186_config = {
	.reg_access_always_ok = false,
	.mdio_wait = 10,
	.swr_wait = 10,
	.config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB,
	.config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_20_35,
	.axi_bus_width = EQOS_AXI_WIDTH_128,
	.interface = eqos_get_interface_tegra186,
	.ops = &eqos_tegra186_ops
};
static struct eqos_ops eqos_stm32_ops = {
	.eqos_inval_desc = eqos_inval_desc_generic,
	.eqos_flush_desc = eqos_flush_desc_generic,
	.eqos_inval_buffer = eqos_inval_buffer_generic,
	.eqos_flush_buffer = eqos_flush_buffer_generic,
	.eqos_probe_resources = eqos_probe_resources_stm32,
	.eqos_remove_resources = eqos_remove_resources_stm32,
	.eqos_stop_resets = eqos_null_ops,
	.eqos_start_resets = eqos_null_ops,
	.eqos_stop_clks = eqos_stop_clks_stm32,
	.eqos_start_clks = eqos_start_clks_stm32,
	.eqos_calibrate_pads = eqos_null_ops,
	.eqos_disable_calibration = eqos_null_ops,
	.eqos_set_tx_clk_speed = eqos_null_ops,
	.eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_stm32
};

static const struct eqos_config __maybe_unused eqos_stm32_config = {
	.reg_access_always_ok = false,
	.mdio_wait = 10000,
	.swr_wait = 50,
	.config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_AV,
	.config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_250_300,
	.axi_bus_width = EQOS_AXI_WIDTH_64,
	.interface = eqos_get_interface_stm32,
	.ops = &eqos_stm32_ops
};
static struct eqos_ops eqos_imx_ops = {
	.eqos_inval_desc = eqos_inval_desc_generic,
	.eqos_flush_desc = eqos_flush_desc_generic,
	.eqos_inval_buffer = eqos_inval_buffer_generic,
	.eqos_flush_buffer = eqos_flush_buffer_generic,
	.eqos_probe_resources = eqos_probe_resources_imx,
	.eqos_remove_resources = eqos_null_ops,
	.eqos_stop_resets = eqos_null_ops,
	.eqos_start_resets = eqos_null_ops,
	.eqos_stop_clks = eqos_null_ops,
	.eqos_start_clks = eqos_null_ops,
	.eqos_calibrate_pads = eqos_null_ops,
	.eqos_disable_calibration = eqos_null_ops,
	.eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_imx,
	.eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_imx
};

struct eqos_config __maybe_unused eqos_imx_config = {
	.reg_access_always_ok = false,
	.mdio_wait = 10,
	.swr_wait = 50,
	.config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB,
	.config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_250_300,
	.axi_bus_width = EQOS_AXI_WIDTH_64,
	.interface = eqos_get_interface_imx,
	.ops = &eqos_imx_ops
};
static struct eqos_ops eqos_jh7110_ops = {
	.eqos_inval_desc = eqos_inval_desc_generic,
	.eqos_flush_desc = eqos_flush_desc_generic,
	.eqos_inval_buffer = eqos_inval_buffer_generic,
	.eqos_flush_buffer = eqos_flush_buffer_generic,
	.eqos_probe_resources = eqos_probe_resources_jh7110,
	.eqos_remove_resources = eqos_remove_resources_jh7110,
	.eqos_stop_resets = eqos_stop_resets_jh7110,
	.eqos_start_resets = eqos_start_resets_jh7110,
	.eqos_stop_clks = eqos_stop_clks_jh7110,
	.eqos_start_clks = eqos_start_clks_jh7110,
	.eqos_calibrate_pads = eqos_null_ops,
	.eqos_disable_calibration = eqos_null_ops,
	.eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_jh7110,
	.eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_jh7110
};

struct eqos_config __maybe_unused eqos_jh7110_config = {
	.reg_access_always_ok = false,
	.mdio_wait = 10,
	.swr_wait = 50,
	.config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB,
	.config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_250_300,
	.axi_bus_width = EQOS_AXI_WIDTH_64,
	.interface = eqos_get_interface_jh7110,
	.ops = &eqos_jh7110_ops
};
static const struct udevice_id eqos_ids[] = {
#if IS_ENABLED(CONFIG_DWC_ETH_QOS_TEGRA186)
	{
		.compatible = "nvidia,tegra186-eqos",
		.data = (ulong)&eqos_tegra186_config
	},
#endif
#if IS_ENABLED(CONFIG_DWC_ETH_QOS_STM32)
	{
		.compatible = "st,stm32mp1-dwmac",
		.data = (ulong)&eqos_stm32_config
	},
#endif
#if IS_ENABLED(CONFIG_DWC_ETH_QOS_IMX)
	{
		.compatible = "fsl,imx-eqos",
		.data = (ulong)&eqos_imx_config
	},
#endif
#if IS_ENABLED(CONFIG_DWC_ETH_QOS_STARFIVE)
	{
		.compatible = "starfive,jh7110-eqos-5.20",
		.data = (ulong)&eqos_jh7110_config
	},
#endif
	{ }
};
U_BOOT_DRIVER(eth_eqos) = {
	.name = "eth_eqos",
	.id = UCLASS_ETH,
	.of_match = of_match_ptr(eqos_ids),
	.probe = eqos_probe,
	.remove = eqos_remove,
	.ops = &eqos_ops,
	.priv_auto = sizeof(struct eqos_priv),
	.plat_auto = sizeof(struct eth_pdata),
};