dwc_eth_qos.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016, NVIDIA CORPORATION.
 *
 * Portions based on U-Boot's rtl8169.c.
 */

/*
 * This driver supports the Synopsys Designware Ethernet QOS (Quality Of
 * Service) IP block. The IP supports multiple options for bus type, clocking/
 * reset structure, and feature list.
 *
 * The driver is written such that generic core logic is kept separate from
 * configuration-specific logic. Code that interacts with configuration-
 * specific resources is split out into separate functions to avoid polluting
 * common code. If/when this driver is enhanced to support multiple
 * configurations, the core code should be adapted to call all configuration-
 * specific functions through function pointers, with the definition of those
 * function pointers being supplied by struct udevice_id eqos_ids[]'s .data
 * field.
 *
 * The following configurations are currently supported:
 * tegra186:
 *    NVIDIA's Tegra186 chip. This configuration uses an AXI master/DMA bus, an
 *    AHB slave/register bus, contains the DMA, MTL, and MAC sub-blocks, and
 *    supports a single RGMII PHY. This configuration also has SW control over
 *    all clock and reset signals to the HW block.
 */

#define LOG_CATEGORY UCLASS_ETH

#include <common.h>
#include <clk.h>
#include <cpu_func.h>
#include <dm.h>
#include <errno.h>
#include <log.h>
#include <malloc.h>
#include <memalign.h>
#include <miiphy.h>
#include <net.h>
#include <netdev.h>
#include <phy.h>
#include <reset.h>
#include <wait_bit.h>
#include <asm/cache.h>
#include <asm/gpio.h>
#include <asm/io.h>
#include <eth_phy.h>
#ifdef CONFIG_ARCH_IMX8M
#include <asm/arch/clock.h>
#include <asm/mach-imx/sys_proto.h>
#endif
#include <linux/bitops.h>
#include <linux/delay.h>
/* Core registers */

#define EQOS_MAC_REGS_BASE 0x000
struct eqos_mac_regs {
	uint32_t configuration;				/* 0x000 */
	uint32_t unused_004[(0x070 - 0x004) / 4];	/* 0x004 */
	uint32_t q0_tx_flow_ctrl;			/* 0x070 */
	uint32_t unused_070[(0x090 - 0x074) / 4];	/* 0x074 */
	uint32_t rx_flow_ctrl;				/* 0x090 */
	uint32_t unused_094;				/* 0x094 */
	uint32_t txq_prty_map0;				/* 0x098 */
	uint32_t unused_09c;				/* 0x09c */
	uint32_t rxq_ctrl0;				/* 0x0a0 */
	uint32_t unused_0a4;				/* 0x0a4 */
	uint32_t rxq_ctrl2;				/* 0x0a8 */
	uint32_t unused_0ac[(0x0dc - 0x0ac) / 4];	/* 0x0ac */
	uint32_t us_tic_counter;			/* 0x0dc */
	uint32_t unused_0e0[(0x11c - 0x0e0) / 4];	/* 0x0e0 */
	uint32_t hw_feature0;				/* 0x11c */
	uint32_t hw_feature1;				/* 0x120 */
	uint32_t hw_feature2;				/* 0x124 */
	uint32_t unused_128[(0x200 - 0x128) / 4];	/* 0x128 */
	uint32_t mdio_address;				/* 0x200 */
	uint32_t mdio_data;				/* 0x204 */
	uint32_t unused_208[(0x300 - 0x208) / 4];	/* 0x208 */
	uint32_t address0_high;				/* 0x300 */
	uint32_t address0_low;				/* 0x304 */
};

#define EQOS_MAC_CONFIGURATION_GPSLCE BIT(23)
#define EQOS_MAC_CONFIGURATION_CST BIT(21)
#define EQOS_MAC_CONFIGURATION_ACS BIT(20)
#define EQOS_MAC_CONFIGURATION_WD BIT(19)
#define EQOS_MAC_CONFIGURATION_JD BIT(17)
#define EQOS_MAC_CONFIGURATION_JE BIT(16)
#define EQOS_MAC_CONFIGURATION_PS BIT(15)
#define EQOS_MAC_CONFIGURATION_FES BIT(14)
#define EQOS_MAC_CONFIGURATION_DM BIT(13)
#define EQOS_MAC_CONFIGURATION_LM BIT(12)
#define EQOS_MAC_CONFIGURATION_TE BIT(1)
#define EQOS_MAC_CONFIGURATION_RE BIT(0)

#define EQOS_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT 16
#define EQOS_MAC_Q0_TX_FLOW_CTRL_PT_MASK 0xffff
#define EQOS_MAC_Q0_TX_FLOW_CTRL_TFE BIT(1)

#define EQOS_MAC_RX_FLOW_CTRL_RFE BIT(0)

#define EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_SHIFT 0
#define EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_MASK 0xff

#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT 0
#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK 3
#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_NOT_ENABLED 0
#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB 2
#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_AV 1

#define EQOS_MAC_RXQ_CTRL2_PSRQ0_SHIFT 0
#define EQOS_MAC_RXQ_CTRL2_PSRQ0_MASK 0xff

#define EQOS_MAC_HW_FEATURE0_MMCSEL_SHIFT 8
#define EQOS_MAC_HW_FEATURE0_HDSEL_SHIFT 2
#define EQOS_MAC_HW_FEATURE0_GMIISEL_SHIFT 1
#define EQOS_MAC_HW_FEATURE0_MIISEL_SHIFT 0

#define EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_SHIFT 6
#define EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_MASK 0x1f
#define EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_SHIFT 0
#define EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_MASK 0x1f

#define EQOS_MAC_HW_FEATURE3_ASP_SHIFT 28
#define EQOS_MAC_HW_FEATURE3_ASP_MASK 0x3

#define EQOS_MAC_MDIO_ADDRESS_PA_SHIFT 21
#define EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT 16
#define EQOS_MAC_MDIO_ADDRESS_CR_SHIFT 8
#define EQOS_MAC_MDIO_ADDRESS_CR_20_35 2
#define EQOS_MAC_MDIO_ADDRESS_CR_250_300 5
#define EQOS_MAC_MDIO_ADDRESS_SKAP BIT(4)
#define EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT 2
#define EQOS_MAC_MDIO_ADDRESS_GOC_READ 3
#define EQOS_MAC_MDIO_ADDRESS_GOC_WRITE 1
#define EQOS_MAC_MDIO_ADDRESS_C45E BIT(1)
#define EQOS_MAC_MDIO_ADDRESS_GB BIT(0)

#define EQOS_MAC_MDIO_DATA_GD_MASK 0xffff
#define EQOS_MTL_REGS_BASE 0xd00
struct eqos_mtl_regs {
	uint32_t txq0_operation_mode;			/* 0xd00 */
	uint32_t unused_d04;				/* 0xd04 */
	uint32_t txq0_debug;				/* 0xd08 */
	uint32_t unused_d0c[(0xd18 - 0xd0c) / 4];	/* 0xd0c */
	uint32_t txq0_quantum_weight;			/* 0xd18 */
	uint32_t unused_d1c[(0xd30 - 0xd1c) / 4];	/* 0xd1c */
	uint32_t rxq0_operation_mode;			/* 0xd30 */
	uint32_t unused_d34;				/* 0xd34 */
	uint32_t rxq0_debug;				/* 0xd38 */
};

#define EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT 16
#define EQOS_MTL_TXQ0_OPERATION_MODE_TQS_MASK 0x1ff
#define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_SHIFT 2
#define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_MASK 3
#define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_ENABLED 2
#define EQOS_MTL_TXQ0_OPERATION_MODE_TSF BIT(1)
#define EQOS_MTL_TXQ0_OPERATION_MODE_FTQ BIT(0)

#define EQOS_MTL_TXQ0_DEBUG_TXQSTS BIT(4)
#define EQOS_MTL_TXQ0_DEBUG_TRCSTS_SHIFT 1
#define EQOS_MTL_TXQ0_DEBUG_TRCSTS_MASK 3

#define EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT 20
#define EQOS_MTL_RXQ0_OPERATION_MODE_RQS_MASK 0x3ff
#define EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT 14
#define EQOS_MTL_RXQ0_OPERATION_MODE_RFD_MASK 0x3f
#define EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT 8
#define EQOS_MTL_RXQ0_OPERATION_MODE_RFA_MASK 0x3f
#define EQOS_MTL_RXQ0_OPERATION_MODE_EHFC BIT(7)
#define EQOS_MTL_RXQ0_OPERATION_MODE_RSF BIT(5)

#define EQOS_MTL_RXQ0_DEBUG_PRXQ_SHIFT 16
#define EQOS_MTL_RXQ0_DEBUG_PRXQ_MASK 0x7fff
#define EQOS_MTL_RXQ0_DEBUG_RXQSTS_SHIFT 4
#define EQOS_MTL_RXQ0_DEBUG_RXQSTS_MASK 3
#define EQOS_DMA_REGS_BASE 0x1000
struct eqos_dma_regs {
	uint32_t mode;					/* 0x1000 */
	uint32_t sysbus_mode;				/* 0x1004 */
	uint32_t unused_1008[(0x1100 - 0x1008) / 4];	/* 0x1008 */
	uint32_t ch0_control;				/* 0x1100 */
	uint32_t ch0_tx_control;			/* 0x1104 */
	uint32_t ch0_rx_control;			/* 0x1108 */
	uint32_t unused_110c;				/* 0x110c */
	uint32_t ch0_txdesc_list_haddress;		/* 0x1110 */
	uint32_t ch0_txdesc_list_address;		/* 0x1114 */
	uint32_t ch0_rxdesc_list_haddress;		/* 0x1118 */
	uint32_t ch0_rxdesc_list_address;		/* 0x111c */
	uint32_t ch0_txdesc_tail_pointer;		/* 0x1120 */
	uint32_t unused_1124;				/* 0x1124 */
	uint32_t ch0_rxdesc_tail_pointer;		/* 0x1128 */
	uint32_t ch0_txdesc_ring_length;		/* 0x112c */
	uint32_t ch0_rxdesc_ring_length;		/* 0x1130 */
};

#define EQOS_DMA_MODE_SWR BIT(0)

#define EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT 16
#define EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_MASK 0xf
#define EQOS_DMA_SYSBUS_MODE_EAME BIT(11)
#define EQOS_DMA_SYSBUS_MODE_BLEN16 BIT(3)
#define EQOS_DMA_SYSBUS_MODE_BLEN8 BIT(2)
#define EQOS_DMA_SYSBUS_MODE_BLEN4 BIT(1)

#define EQOS_DMA_CH0_CONTROL_DSL_SHIFT 18
#define EQOS_DMA_CH0_CONTROL_PBLX8 BIT(16)

#define EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT 16
#define EQOS_DMA_CH0_TX_CONTROL_TXPBL_MASK 0x3f
#define EQOS_DMA_CH0_TX_CONTROL_OSP BIT(4)
#define EQOS_DMA_CH0_TX_CONTROL_ST BIT(0)

#define EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT 16
#define EQOS_DMA_CH0_RX_CONTROL_RXPBL_MASK 0x3f
#define EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT 1
#define EQOS_DMA_CH0_RX_CONTROL_RBSZ_MASK 0x3fff
#define EQOS_DMA_CH0_RX_CONTROL_SR BIT(0)

/* These registers are Tegra186-specific */
#define EQOS_TEGRA186_REGS_BASE 0x8800
struct eqos_tegra186_regs {
	uint32_t sdmemcomppadctrl;	/* 0x8800 */
	uint32_t auto_cal_config;	/* 0x8804 */
	uint32_t unused_8808;		/* 0x8808 */
	uint32_t auto_cal_status;	/* 0x880c */
};

#define EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD BIT(31)

#define EQOS_AUTO_CAL_CONFIG_START BIT(31)
#define EQOS_AUTO_CAL_CONFIG_ENABLE BIT(29)

#define EQOS_AUTO_CAL_STATUS_ACTIVE BIT(31)
/* Descriptors */
#define EQOS_DESCRIPTORS_TX 4
#define EQOS_DESCRIPTORS_RX 4
#define EQOS_DESCRIPTORS_NUM (EQOS_DESCRIPTORS_TX + EQOS_DESCRIPTORS_RX)
#define EQOS_BUFFER_ALIGN ARCH_DMA_MINALIGN
#define EQOS_MAX_PACKET_SIZE ALIGN(1568, ARCH_DMA_MINALIGN)
#define EQOS_RX_BUFFER_SIZE (EQOS_DESCRIPTORS_RX * EQOS_MAX_PACKET_SIZE)

struct eqos_desc {
	u32 des0;
	u32 des1;
	u32 des2;
	u32 des3;
};
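
/*
 * Descriptor field usage in this driver (see eqos_send()/eqos_free_pkt()
 * below): des0 holds the buffer address, des1 is unused, des2 holds the
 * buffer length, and des3 holds the flags below (plus, on TX, the length).
 * The OWN bit in des3 hands the descriptor from SW to the DMA engine and is
 * cleared by HW on completion.
 */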
#define EQOS_DESC3_OWN BIT(31)
#define EQOS_DESC3_FD BIT(29)
#define EQOS_DESC3_LD BIT(28)
#define EQOS_DESC3_BUF1V BIT(24)

#define EQOS_AXI_WIDTH_32 4
#define EQOS_AXI_WIDTH_64 8
#define EQOS_AXI_WIDTH_128 16
struct eqos_config {
	bool reg_access_always_ok;
	int mdio_wait;
	int swr_wait;
	int config_mac;
	int config_mac_mdio;
	unsigned int axi_bus_width;
	phy_interface_t (*interface)(struct udevice *dev);
	struct eqos_ops *ops;
};

struct eqos_ops {
	void (*eqos_inval_desc)(void *desc);
	void (*eqos_flush_desc)(void *desc);
	void (*eqos_inval_buffer)(void *buf, size_t size);
	void (*eqos_flush_buffer)(void *buf, size_t size);
	int (*eqos_probe_resources)(struct udevice *dev);
	int (*eqos_remove_resources)(struct udevice *dev);
	int (*eqos_stop_resets)(struct udevice *dev);
	int (*eqos_start_resets)(struct udevice *dev);
	int (*eqos_stop_clks)(struct udevice *dev);
	int (*eqos_start_clks)(struct udevice *dev);
	int (*eqos_calibrate_pads)(struct udevice *dev);
	int (*eqos_disable_calibration)(struct udevice *dev);
	int (*eqos_set_tx_clk_speed)(struct udevice *dev);
	ulong (*eqos_get_tick_clk_rate)(struct udevice *dev);
};

struct eqos_priv {
	struct udevice *dev;
	const struct eqos_config *config;
	fdt_addr_t regs;
	struct eqos_mac_regs *mac_regs;
	struct eqos_mtl_regs *mtl_regs;
	struct eqos_dma_regs *dma_regs;
	struct eqos_tegra186_regs *tegra186_regs;
	struct reset_ctl reset_ctl;
	struct gpio_desc phy_reset_gpio;
	struct clk clk_master_bus;
	struct clk clk_rx;
	struct clk clk_ptp_ref;
	struct clk clk_tx;
	struct clk clk_ck;
	struct clk clk_slave_bus;
	struct mii_dev *mii;
	struct phy_device *phy;
	u32 max_speed;
	void *descs;
	int tx_desc_idx, rx_desc_idx;
	unsigned int desc_size;
	void *tx_dma_buf;
	void *rx_dma_buf;
	void *rx_pkt;
	bool started;
	bool reg_access_ok;
	bool clk_ck_enabled;
	struct reset_ctl_bulk reset_bulk;
	struct clk_bulk clk_bulk;
};
/*
 * TX and RX descriptors are 16 bytes. This causes problems with the cache
 * maintenance on CPUs where the cache-line size exceeds the size of these
 * descriptors. What will happen is that when the driver receives a packet
 * it will be immediately requeued for the hardware to reuse. The CPU will
 * therefore need to flush the cache-line containing the descriptor, which
 * will cause all other descriptors in the same cache-line to be flushed
 * along with it. If one of those descriptors had been written to by the
 * device those changes (and the associated packet) will be lost.
 *
 * To work around this, we make use of non-cached memory if available. If
 * descriptors are mapped uncached there's no need to manually flush them
 * or invalidate them.
 *
 * Note that this only applies to descriptors. The packet data buffers do
 * not have the same constraints since they are 1536 bytes large, so they
 * are unlikely to share cache-lines.
 */
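/*
 * For example (an illustrative assumption, not a statement about any
 * particular SoC): with 64-byte cache-lines, ARCH_DMA_MINALIGN is 64, so
 * eqos_alloc_descs() below pads each 16-byte descriptor out to
 * desc_size = ALIGN(16, 64) = 64 bytes; every descriptor then occupies its
 * own cache-line and can be flushed or invalidated independently.
 */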
static void *eqos_alloc_descs(struct eqos_priv *eqos, unsigned int num)
{
	eqos->desc_size = ALIGN(sizeof(struct eqos_desc),
				(unsigned int)ARCH_DMA_MINALIGN);

	return memalign(eqos->desc_size, num * eqos->desc_size);
}

static void eqos_free_descs(void *descs)
{
	free(descs);
}

static struct eqos_desc *eqos_get_desc(struct eqos_priv *eqos,
				       unsigned int num, bool rx)
{
	return eqos->descs +
	       ((rx ? EQOS_DESCRIPTORS_TX : 0) + num) * eqos->desc_size;
}

static void eqos_inval_desc_generic(void *desc)
{
	unsigned long start = (unsigned long)desc;
	unsigned long end = ALIGN(start + sizeof(struct eqos_desc),
				  ARCH_DMA_MINALIGN);

	invalidate_dcache_range(start, end);
}

static void eqos_flush_desc_generic(void *desc)
{
	unsigned long start = (unsigned long)desc;
	unsigned long end = ALIGN(start + sizeof(struct eqos_desc),
				  ARCH_DMA_MINALIGN);

	flush_dcache_range(start, end);
}

static void eqos_inval_buffer_tegra186(void *buf, size_t size)
{
	unsigned long start = (unsigned long)buf & ~(ARCH_DMA_MINALIGN - 1);
	unsigned long end = ALIGN(start + size, ARCH_DMA_MINALIGN);

	invalidate_dcache_range(start, end);
}

static void eqos_inval_buffer_generic(void *buf, size_t size)
{
	unsigned long start = rounddown((unsigned long)buf, ARCH_DMA_MINALIGN);
	unsigned long end = roundup((unsigned long)buf + size,
				    ARCH_DMA_MINALIGN);

	invalidate_dcache_range(start, end);
}

static void eqos_flush_buffer_tegra186(void *buf, size_t size)
{
	flush_cache((unsigned long)buf, size);
}

static void eqos_flush_buffer_generic(void *buf, size_t size)
{
	unsigned long start = rounddown((unsigned long)buf, ARCH_DMA_MINALIGN);
	unsigned long end = roundup((unsigned long)buf + size,
				    ARCH_DMA_MINALIGN);

	flush_dcache_range(start, end);
}
static int eqos_mdio_wait_idle(struct eqos_priv *eqos)
{
	return wait_for_bit_le32(&eqos->mac_regs->mdio_address,
				 EQOS_MAC_MDIO_ADDRESS_GB, false,
				 1000000, true);
}

static int eqos_mdio_read(struct mii_dev *bus, int mdio_addr, int mdio_devad,
			  int mdio_reg)
{
	struct eqos_priv *eqos = bus->priv;
	u32 val;
	int ret;

	debug("%s(dev=%p, addr=%x, reg=%d):\n", __func__, eqos->dev, mdio_addr,
	      mdio_reg);

	ret = eqos_mdio_wait_idle(eqos);
	if (ret) {
		pr_err("MDIO not idle at entry");
		return ret;
	}

	val = readl(&eqos->mac_regs->mdio_address);
	val &= EQOS_MAC_MDIO_ADDRESS_SKAP |
	       EQOS_MAC_MDIO_ADDRESS_C45E;
	val |= (mdio_addr << EQOS_MAC_MDIO_ADDRESS_PA_SHIFT) |
	       (mdio_reg << EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT) |
	       (eqos->config->config_mac_mdio <<
		EQOS_MAC_MDIO_ADDRESS_CR_SHIFT) |
	       (EQOS_MAC_MDIO_ADDRESS_GOC_READ <<
		EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT) |
	       EQOS_MAC_MDIO_ADDRESS_GB;
	writel(val, &eqos->mac_regs->mdio_address);

	udelay(eqos->config->mdio_wait);

	ret = eqos_mdio_wait_idle(eqos);
	if (ret) {
		pr_err("MDIO read didn't complete");
		return ret;
	}

	val = readl(&eqos->mac_regs->mdio_data);
	val &= EQOS_MAC_MDIO_DATA_GD_MASK;

	debug("%s: val=%x\n", __func__, val);

	return val;
}
static int eqos_mdio_write(struct mii_dev *bus, int mdio_addr, int mdio_devad,
			   int mdio_reg, u16 mdio_val)
{
	struct eqos_priv *eqos = bus->priv;
	u32 val;
	int ret;

	debug("%s(dev=%p, addr=%x, reg=%d, val=%x):\n", __func__, eqos->dev,
	      mdio_addr, mdio_reg, mdio_val);

	ret = eqos_mdio_wait_idle(eqos);
	if (ret) {
		pr_err("MDIO not idle at entry");
		return ret;
	}

	writel(mdio_val, &eqos->mac_regs->mdio_data);

	val = readl(&eqos->mac_regs->mdio_address);
	val &= EQOS_MAC_MDIO_ADDRESS_SKAP |
	       EQOS_MAC_MDIO_ADDRESS_C45E;
	val |= (mdio_addr << EQOS_MAC_MDIO_ADDRESS_PA_SHIFT) |
	       (mdio_reg << EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT) |
	       (eqos->config->config_mac_mdio <<
		EQOS_MAC_MDIO_ADDRESS_CR_SHIFT) |
	       (EQOS_MAC_MDIO_ADDRESS_GOC_WRITE <<
		EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT) |
	       EQOS_MAC_MDIO_ADDRESS_GB;
	writel(val, &eqos->mac_regs->mdio_address);

	udelay(eqos->config->mdio_wait);

	ret = eqos_mdio_wait_idle(eqos);
	if (ret) {
		pr_err("MDIO write didn't complete");
		return ret;
	}

	return 0;
}
static int eqos_start_clks_tegra186(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	ret = clk_enable(&eqos->clk_slave_bus);
	if (ret < 0) {
		pr_err("clk_enable(clk_slave_bus) failed: %d", ret);
		goto err;
	}

	ret = clk_enable(&eqos->clk_master_bus);
	if (ret < 0) {
		pr_err("clk_enable(clk_master_bus) failed: %d", ret);
		goto err_disable_clk_slave_bus;
	}

	ret = clk_enable(&eqos->clk_rx);
	if (ret < 0) {
		pr_err("clk_enable(clk_rx) failed: %d", ret);
		goto err_disable_clk_master_bus;
	}

	ret = clk_enable(&eqos->clk_ptp_ref);
	if (ret < 0) {
		pr_err("clk_enable(clk_ptp_ref) failed: %d", ret);
		goto err_disable_clk_rx;
	}

	ret = clk_set_rate(&eqos->clk_ptp_ref, 125 * 1000 * 1000);
	if (ret < 0) {
		pr_err("clk_set_rate(clk_ptp_ref) failed: %d", ret);
		goto err_disable_clk_ptp_ref;
	}

	ret = clk_enable(&eqos->clk_tx);
	if (ret < 0) {
		pr_err("clk_enable(clk_tx) failed: %d", ret);
		goto err_disable_clk_ptp_ref;
	}
#endif

	debug("%s: OK\n", __func__);
	return 0;

#ifdef CONFIG_CLK
err_disable_clk_ptp_ref:
	clk_disable(&eqos->clk_ptp_ref);
err_disable_clk_rx:
	clk_disable(&eqos->clk_rx);
err_disable_clk_master_bus:
	clk_disable(&eqos->clk_master_bus);
err_disable_clk_slave_bus:
	clk_disable(&eqos->clk_slave_bus);
err:
	debug("%s: FAILED: %d\n", __func__, ret);
	return ret;
#endif
}

static int eqos_start_clks_stm32(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	ret = clk_enable(&eqos->clk_master_bus);
	if (ret < 0) {
		pr_err("clk_enable(clk_master_bus) failed: %d", ret);
		goto err;
	}

	ret = clk_enable(&eqos->clk_rx);
	if (ret < 0) {
		pr_err("clk_enable(clk_rx) failed: %d", ret);
		goto err_disable_clk_master_bus;
	}

	ret = clk_enable(&eqos->clk_tx);
	if (ret < 0) {
		pr_err("clk_enable(clk_tx) failed: %d", ret);
		goto err_disable_clk_rx;
	}

	if (clk_valid(&eqos->clk_ck) && !eqos->clk_ck_enabled) {
		ret = clk_enable(&eqos->clk_ck);
		if (ret < 0) {
			pr_err("clk_enable(clk_ck) failed: %d", ret);
			goto err_disable_clk_tx;
		}
		eqos->clk_ck_enabled = true;
	}
#endif

	debug("%s: OK\n", __func__);
	return 0;

#ifdef CONFIG_CLK
err_disable_clk_tx:
	clk_disable(&eqos->clk_tx);
err_disable_clk_rx:
	clk_disable(&eqos->clk_rx);
err_disable_clk_master_bus:
	clk_disable(&eqos->clk_master_bus);
err:
	debug("%s: FAILED: %d\n", __func__, ret);
	return ret;
#endif
}
static int eqos_start_clks_jh7110(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	ret = clk_enable_bulk(&eqos->clk_bulk);
	if (ret < 0)
		pr_err("clk_enable_bulk failed: %d", ret);

	return ret;
}

static int eqos_stop_clks_tegra186(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clk_disable(&eqos->clk_tx);
	clk_disable(&eqos->clk_ptp_ref);
	clk_disable(&eqos->clk_rx);
	clk_disable(&eqos->clk_master_bus);
	clk_disable(&eqos->clk_slave_bus);
#endif

	debug("%s: OK\n", __func__);
	return 0;
}

static int eqos_stop_clks_stm32(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clk_disable(&eqos->clk_tx);
	clk_disable(&eqos->clk_rx);
	clk_disable(&eqos->clk_master_bus);
#endif

	debug("%s: OK\n", __func__);
	return 0;
}

static int eqos_stop_clks_jh7110(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	clk_disable_bulk(&eqos->clk_bulk);

	return 0;
}
static int eqos_start_resets_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 1);
	if (ret < 0) {
		pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d", ret);
		return ret;
	}

	udelay(2);

	ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 0);
	if (ret < 0) {
		pr_err("dm_gpio_set_value(phy_reset, deassert) failed: %d", ret);
		return ret;
	}

	ret = reset_assert(&eqos->reset_ctl);
	if (ret < 0) {
		pr_err("reset_assert() failed: %d", ret);
		return ret;
	}

	udelay(2);

	ret = reset_deassert(&eqos->reset_ctl);
	if (ret < 0) {
		pr_err("reset_deassert() failed: %d", ret);
		return ret;
	}

	debug("%s: OK\n", __func__);
	return 0;
}
static int eqos_start_resets_jh7110(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	ret = reset_deassert_bulk(&eqos->reset_bulk);
	if (ret < 0) {
		pr_err("reset_deassert_bulk() failed: %d", ret);
		return ret;
	}

	debug("%s: OK\n", __func__);
	return 0;
}
static int eqos_stop_resets_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	reset_assert(&eqos->reset_ctl);
	dm_gpio_set_value(&eqos->phy_reset_gpio, 1);

	return 0;
}

static int eqos_stop_resets_jh7110(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	reset_assert_bulk(&eqos->reset_bulk);

	return 0;
}
static int eqos_calibrate_pads_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	setbits_le32(&eqos->tegra186_regs->sdmemcomppadctrl,
		     EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD);

	udelay(1);

	setbits_le32(&eqos->tegra186_regs->auto_cal_config,
		     EQOS_AUTO_CAL_CONFIG_START | EQOS_AUTO_CAL_CONFIG_ENABLE);

	ret = wait_for_bit_le32(&eqos->tegra186_regs->auto_cal_status,
				EQOS_AUTO_CAL_STATUS_ACTIVE, true, 10, false);
	if (ret) {
		pr_err("calibrate didn't start");
		goto failed;
	}

	ret = wait_for_bit_le32(&eqos->tegra186_regs->auto_cal_status,
				EQOS_AUTO_CAL_STATUS_ACTIVE, false, 10, false);
	if (ret) {
		pr_err("calibrate didn't finish");
		goto failed;
	}

	ret = 0;

failed:
	clrbits_le32(&eqos->tegra186_regs->sdmemcomppadctrl,
		     EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD);

	debug("%s: returns %d\n", __func__, ret);
	return ret;
}

static int eqos_disable_calibration_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clrbits_le32(&eqos->tegra186_regs->auto_cal_config,
		     EQOS_AUTO_CAL_CONFIG_ENABLE);

	return 0;
}
static ulong eqos_get_tick_clk_rate_tegra186(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);

	return clk_get_rate(&eqos->clk_slave_bus);
#else
	return 0;
#endif
}

static ulong eqos_get_tick_clk_rate_stm32(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);

	return clk_get_rate(&eqos->clk_master_bus);
#else
	return 0;
#endif
}

static ulong eqos_get_tick_clk_rate_jh7110(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	return clk_get_rate(&eqos->clk_tx);
}

__weak int jh7110_eqos_txclk_set_rate(struct udevice *dev,
				      unsigned long rate)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	return clk_set_rate(&eqos->clk_tx, rate);
}

__weak u32 imx_get_eqos_csr_clk(void)
{
	return 100 * 1000000;
}

__weak int imx_eqos_txclk_set_rate(unsigned long rate)
{
	return 0;
}

static ulong eqos_get_tick_clk_rate_imx(struct udevice *dev)
{
	return imx_get_eqos_csr_clk();
}
static int eqos_set_full_duplex(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	setbits_le32(&eqos->mac_regs->configuration, EQOS_MAC_CONFIGURATION_DM);

	return 0;
}

static int eqos_set_half_duplex(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clrbits_le32(&eqos->mac_regs->configuration, EQOS_MAC_CONFIGURATION_DM);

	/* WAR: Flush TX queue when switching to half-duplex */
	setbits_le32(&eqos->mtl_regs->txq0_operation_mode,
		     EQOS_MTL_TXQ0_OPERATION_MODE_FTQ);

	return 0;
}

static int eqos_set_gmii_speed(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clrbits_le32(&eqos->mac_regs->configuration,
		     EQOS_MAC_CONFIGURATION_PS | EQOS_MAC_CONFIGURATION_FES);

	return 0;
}

static int eqos_set_mii_speed_100(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	setbits_le32(&eqos->mac_regs->configuration,
		     EQOS_MAC_CONFIGURATION_PS | EQOS_MAC_CONFIGURATION_FES);

	return 0;
}

static int eqos_set_mii_speed_10(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clrsetbits_le32(&eqos->mac_regs->configuration,
			EQOS_MAC_CONFIGURATION_FES, EQOS_MAC_CONFIGURATION_PS);

	return 0;
}
static int eqos_set_tx_clk_speed_tegra186(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);
	ulong rate;
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	switch (eqos->phy->speed) {
	case SPEED_1000:
		rate = 125 * 1000 * 1000;
		break;
	case SPEED_100:
		rate = 25 * 1000 * 1000;
		break;
	case SPEED_10:
		rate = 2.5 * 1000 * 1000;
		break;
	default:
		pr_err("invalid speed %d", eqos->phy->speed);
		return -EINVAL;
	}

	ret = clk_set_rate(&eqos->clk_tx, rate);
	if (ret < 0) {
		pr_err("clk_set_rate(tx_clk, %lu) failed: %d", rate, ret);
		return ret;
	}
#endif

	return 0;
}

static int eqos_set_tx_clk_speed_imx(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	ulong rate;
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	switch (eqos->phy->speed) {
	case SPEED_1000:
		rate = 125 * 1000 * 1000;
		break;
	case SPEED_100:
		rate = 25 * 1000 * 1000;
		break;
	case SPEED_10:
		rate = 2.5 * 1000 * 1000;
		break;
	default:
		pr_err("invalid speed %d", eqos->phy->speed);
		return -EINVAL;
	}

	ret = imx_eqos_txclk_set_rate(rate);
	if (ret < 0) {
		pr_err("imx (tx_clk, %lu) failed: %d", rate, ret);
		return ret;
	}

	return 0;
}

static int eqos_set_tx_clk_speed_jh7110(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	ulong rate;
	int ret;

	switch (eqos->phy->speed) {
	case SPEED_1000:
		rate = 125 * 1000 * 1000;
		break;
	case SPEED_100:
		rate = 25 * 1000 * 1000;
		break;
	case SPEED_10:
		rate = 2.5 * 1000 * 1000;
		break;
	default:
		pr_err("invalid speed %d", eqos->phy->speed);
		return -EINVAL;
	}

	ret = jh7110_eqos_txclk_set_rate(dev, rate);
	if (ret < 0) {
		pr_err("jh7110 (tx_clk, %lu) failed: %d", rate, ret);
		return ret;
	}

	return 0;
}
static int eqos_adjust_link(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;
	bool en_calibration;

	debug("%s(dev=%p):\n", __func__, dev);

	if (eqos->phy->duplex)
		ret = eqos_set_full_duplex(dev);
	else
		ret = eqos_set_half_duplex(dev);
	if (ret < 0) {
		pr_err("eqos_set_*_duplex() failed: %d", ret);
		return ret;
	}

	switch (eqos->phy->speed) {
	case SPEED_1000:
		en_calibration = true;
		ret = eqos_set_gmii_speed(dev);
		break;
	case SPEED_100:
		en_calibration = true;
		ret = eqos_set_mii_speed_100(dev);
		break;
	case SPEED_10:
		en_calibration = false;
		ret = eqos_set_mii_speed_10(dev);
		break;
	default:
		pr_err("invalid speed %d", eqos->phy->speed);
		return -EINVAL;
	}
	if (ret < 0) {
		pr_err("eqos_set_*mii_speed*() failed: %d", ret);
		return ret;
	}

	if (en_calibration) {
		ret = eqos->config->ops->eqos_calibrate_pads(dev);
		if (ret < 0) {
			pr_err("eqos_calibrate_pads() failed: %d", ret);
			return ret;
		}
	} else {
		ret = eqos->config->ops->eqos_disable_calibration(dev);
		if (ret < 0) {
			pr_err("eqos_disable_calibration() failed: %d", ret);
			return ret;
		}
	}

	ret = eqos->config->ops->eqos_set_tx_clk_speed(dev);
	if (ret < 0) {
		pr_err("eqos_set_tx_clk_speed() failed: %d", ret);
		return ret;
	}

	return 0;
}
static int eqos_write_hwaddr(struct udevice *dev)
{
	struct eth_pdata *plat = dev_get_plat(dev);
	struct eqos_priv *eqos = dev_get_priv(dev);
	uint32_t val;

	/*
	 * This function may be called before start() or after stop(). At that
	 * time, on at least some configurations of the EQoS HW, all clocks to
	 * the EQoS HW block will be stopped, and a reset signal applied. If
	 * any register access is attempted in this state, bus timeouts or CPU
	 * hangs may occur. This check prevents that.
	 *
	 * A simple solution to this problem would be to not implement
	 * write_hwaddr(), since start() always writes the MAC address into HW
	 * anyway. However, it is desirable to implement write_hwaddr() to
	 * support the case of SW that runs subsequent to U-Boot which expects
	 * the MAC address to already be programmed into the EQoS registers,
	 * which must happen irrespective of whether the U-Boot user (or
	 * scripts) actually made use of the EQoS device, and hence
	 * irrespective of whether start() was ever called.
	 *
	 * Note that this requirement by subsequent SW is not valid for
	 * Tegra186, and is likely not valid for any non-PCI instantiation of
	 * the EQoS HW block. This function is implemented solely as
	 * future-proofing with the expectation the driver will eventually be
	 * ported to some system where the expectation above is true.
	 */
	if (!eqos->config->reg_access_always_ok && !eqos->reg_access_ok)
		return 0;

	/* Update the MAC address */
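	/*
	 * The address registers hold the MAC address in little-endian byte
	 * order; e.g. for the (hypothetical) address 00:11:22:33:44:55,
	 * address0_high is written with 0x00005544 and address0_low with
	 * 0x33221100.
	 */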
	val = (plat->enetaddr[5] << 8) |
	      (plat->enetaddr[4]);
	writel(val, &eqos->mac_regs->address0_high);
	val = (plat->enetaddr[3] << 24) |
	      (plat->enetaddr[2] << 16) |
	      (plat->enetaddr[1] << 8) |
	      (plat->enetaddr[0]);
	writel(val, &eqos->mac_regs->address0_low);

	return 0;
}
static int eqos_read_rom_hwaddr(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);

#ifdef CONFIG_ARCH_IMX8M
	imx_get_mac_from_fuse(dev_seq(dev), pdata->enetaddr);
#endif

	return !is_valid_ethaddr(pdata->enetaddr);
}
static int eqos_start(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret, i;
	ulong rate;
	u32 val, tx_fifo_sz, rx_fifo_sz, tqs, rqs, pbl;
	ulong last_rx_desc;
	ulong desc_pad;

	debug("%s(dev=%p):\n", __func__, dev);

	eqos->tx_desc_idx = 0;
	eqos->rx_desc_idx = 0;

	ret = eqos->config->ops->eqos_start_clks(dev);
	if (ret < 0) {
		pr_err("eqos_start_clks() failed: %d", ret);
		goto err;
	}

	ret = eqos->config->ops->eqos_start_resets(dev);
	if (ret < 0) {
		pr_err("eqos_start_resets() failed: %d", ret);
		goto err_stop_clks;
	}

	udelay(10);

	eqos->reg_access_ok = true;

	ret = wait_for_bit_le32(&eqos->dma_regs->mode,
				EQOS_DMA_MODE_SWR, false,
				eqos->config->swr_wait, false);
	if (ret) {
		pr_err("EQOS_DMA_MODE_SWR stuck");
		goto err_stop_resets;
	}

	ret = eqos->config->ops->eqos_calibrate_pads(dev);
	if (ret < 0) {
		pr_err("eqos_calibrate_pads() failed: %d", ret);
		goto err_stop_resets;
	}
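
	/*
	 * Program the MAC's microsecond tick reference: us_tic_counter is
	 * written with (tick clock rate in MHz) - 1, e.g. a 100 MHz clock
	 * (an illustrative value, not a statement about any board) yields 99.
	 */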
	rate = eqos->config->ops->eqos_get_tick_clk_rate(dev);
	val = (rate / 1000000) - 1;
	writel(val, &eqos->mac_regs->us_tic_counter);

	/*
	 * if PHY was already connected and configured,
	 * don't need to reconnect/reconfigure again
	 */
	if (!eqos->phy) {
		int addr = -1;
#ifdef CONFIG_DM_ETH_PHY
		addr = eth_phy_get_addr(dev);
#endif
#ifdef DWC_NET_PHYADDR
		addr = DWC_NET_PHYADDR;
#endif
		eqos->phy = phy_connect(eqos->mii, addr, dev,
					eqos->config->interface(dev));
		if (!eqos->phy) {
			pr_err("phy_connect() failed");
			ret = -ENODEV;
			goto err_stop_resets;
		}

		if (eqos->max_speed) {
			ret = phy_set_supported(eqos->phy, eqos->max_speed);
			if (ret) {
				pr_err("phy_set_supported() failed: %d", ret);
				goto err_shutdown_phy;
			}
		}

		ret = phy_config(eqos->phy);
		if (ret < 0) {
			pr_err("phy_config() failed: %d", ret);
			goto err_shutdown_phy;
		}
	}

	ret = phy_startup(eqos->phy);
	if (ret < 0) {
		pr_err("phy_startup() failed: %d", ret);
		goto err_shutdown_phy;
	}

	if (!eqos->phy->link) {
		pr_err("No link");
		ret = -EAGAIN;
		goto err_shutdown_phy;
	}

	ret = eqos_adjust_link(dev);
	if (ret < 0) {
		pr_err("eqos_adjust_link() failed: %d", ret);
		goto err_shutdown_phy;
	}

	/* Configure MTL */

	/* Enable Store and Forward mode for TX */
	/* Program Tx operating mode */
	setbits_le32(&eqos->mtl_regs->txq0_operation_mode,
		     EQOS_MTL_TXQ0_OPERATION_MODE_TSF |
		     (EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_ENABLED <<
		      EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_SHIFT));

	/* Transmit Queue weight */
	writel(0x10, &eqos->mtl_regs->txq0_quantum_weight);

	/* Enable Store and Forward mode for RX, since no jumbo frame */
	setbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
		     EQOS_MTL_RXQ0_OPERATION_MODE_RSF);

	/* Transmit/Receive queue fifo size; use all RAM for 1 queue */
	val = readl(&eqos->mac_regs->hw_feature1);
	tx_fifo_sz = (val >> EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_SHIFT) &
		     EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_MASK;
	rx_fifo_sz = (val >> EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_SHIFT) &
		     EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_MASK;

	/*
	 * r/tx_fifo_sz is encoded as log2(n / 128). Undo that by shifting.
	 * r/tqs is encoded as (n / 256) - 1.
	 */
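	/*
	 * Worked example (illustrative values only): a 4KB TX FIFO is
	 * reported as tx_fifo_sz = 5, since 128 << 5 == 4096, and the TQS
	 * field below is then programmed with 4096 / 256 - 1 == 15.
	 */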
	tqs = (128 << tx_fifo_sz) / 256 - 1;
	rqs = (128 << rx_fifo_sz) / 256 - 1;

	clrsetbits_le32(&eqos->mtl_regs->txq0_operation_mode,
			EQOS_MTL_TXQ0_OPERATION_MODE_TQS_MASK <<
			EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT,
			tqs << EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT);
	clrsetbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
			EQOS_MTL_RXQ0_OPERATION_MODE_RQS_MASK <<
			EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT,
			rqs << EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT);

	/* Flow control used only if each channel gets 4KB or more FIFO */
	if (rqs >= ((4096 / 256) - 1)) {
		u32 rfd, rfa;

		setbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
			     EQOS_MTL_RXQ0_OPERATION_MODE_EHFC);

		/*
		 * Set the threshold for activating flow control to leave
		 * space for at least one frame, i.e. (1500 * 1) = 1500 bytes.
		 *
		 * Set the threshold for deactivating flow control to leave
		 * space for at least one frame (frame size 1500 bytes) in the
		 * receive FIFO.
		 */
		if (rqs == ((4096 / 256) - 1)) {
			/*
			 * This violates the above formula because of the FIFO
			 * size limit, therefore overflow may occur in spite
			 * of this.
			 */
			rfd = 0x3;	/* Full-3K */
			rfa = 0x1;	/* Full-1.5K */
		} else if (rqs == ((8192 / 256) - 1)) {
			rfd = 0x6;	/* Full-4K */
			rfa = 0xa;	/* Full-6K */
		} else if (rqs == ((16384 / 256) - 1)) {
			rfd = 0x6;	/* Full-4K */
			rfa = 0x12;	/* Full-10K */
		} else {
			rfd = 0x6;	/* Full-4K */
			rfa = 0x1E;	/* Full-16K */
		}

		clrsetbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
				(EQOS_MTL_RXQ0_OPERATION_MODE_RFD_MASK <<
				 EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT) |
				(EQOS_MTL_RXQ0_OPERATION_MODE_RFA_MASK <<
				 EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT),
				(rfd <<
				 EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT) |
				(rfa <<
				 EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT));
	}
	/* Configure MAC */
	clrsetbits_le32(&eqos->mac_regs->rxq_ctrl0,
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK <<
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT,
			eqos->config->config_mac <<
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT);

	/* Multicast and Broadcast Queue Enable */
	setbits_le32(&eqos->mac_regs->unused_0a4,
		     0x00100000);
	/* Enable promiscuous mode */
	setbits_le32(&eqos->mac_regs->unused_004[1],
		     0x1);
	/* Set TX flow control parameters */
	/* Set Pause Time */
	setbits_le32(&eqos->mac_regs->q0_tx_flow_ctrl,
		     0xffff << EQOS_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT);
	/* Assign priority for TX flow control */
	clrbits_le32(&eqos->mac_regs->txq_prty_map0,
		     EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_MASK <<
		     EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_SHIFT);
	/* Assign priority for RX flow control */
	clrbits_le32(&eqos->mac_regs->rxq_ctrl2,
		     EQOS_MAC_RXQ_CTRL2_PSRQ0_MASK <<
		     EQOS_MAC_RXQ_CTRL2_PSRQ0_SHIFT);
	/* Enable flow control */
	setbits_le32(&eqos->mac_regs->q0_tx_flow_ctrl,
		     EQOS_MAC_Q0_TX_FLOW_CTRL_TFE);
	setbits_le32(&eqos->mac_regs->rx_flow_ctrl,
		     EQOS_MAC_RX_FLOW_CTRL_RFE);

	clrsetbits_le32(&eqos->mac_regs->configuration,
			EQOS_MAC_CONFIGURATION_GPSLCE |
			EQOS_MAC_CONFIGURATION_WD |
			EQOS_MAC_CONFIGURATION_JD |
			EQOS_MAC_CONFIGURATION_JE,
			EQOS_MAC_CONFIGURATION_CST |
			EQOS_MAC_CONFIGURATION_ACS);

	eqos_write_hwaddr(dev);

	/* Configure DMA */

	/* Enable OSP mode */
	setbits_le32(&eqos->dma_regs->ch0_tx_control,
		     EQOS_DMA_CH0_TX_CONTROL_OSP);

	/* RX buffer size. Must be a multiple of bus width */
	clrsetbits_le32(&eqos->dma_regs->ch0_rx_control,
			EQOS_DMA_CH0_RX_CONTROL_RBSZ_MASK <<
			EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT,
			EQOS_MAX_PACKET_SIZE <<
			EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT);

	desc_pad = (eqos->desc_size - sizeof(struct eqos_desc)) /
		   eqos->config->axi_bus_width;
	setbits_le32(&eqos->dma_regs->ch0_control,
		     EQOS_DMA_CH0_CONTROL_PBLX8 |
		     (desc_pad << EQOS_DMA_CH0_CONTROL_DSL_SHIFT));

	/*
	 * Burst length must be < 1/2 FIFO size.
	 * FIFO size in tqs is encoded as (n / 256) - 1.
	 * Each burst is n * 8 (PBLX8) * 16 (AXI width) == 128 bytes.
	 * Half of n * 256 is n * 128, so pbl == tqs, modulo the -1.
	 */
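	/*
	 * E.g. with tqs == 15 (a 4KB FIFO; illustrative values only):
	 * pbl == 16, so each burst is 16 * 8 * 16 == 2048 bytes, exactly
	 * half of the 4096-byte FIFO.
	 */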
	pbl = tqs + 1;
	if (pbl > 32)
		pbl = 32;
	clrsetbits_le32(&eqos->dma_regs->ch0_tx_control,
			EQOS_DMA_CH0_TX_CONTROL_TXPBL_MASK <<
			EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT,
			pbl << EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT);
	clrsetbits_le32(&eqos->dma_regs->ch0_rx_control,
			EQOS_DMA_CH0_RX_CONTROL_RXPBL_MASK <<
			EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT,
			8 << EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT);

	/* DMA performance configuration */
	val = (2 << EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT) |
	      EQOS_DMA_SYSBUS_MODE_EAME | EQOS_DMA_SYSBUS_MODE_BLEN16 |
	      EQOS_DMA_SYSBUS_MODE_BLEN8 | EQOS_DMA_SYSBUS_MODE_BLEN4;
	writel(val, &eqos->dma_regs->sysbus_mode);

	/* Set up descriptors */

	memset(eqos->descs, 0, eqos->desc_size * EQOS_DESCRIPTORS_NUM);

	for (i = 0; i < EQOS_DESCRIPTORS_TX; i++) {
		struct eqos_desc *tx_desc = eqos_get_desc(eqos, i, false);

		eqos->config->ops->eqos_flush_desc(tx_desc);
	}

	for (i = 0; i < EQOS_DESCRIPTORS_RX; i++) {
		struct eqos_desc *rx_desc = eqos_get_desc(eqos, i, true);

		rx_desc->des0 = (u32)(ulong)(eqos->rx_dma_buf +
					     (i * EQOS_MAX_PACKET_SIZE));
		rx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_BUF1V;
		mb();
		eqos->config->ops->eqos_flush_desc(rx_desc);
		eqos->config->ops->eqos_inval_buffer(eqos->rx_dma_buf +
						     (i * EQOS_MAX_PACKET_SIZE),
						     EQOS_MAX_PACKET_SIZE);
	}

	writel(0, &eqos->dma_regs->ch0_txdesc_list_haddress);
	writel((ulong)eqos_get_desc(eqos, 0, false),
	       &eqos->dma_regs->ch0_txdesc_list_address);
	writel(EQOS_DESCRIPTORS_TX - 1,
	       &eqos->dma_regs->ch0_txdesc_ring_length);
	writel(0, &eqos->dma_regs->ch0_rxdesc_list_haddress);
	writel((ulong)eqos_get_desc(eqos, 0, true),
	       &eqos->dma_regs->ch0_rxdesc_list_address);
	writel(EQOS_DESCRIPTORS_RX - 1,
	       &eqos->dma_regs->ch0_rxdesc_ring_length);

	/* Enable everything */
	setbits_le32(&eqos->dma_regs->ch0_tx_control,
		     EQOS_DMA_CH0_TX_CONTROL_ST);
	setbits_le32(&eqos->dma_regs->ch0_rx_control,
		     EQOS_DMA_CH0_RX_CONTROL_SR);
	setbits_le32(&eqos->mac_regs->configuration,
		     EQOS_MAC_CONFIGURATION_TE | EQOS_MAC_CONFIGURATION_RE);

	/* TX tail pointer not written until we need to TX a packet */
	/*
	 * Point RX tail pointer at last descriptor. Ideally, we'd point at the
	 * first descriptor, implying all descriptors were available. However,
	 * that's not distinguishable from none of the descriptors being
	 * available.
	 */
	last_rx_desc = (ulong)eqos_get_desc(eqos, EQOS_DESCRIPTORS_RX - 1, true);
	writel(last_rx_desc, &eqos->dma_regs->ch0_rxdesc_tail_pointer);

	eqos->started = true;

	debug("%s: OK\n", __func__);
	return 0;

err_shutdown_phy:
	phy_shutdown(eqos->phy);
err_stop_resets:
	eqos->config->ops->eqos_stop_resets(dev);
err_stop_clks:
	eqos->config->ops->eqos_stop_clks(dev);
err:
	pr_err("FAILED: %d", ret);
	return ret;
}
static void eqos_stop(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int i;

	debug("%s(dev=%p):\n", __func__, dev);

	if (!eqos->started)
		return;
	eqos->started = false;
	eqos->reg_access_ok = false;

	/* Disable TX DMA */
	clrbits_le32(&eqos->dma_regs->ch0_tx_control,
		     EQOS_DMA_CH0_TX_CONTROL_ST);

	/* Wait for all TX packets to drain out of MTL */
	for (i = 0; i < 1000000; i++) {
		u32 val = readl(&eqos->mtl_regs->txq0_debug);
		u32 trcsts = (val >> EQOS_MTL_TXQ0_DEBUG_TRCSTS_SHIFT) &
			     EQOS_MTL_TXQ0_DEBUG_TRCSTS_MASK;
		u32 txqsts = val & EQOS_MTL_TXQ0_DEBUG_TXQSTS;

		if ((trcsts != 1) && (!txqsts))
			break;
	}

	/* Turn off MAC TX and RX */
	clrbits_le32(&eqos->mac_regs->configuration,
		     EQOS_MAC_CONFIGURATION_TE | EQOS_MAC_CONFIGURATION_RE);

	/* Wait for all RX packets to drain out of MTL */
	for (i = 0; i < 1000000; i++) {
		u32 val = readl(&eqos->mtl_regs->rxq0_debug);
		u32 prxq = (val >> EQOS_MTL_RXQ0_DEBUG_PRXQ_SHIFT) &
			   EQOS_MTL_RXQ0_DEBUG_PRXQ_MASK;
		u32 rxqsts = (val >> EQOS_MTL_RXQ0_DEBUG_RXQSTS_SHIFT) &
			     EQOS_MTL_RXQ0_DEBUG_RXQSTS_MASK;

		if ((!prxq) && (!rxqsts))
			break;
	}

	/* Turn off RX DMA */
	clrbits_le32(&eqos->dma_regs->ch0_rx_control,
		     EQOS_DMA_CH0_RX_CONTROL_SR);

	if (eqos->phy)
		phy_shutdown(eqos->phy);
	eqos->config->ops->eqos_stop_resets(dev);
	eqos->config->ops->eqos_stop_clks(dev);

	debug("%s: OK\n", __func__);
}
static int eqos_send(struct udevice *dev, void *packet, int length)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	struct eqos_desc *tx_desc;
	int i;

	debug("%s(dev=%p, packet=%p, length=%d):\n", __func__, dev, packet,
	      length);

	memcpy(eqos->tx_dma_buf, packet, length);
	eqos->config->ops->eqos_flush_buffer(eqos->tx_dma_buf, length);

	tx_desc = eqos_get_desc(eqos, eqos->tx_desc_idx, false);
	eqos->tx_desc_idx++;
	eqos->tx_desc_idx %= EQOS_DESCRIPTORS_TX;

	tx_desc->des0 = (ulong)eqos->tx_dma_buf;
	tx_desc->des1 = 0;
	tx_desc->des2 = length;
	/*
	 * Make sure that if HW sees the _OWN write below, it will see all the
	 * writes to the rest of the descriptor too.
	 */
	mb();
	tx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_FD | EQOS_DESC3_LD | length;
	eqos->config->ops->eqos_flush_desc(tx_desc);

	writel((ulong)eqos_get_desc(eqos, eqos->tx_desc_idx, false),
	       &eqos->dma_regs->ch0_txdesc_tail_pointer);

	for (i = 0; i < 1000000; i++) {
		eqos->config->ops->eqos_inval_desc(tx_desc);
		if (!(readl(&tx_desc->des3) & EQOS_DESC3_OWN))
			return 0;
		udelay(1);
	}

	debug("%s: TX timeout\n", __func__);

	return -ETIMEDOUT;
}
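/*
 * Receive is zero-copy: on success, *packetp points straight into the slot
 * of rx_dma_buf that the current RX descriptor filled. The caller must hand
 * the buffer back via eqos_free_pkt() before the descriptor can be reused.
 */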
static int eqos_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	struct eqos_desc *rx_desc;
	int length;

	debug("%s(dev=%p, flags=%x):\n", __func__, dev, flags);

	rx_desc = eqos_get_desc(eqos, eqos->rx_desc_idx, true);
	eqos->config->ops->eqos_inval_desc(rx_desc);
	if (rx_desc->des3 & EQOS_DESC3_OWN) {
		debug("%s: RX packet not available\n", __func__);
		return -EAGAIN;
	}

	*packetp = eqos->rx_dma_buf +
		   (eqos->rx_desc_idx * EQOS_MAX_PACKET_SIZE);
	length = rx_desc->des3 & 0x7fff;
	debug("%s: *packetp=%p, length=%d\n", __func__, *packetp, length);

	eqos->config->ops->eqos_inval_buffer(*packetp, length);

	return length;
}
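/*
 * Recycle an RX buffer: re-arm the descriptor that produced it (buffer
 * address, OWN and BUF1V bits), flush it back to memory, and bump the
 * channel tail pointer so the DMA engine sees the descriptor as available
 * again.
 */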
static int eqos_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	uchar *packet_expected;
	struct eqos_desc *rx_desc;

	debug("%s(packet=%p, length=%d)\n", __func__, packet, length);

	packet_expected = eqos->rx_dma_buf +
			  (eqos->rx_desc_idx * EQOS_MAX_PACKET_SIZE);
	if (packet != packet_expected) {
		debug("%s: Unexpected packet (expected %p)\n", __func__,
		      packet_expected);
		return -EINVAL;
	}

	eqos->config->ops->eqos_inval_buffer(packet, length);

	rx_desc = eqos_get_desc(eqos, eqos->rx_desc_idx, true);

	rx_desc->des0 = 0;
	mb();
	eqos->config->ops->eqos_flush_desc(rx_desc);
	eqos->config->ops->eqos_inval_buffer(packet, length);
	rx_desc->des0 = (u32)(ulong)packet;
	rx_desc->des1 = 0;
	rx_desc->des2 = 0;
	/*
	 * Make sure that if HW sees the _OWN write below, it will see all the
	 * writes to the rest of the descriptor too.
	 */
	mb();
	rx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_BUF1V;
	eqos->config->ops->eqos_flush_desc(rx_desc);

	writel((ulong)rx_desc, &eqos->dma_regs->ch0_rxdesc_tail_pointer);

	eqos->rx_desc_idx++;
	eqos->rx_desc_idx %= EQOS_DESCRIPTORS_RX;

	return 0;
}
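/*
 * Allocate the DMA resources shared by every variant: the TX/RX descriptor
 * rings, one aligned TX bounce buffer, one contiguous RX buffer large enough
 * to back every RX descriptor, and a scratch packet buffer.
 */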
static int eqos_probe_resources_core(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	eqos->descs = eqos_alloc_descs(eqos, EQOS_DESCRIPTORS_NUM);
	if (!eqos->descs) {
		debug("%s: eqos_alloc_descs() failed\n", __func__);
		ret = -ENOMEM;
		goto err;
	}

	eqos->tx_dma_buf = memalign(EQOS_BUFFER_ALIGN, EQOS_MAX_PACKET_SIZE);
	if (!eqos->tx_dma_buf) {
		debug("%s: memalign(tx_dma_buf) failed\n", __func__);
		ret = -ENOMEM;
		goto err_free_descs;
	}
	debug("%s: tx_dma_buf=%p\n", __func__, eqos->tx_dma_buf);

	eqos->rx_dma_buf = memalign(EQOS_BUFFER_ALIGN, EQOS_RX_BUFFER_SIZE);
	if (!eqos->rx_dma_buf) {
		debug("%s: memalign(rx_dma_buf) failed\n", __func__);
		ret = -ENOMEM;
		goto err_free_tx_dma_buf;
	}
	debug("%s: rx_dma_buf=%p\n", __func__, eqos->rx_dma_buf);

	eqos->rx_pkt = malloc(EQOS_MAX_PACKET_SIZE);
	if (!eqos->rx_pkt) {
		debug("%s: malloc(rx_pkt) failed\n", __func__);
		ret = -ENOMEM;
		goto err_free_rx_dma_buf;
	}
	debug("%s: rx_pkt=%p\n", __func__, eqos->rx_pkt);

	eqos->config->ops->eqos_inval_buffer(eqos->rx_dma_buf,
			EQOS_MAX_PACKET_SIZE * EQOS_DESCRIPTORS_RX);

	debug("%s: OK\n", __func__);
	return 0;

err_free_rx_dma_buf:
	free(eqos->rx_dma_buf);
err_free_tx_dma_buf:
	free(eqos->tx_dma_buf);
err_free_descs:
	eqos_free_descs(eqos->descs);
err:

	debug("%s: returns %d\n", __func__, ret);
	return ret;
}
static int eqos_remove_resources_core(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	free(eqos->rx_pkt);
	free(eqos->rx_dma_buf);
	free(eqos->tx_dma_buf);
	eqos_free_descs(eqos->descs);

	debug("%s: OK\n", __func__);
	return 0;
}
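/*
 * Tegra186 glue: claim the controller reset, the PHY reset GPIO, and the
 * five named clocks the device tree provides; on failure, unwind in reverse
 * order.
 */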
static int eqos_probe_resources_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	ret = reset_get_by_name(dev, "eqos", &eqos->reset_ctl);
	if (ret) {
		pr_err("reset_get_by_name(rst) failed: %d", ret);
		return ret;
	}

	ret = gpio_request_by_name(dev, "phy-reset-gpios", 0,
				   &eqos->phy_reset_gpio,
				   GPIOD_IS_OUT | GPIOD_IS_OUT_ACTIVE);
	if (ret) {
		pr_err("gpio_request_by_name(phy reset) failed: %d", ret);
		goto err_free_reset_eqos;
	}

	ret = clk_get_by_name(dev, "slave_bus", &eqos->clk_slave_bus);
	if (ret) {
		pr_err("clk_get_by_name(slave_bus) failed: %d", ret);
		goto err_free_gpio_phy_reset;
	}

	ret = clk_get_by_name(dev, "master_bus", &eqos->clk_master_bus);
	if (ret) {
		pr_err("clk_get_by_name(master_bus) failed: %d", ret);
		goto err_free_clk_slave_bus;
	}

	ret = clk_get_by_name(dev, "rx", &eqos->clk_rx);
	if (ret) {
		pr_err("clk_get_by_name(rx) failed: %d", ret);
		goto err_free_clk_master_bus;
	}

	ret = clk_get_by_name(dev, "ptp_ref", &eqos->clk_ptp_ref);
	if (ret) {
		pr_err("clk_get_by_name(ptp_ref) failed: %d", ret);
		goto err_free_clk_rx;
	}

	ret = clk_get_by_name(dev, "tx", &eqos->clk_tx);
	if (ret) {
		pr_err("clk_get_by_name(tx) failed: %d", ret);
		goto err_free_clk_ptp_ref;
	}

	debug("%s: OK\n", __func__);
	return 0;

err_free_clk_ptp_ref:
	clk_free(&eqos->clk_ptp_ref);
err_free_clk_rx:
	clk_free(&eqos->clk_rx);
err_free_clk_master_bus:
	clk_free(&eqos->clk_master_bus);
err_free_clk_slave_bus:
	clk_free(&eqos->clk_slave_bus);
err_free_gpio_phy_reset:
	dm_gpio_free(dev, &eqos->phy_reset_gpio);
err_free_reset_eqos:
	reset_free(&eqos->reset_ctl);

	debug("%s: returns %d\n", __func__, ret);
	return ret;
}
/* Board-specific Ethernet interface initialization. */
__weak int board_interface_eth_init(struct udevice *dev,
				    phy_interface_t interface_type)
{
	return 0;
}
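/*
 * STM32 glue: validate the phy-mode from the device tree, give the board
 * hook a chance to do interface-specific setup, then claim the bus and MAC
 * RX/TX clocks; the PHY clock "eth-ck" is optional.
 */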
static int eqos_probe_resources_stm32(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;
	phy_interface_t interface;

	debug("%s(dev=%p):\n", __func__, dev);

	interface = eqos->config->interface(dev);
	if (interface == PHY_INTERFACE_MODE_NONE) {
		pr_err("Invalid PHY interface\n");
		return -EINVAL;
	}

	ret = board_interface_eth_init(dev, interface);
	if (ret)
		return -EINVAL;

	eqos->max_speed = dev_read_u32_default(dev, "max-speed", 0);

	ret = clk_get_by_name(dev, "stmmaceth", &eqos->clk_master_bus);
	if (ret) {
		pr_err("clk_get_by_name(master_bus) failed: %d", ret);
		goto err_probe;
	}

	ret = clk_get_by_name(dev, "mac-clk-rx", &eqos->clk_rx);
	if (ret) {
		pr_err("clk_get_by_name(rx) failed: %d", ret);
		goto err_free_clk_master_bus;
	}

	ret = clk_get_by_name(dev, "mac-clk-tx", &eqos->clk_tx);
	if (ret) {
		pr_err("clk_get_by_name(tx) failed: %d", ret);
		goto err_free_clk_rx;
	}

	/* Get ETH_CLK clock (optional) */
	ret = clk_get_by_name(dev, "eth-ck", &eqos->clk_ck);
	if (ret)
		pr_warn("No PHY clock provided: %d", ret);

	debug("%s: OK\n", __func__);
	return 0;

err_free_clk_rx:
	clk_free(&eqos->clk_rx);
err_free_clk_master_bus:
	clk_free(&eqos->clk_master_bus);
err_probe:

	debug("%s: returns %d\n", __func__, ret);
	return ret;
}
static phy_interface_t eqos_get_interface_stm32(struct udevice *dev)
{
	const char *phy_mode;
	phy_interface_t interface = PHY_INTERFACE_MODE_NONE;

	debug("%s(dev=%p):\n", __func__, dev);

	phy_mode = dev_read_prop(dev, "phy-mode", NULL);
	if (phy_mode)
		interface = phy_get_interface_by_name(phy_mode);

	return interface;
}

static phy_interface_t eqos_get_interface_tegra186(struct udevice *dev)
{
	return PHY_INTERFACE_MODE_MII;
}
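/*
 * i.MX glue: clocks and resets are expected to have been configured before
 * this driver runs, so probing only validates the phy-mode property.
 */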
static int eqos_probe_resources_imx(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	phy_interface_t interface;

	debug("%s(dev=%p):\n", __func__, dev);

	interface = eqos->config->interface(dev);
	if (interface == PHY_INTERFACE_MODE_NONE) {
		pr_err("Invalid PHY interface\n");
		return -EINVAL;
	}

	debug("%s: OK\n", __func__);
	return 0;
}

static phy_interface_t eqos_get_interface_imx(struct udevice *dev)
{
	const char *phy_mode;
	phy_interface_t interface = PHY_INTERFACE_MODE_NONE;

	debug("%s(dev=%p):\n", __func__, dev);

	phy_mode = dev_read_prop(dev, "phy-mode", NULL);
	if (phy_mode)
		interface = phy_get_interface_by_name(phy_mode);

	return interface;
}
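/*
 * StarFive JH7110 glue: claim the reset and clock bulks from the device
 * tree, plus the "gtx" clock individually, presumably so the TX clock can be
 * retuned to the negotiated link speed.
 */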
static int eqos_probe_resources_jh7110(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	phy_interface_t interface;
	int ret;

	interface = eqos->config->interface(dev);
	if (interface == PHY_INTERFACE_MODE_NONE) {
		pr_err("Invalid PHY interface\n");
		return -EINVAL;
	}

	ret = reset_get_bulk(dev, &eqos->reset_bulk);
	if (ret) {
		pr_err("Can't get reset: %d\n", ret);
		return ret;
	}

	ret = clk_get_by_name(dev, "gtx", &eqos->clk_tx);
	if (ret) {
		pr_err("clk_get_by_name(gtx) failed: %d", ret);
		goto err_free_reset_eqos;
	}

	ret = clk_get_bulk(dev, &eqos->clk_bulk);
	if (ret) {
		pr_err("clk_get_bulk failed: %d", ret);
		goto err_free_clk_gtx;
	}

	debug("%s: OK\n", __func__);
	return 0;

err_free_clk_gtx:
	clk_free(&eqos->clk_tx);
err_free_reset_eqos:
	reset_release_bulk(&eqos->reset_bulk);

	return ret;
}

static phy_interface_t eqos_get_interface_jh7110(struct udevice *dev)
{
	const char *phy_mode;
	phy_interface_t interface = PHY_INTERFACE_MODE_NONE;

	phy_mode = dev_read_prop(dev, "phy-mode", NULL);
	if (phy_mode)
		interface = phy_get_interface_by_name(phy_mode);

	return interface;
}
static int eqos_remove_resources_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

#ifdef CONFIG_CLK
	clk_free(&eqos->clk_tx);
	clk_free(&eqos->clk_ptp_ref);
	clk_free(&eqos->clk_rx);
	clk_free(&eqos->clk_slave_bus);
	clk_free(&eqos->clk_master_bus);
#endif
	dm_gpio_free(dev, &eqos->phy_reset_gpio);
	reset_free(&eqos->reset_ctl);

	debug("%s: OK\n", __func__);
	return 0;
}
static int eqos_remove_resources_stm32(struct udevice *dev)
{
	/*
	 * Declare eqos outside the #ifdef: the GPIO cleanup below needs it
	 * even when CONFIG_CLK is disabled.
	 */
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

#ifdef CONFIG_CLK
	clk_free(&eqos->clk_tx);
	clk_free(&eqos->clk_rx);
	clk_free(&eqos->clk_master_bus);
	if (clk_valid(&eqos->clk_ck))
		clk_free(&eqos->clk_ck);
#endif

	if (dm_gpio_is_valid(&eqos->phy_reset_gpio))
		dm_gpio_free(dev, &eqos->phy_reset_gpio);

	debug("%s: OK\n", __func__);
	return 0;
}
static int eqos_remove_resources_jh7110(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	reset_release_bulk(&eqos->reset_bulk);
	clk_release_bulk(&eqos->clk_bulk);

	return 0;
}
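/*
 * Common probe: map the MAC/MTL/DMA register blocks at fixed offsets from
 * the base address, allocate the core DMA resources, run the variant's
 * resource hook, and register (or look up) the MDIO bus used to talk to the
 * PHY.
 */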
static int eqos_probe(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	eqos->dev = dev;
	eqos->config = (void *)dev_get_driver_data(dev);

	eqos->regs = dev_read_addr(dev);
	if (eqos->regs == FDT_ADDR_T_NONE) {
		pr_err("dev_read_addr() failed");
		return -ENODEV;
	}
	eqos->mac_regs = (void *)(eqos->regs + EQOS_MAC_REGS_BASE);
	eqos->mtl_regs = (void *)(eqos->regs + EQOS_MTL_REGS_BASE);
	eqos->dma_regs = (void *)(eqos->regs + EQOS_DMA_REGS_BASE);
	eqos->tegra186_regs = (void *)(eqos->regs + EQOS_TEGRA186_REGS_BASE);

	ret = eqos_probe_resources_core(dev);
	if (ret < 0) {
		pr_err("eqos_probe_resources_core() failed: %d", ret);
		return ret;
	}

	ret = eqos->config->ops->eqos_probe_resources(dev);
	if (ret < 0) {
		pr_err("eqos_probe_resources() failed: %d", ret);
		goto err_remove_resources_core;
	}

#ifdef CONFIG_DM_ETH_PHY
	eqos->mii = eth_phy_get_mdio_bus(dev);
#endif
	if (!eqos->mii) {
		eqos->mii = mdio_alloc();
		if (!eqos->mii) {
			pr_err("mdio_alloc() failed");
			ret = -ENOMEM;
			goto err_remove_resources;
		}
		eqos->mii->read = eqos_mdio_read;
		eqos->mii->write = eqos_mdio_write;
		eqos->mii->priv = eqos;
		strcpy(eqos->mii->name, dev->name);

		ret = mdio_register(eqos->mii);
		if (ret < 0) {
			pr_err("mdio_register() failed: %d", ret);
			goto err_free_mdio;
		}
	}

#ifdef CONFIG_DM_ETH_PHY
	eth_phy_set_mdio_bus(dev, eqos->mii);
#endif

	debug("%s: OK\n", __func__);
	return 0;

err_free_mdio:
	mdio_free(eqos->mii);
err_remove_resources:
	eqos->config->ops->eqos_remove_resources(dev);
err_remove_resources_core:
	eqos_remove_resources_core(dev);

	debug("%s: returns %d\n", __func__, ret);
	return ret;
}
static int eqos_remove(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	mdio_unregister(eqos->mii);
	mdio_free(eqos->mii);
	eqos->config->ops->eqos_remove_resources(dev);
	eqos_remove_resources_core(dev);

	debug("%s: OK\n", __func__);
	return 0;
}
static int eqos_null_ops(struct udevice *dev)
{
	return 0;
}
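/* Operations exposed to the U-Boot Ethernet uclass. */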
static const struct eth_ops eqos_ops = {
	.start = eqos_start,
	.stop = eqos_stop,
	.send = eqos_send,
	.recv = eqos_recv,
	.free_pkt = eqos_free_pkt,
	.write_hwaddr = eqos_write_hwaddr,
	.read_rom_hwaddr = eqos_read_rom_hwaddr,
};
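/*
 * Per-SoC glue. Each variant supplies an eqos_ops table of cache-maintenance
 * and clock/reset/calibration hooks, plus an eqos_config describing its MDIO
 * and software-reset timing, RX queue mapping, AXI bus width, and PHY
 * interface lookup; eqos_ids below binds a compatible string to the matching
 * config.
 */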
static struct eqos_ops eqos_tegra186_ops = {
	.eqos_inval_desc = eqos_inval_desc_generic,
	.eqos_flush_desc = eqos_flush_desc_generic,
	.eqos_inval_buffer = eqos_inval_buffer_tegra186,
	.eqos_flush_buffer = eqos_flush_buffer_tegra186,
	.eqos_probe_resources = eqos_probe_resources_tegra186,
	.eqos_remove_resources = eqos_remove_resources_tegra186,
	.eqos_stop_resets = eqos_stop_resets_tegra186,
	.eqos_start_resets = eqos_start_resets_tegra186,
	.eqos_stop_clks = eqos_stop_clks_tegra186,
	.eqos_start_clks = eqos_start_clks_tegra186,
	.eqos_calibrate_pads = eqos_calibrate_pads_tegra186,
	.eqos_disable_calibration = eqos_disable_calibration_tegra186,
	.eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_tegra186,
	.eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_tegra186
};

static const struct eqos_config __maybe_unused eqos_tegra186_config = {
	.reg_access_always_ok = false,
	.mdio_wait = 10,
	.swr_wait = 10,
	.config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB,
	.config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_20_35,
	.axi_bus_width = EQOS_AXI_WIDTH_128,
	.interface = eqos_get_interface_tegra186,
	.ops = &eqos_tegra186_ops
};

static struct eqos_ops eqos_stm32_ops = {
	.eqos_inval_desc = eqos_inval_desc_generic,
	.eqos_flush_desc = eqos_flush_desc_generic,
	.eqos_inval_buffer = eqos_inval_buffer_generic,
	.eqos_flush_buffer = eqos_flush_buffer_generic,
	.eqos_probe_resources = eqos_probe_resources_stm32,
	.eqos_remove_resources = eqos_remove_resources_stm32,
	.eqos_stop_resets = eqos_null_ops,
	.eqos_start_resets = eqos_null_ops,
	.eqos_stop_clks = eqos_stop_clks_stm32,
	.eqos_start_clks = eqos_start_clks_stm32,
	.eqos_calibrate_pads = eqos_null_ops,
	.eqos_disable_calibration = eqos_null_ops,
	.eqos_set_tx_clk_speed = eqos_null_ops,
	.eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_stm32
};

static const struct eqos_config __maybe_unused eqos_stm32_config = {
	.reg_access_always_ok = false,
	.mdio_wait = 10000,
	.swr_wait = 50,
	.config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_AV,
	.config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_250_300,
	.axi_bus_width = EQOS_AXI_WIDTH_64,
	.interface = eqos_get_interface_stm32,
	.ops = &eqos_stm32_ops
};

static struct eqos_ops eqos_imx_ops = {
	.eqos_inval_desc = eqos_inval_desc_generic,
	.eqos_flush_desc = eqos_flush_desc_generic,
	.eqos_inval_buffer = eqos_inval_buffer_generic,
	.eqos_flush_buffer = eqos_flush_buffer_generic,
	.eqos_probe_resources = eqos_probe_resources_imx,
	.eqos_remove_resources = eqos_null_ops,
	.eqos_stop_resets = eqos_null_ops,
	.eqos_start_resets = eqos_null_ops,
	.eqos_stop_clks = eqos_null_ops,
	.eqos_start_clks = eqos_null_ops,
	.eqos_calibrate_pads = eqos_null_ops,
	.eqos_disable_calibration = eqos_null_ops,
	.eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_imx,
	.eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_imx
};

struct eqos_config __maybe_unused eqos_imx_config = {
	.reg_access_always_ok = false,
	.mdio_wait = 10,
	.swr_wait = 50,
	.config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB,
	.config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_250_300,
	.axi_bus_width = EQOS_AXI_WIDTH_64,
	.interface = eqos_get_interface_imx,
	.ops = &eqos_imx_ops
};

static struct eqos_ops eqos_jh7110_ops = {
	.eqos_inval_desc = eqos_inval_desc_generic,
	.eqos_flush_desc = eqos_flush_desc_generic,
	.eqos_inval_buffer = eqos_inval_buffer_generic,
	.eqos_flush_buffer = eqos_flush_buffer_generic,
	.eqos_probe_resources = eqos_probe_resources_jh7110,
	.eqos_remove_resources = eqos_remove_resources_jh7110,
	.eqos_stop_resets = eqos_stop_resets_jh7110,
	.eqos_start_resets = eqos_start_resets_jh7110,
	.eqos_stop_clks = eqos_stop_clks_jh7110,
	.eqos_start_clks = eqos_start_clks_jh7110,
	.eqos_calibrate_pads = eqos_null_ops,
	.eqos_disable_calibration = eqos_null_ops,
	.eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_jh7110,
	.eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_jh7110
};

struct eqos_config __maybe_unused eqos_jh7110_config = {
	.reg_access_always_ok = false,
	.mdio_wait = 10,
	.swr_wait = 50,
	.config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB,
	.config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_250_300,
	.axi_bus_width = EQOS_AXI_WIDTH_64,
	.interface = eqos_get_interface_jh7110,
	.ops = &eqos_jh7110_ops
};
static const struct udevice_id eqos_ids[] = {
#if IS_ENABLED(CONFIG_DWC_ETH_QOS_TEGRA186)
	{
		.compatible = "nvidia,tegra186-eqos",
		.data = (ulong)&eqos_tegra186_config
	},
#endif
#if IS_ENABLED(CONFIG_DWC_ETH_QOS_STM32)
	{
		.compatible = "st,stm32mp1-dwmac",
		.data = (ulong)&eqos_stm32_config
	},
#endif
#if IS_ENABLED(CONFIG_DWC_ETH_QOS_IMX)
	{
		.compatible = "fsl,imx-eqos",
		.data = (ulong)&eqos_imx_config
	},
#endif
#if IS_ENABLED(CONFIG_DWC_ETH_QOS_STARFIVE)
	{
		.compatible = "starfive,jh7110-eqos-5.20",
		.data = (ulong)&eqos_jh7110_config
	},
#endif
	{ }
};
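/* Driver-model registration: matches eqos_ids and sizes the private data. */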
U_BOOT_DRIVER(eth_eqos) = {
	.name = "eth_eqos",
	.id = UCLASS_ETH,
	.of_match = of_match_ptr(eqos_ids),
	.probe = eqos_probe,
	.remove = eqos_remove,
	.ops = &eqos_ops,
	.priv_auto = sizeof(struct eqos_priv),
	.plat_auto = sizeof(struct eth_pdata),
};