cvmx-helper.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 Marvell International Ltd.
 *
 * Helper functions for common, but complicated tasks.
 */

#include <log.h>
#include <linux/delay.h>

#include <mach/cvmx-regs.h>
#include <mach/cvmx-csr.h>
#include <mach/cvmx-bootmem.h>
#include <mach/octeon-model.h>
#include <mach/cvmx-fuse.h>
#include <mach/octeon-feature.h>
#include <mach/cvmx-qlm.h>
#include <mach/octeon_qlm.h>
#include <mach/cvmx-pcie.h>
#include <mach/cvmx-coremask.h>

#include <mach/cvmx-agl-defs.h>
#include <mach/cvmx-asxx-defs.h>
#include <mach/cvmx-bgxx-defs.h>
#include <mach/cvmx-dbg-defs.h>
#include <mach/cvmx-gmxx-defs.h>
#include <mach/cvmx-gserx-defs.h>
#include <mach/cvmx-ipd-defs.h>
#include <mach/cvmx-l2c-defs.h>
#include <mach/cvmx-npi-defs.h>
#include <mach/cvmx-pcsx-defs.h>
#include <mach/cvmx-pexp-defs.h>
#include <mach/cvmx-pki-defs.h>
#include <mach/cvmx-pko-defs.h>
#include <mach/cvmx-smix-defs.h>
#include <mach/cvmx-sriox-defs.h>

#include <mach/cvmx-helper.h>
#include <mach/cvmx-helper-board.h>
#include <mach/cvmx-helper-fdt.h>
#include <mach/cvmx-helper-bgx.h>
#include <mach/cvmx-helper-cfg.h>
#include <mach/cvmx-helper-ipd.h>
#include <mach/cvmx-helper-util.h>
#include <mach/cvmx-helper-pki.h>
#include <mach/cvmx-helper-pko.h>
#include <mach/cvmx-helper-pko3.h>

#include <mach/cvmx-global-resources.h>
#include <mach/cvmx-pko-internal-ports-range.h>
#include <mach/cvmx-pko3-queue.h>
#include <mach/cvmx-gmx.h>
#include <mach/cvmx-hwpko.h>
#include <mach/cvmx-ilk.h>
#include <mach/cvmx-ipd.h>
#include <mach/cvmx-pip.h>
/**
 * @INTERNAL
 * This structure specifies the interface methods used by an interface.
 *
 * @param mode Interface mode.
 *
 * @param enumerate Method to get the number of interface ports.
 *
 * @param probe Method to probe an interface to get the number of
 *              connected ports.
 *
 * @param enable Method to enable an interface.
 *
 * @param link_get Method to get the state of an interface link.
 *
 * @param link_set Method to configure an interface link to the specified
 *                 state.
 *
 * @param loopback Method to configure a port in loopback.
 */
struct iface_ops {
	cvmx_helper_interface_mode_t mode;
	int (*enumerate)(int xiface);
	int (*probe)(int xiface);
	int (*enable)(int xiface);
	cvmx_helper_link_info_t (*link_get)(int ipd_port);
	int (*link_set)(int ipd_port, cvmx_helper_link_info_t link_info);
	int (*loopback)(int ipd_port, int en_in, int en_ex);
};
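
/*
 * Usage sketch (illustrative only, not part of this file's API): callers
 * look up the per-interface ops table and dispatch through it, checking
 * each method pointer first since not every mode implements every op:
 *
 *	const struct iface_ops *ops = iface_node_ops[node][interface];
 *	cvmx_helper_link_info_t li;
 *
 *	if (ops && ops->link_get)
 *		li = ops->link_get(ipd_port);
 *
 * Real callers such as cvmx_helper_interface_enumerate() below resolve
 * node/interface from an xiface and populate the table by calling
 * cvmx_helper_interface_get_mode() first.
 */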
/**
 * @INTERNAL
 * This structure is used by disabled interfaces.
 */
static const struct iface_ops iface_ops_dis = {
	.mode = CVMX_HELPER_INTERFACE_MODE_DISABLED,
};

/**
 * @INTERNAL
 * This structure specifies the interface methods used by interfaces
 * configured as gmii.
 */
static const struct iface_ops iface_ops_gmii = {
	.mode = CVMX_HELPER_INTERFACE_MODE_GMII,
	.enumerate = __cvmx_helper_rgmii_probe,
	.probe = __cvmx_helper_rgmii_probe,
	.enable = __cvmx_helper_rgmii_enable,
	.link_get = __cvmx_helper_gmii_link_get,
	.link_set = __cvmx_helper_rgmii_link_set,
	.loopback = __cvmx_helper_rgmii_configure_loopback,
};

/**
 * @INTERNAL
 * This structure specifies the interface methods used by interfaces
 * configured as rgmii.
 */
static const struct iface_ops iface_ops_rgmii = {
	.mode = CVMX_HELPER_INTERFACE_MODE_RGMII,
	.enumerate = __cvmx_helper_rgmii_probe,
	.probe = __cvmx_helper_rgmii_probe,
	.enable = __cvmx_helper_rgmii_enable,
	.link_get = __cvmx_helper_rgmii_link_get,
	.link_set = __cvmx_helper_rgmii_link_set,
	.loopback = __cvmx_helper_rgmii_configure_loopback,
};
/**
 * @INTERNAL
 * This structure specifies the interface methods used by interfaces
 * configured as sgmii that use the gmx mac.
 */
static const struct iface_ops iface_ops_sgmii = {
	.mode = CVMX_HELPER_INTERFACE_MODE_SGMII,
	.enumerate = __cvmx_helper_sgmii_enumerate,
	.probe = __cvmx_helper_sgmii_probe,
	.enable = __cvmx_helper_sgmii_enable,
	.link_get = __cvmx_helper_sgmii_link_get,
	.link_set = __cvmx_helper_sgmii_link_set,
	.loopback = __cvmx_helper_sgmii_configure_loopback,
};

/**
 * @INTERNAL
 * This structure specifies the interface methods used by interfaces
 * configured as sgmii that use the bgx mac.
 */
static const struct iface_ops iface_ops_bgx_sgmii = {
	.mode = CVMX_HELPER_INTERFACE_MODE_SGMII,
	.enumerate = __cvmx_helper_bgx_enumerate,
	.probe = __cvmx_helper_bgx_probe,
	.enable = __cvmx_helper_bgx_sgmii_enable,
	.link_get = __cvmx_helper_bgx_sgmii_link_get,
	.link_set = __cvmx_helper_bgx_sgmii_link_set,
	.loopback = __cvmx_helper_bgx_sgmii_configure_loopback,
};

/**
 * @INTERNAL
 * This structure specifies the interface methods used by interfaces
 * configured as qsgmii.
 */
static const struct iface_ops iface_ops_qsgmii = {
	.mode = CVMX_HELPER_INTERFACE_MODE_QSGMII,
	.enumerate = __cvmx_helper_sgmii_enumerate,
	.probe = __cvmx_helper_sgmii_probe,
	.enable = __cvmx_helper_sgmii_enable,
	.link_get = __cvmx_helper_sgmii_link_get,
	.link_set = __cvmx_helper_sgmii_link_set,
	.loopback = __cvmx_helper_sgmii_configure_loopback,
};

/**
 * @INTERNAL
 * This structure specifies the interface methods used by interfaces
 * configured as xaui using the gmx mac.
 */
static const struct iface_ops iface_ops_xaui = {
	.mode = CVMX_HELPER_INTERFACE_MODE_XAUI,
	.enumerate = __cvmx_helper_xaui_enumerate,
	.probe = __cvmx_helper_xaui_probe,
	.enable = __cvmx_helper_xaui_enable,
	.link_get = __cvmx_helper_xaui_link_get,
	.link_set = __cvmx_helper_xaui_link_set,
	.loopback = __cvmx_helper_xaui_configure_loopback,
};
/**
 * @INTERNAL
 * This structure specifies the interface methods used by interfaces
 * configured as xaui using the bgx mac.
 */
static const struct iface_ops iface_ops_bgx_xaui = {
	.mode = CVMX_HELPER_INTERFACE_MODE_XAUI,
	.enumerate = __cvmx_helper_bgx_enumerate,
	.probe = __cvmx_helper_bgx_probe,
	.enable = __cvmx_helper_bgx_xaui_enable,
	.link_get = __cvmx_helper_bgx_xaui_link_get,
	.link_set = __cvmx_helper_bgx_xaui_link_set,
	.loopback = __cvmx_helper_bgx_xaui_configure_loopback,
};
/**
 * @INTERNAL
 * This structure specifies the interface methods used by interfaces
 * configured as rxaui.
 */
static const struct iface_ops iface_ops_rxaui = {
	.mode = CVMX_HELPER_INTERFACE_MODE_RXAUI,
	.enumerate = __cvmx_helper_xaui_enumerate,
	.probe = __cvmx_helper_xaui_probe,
	.enable = __cvmx_helper_xaui_enable,
	.link_get = __cvmx_helper_xaui_link_get,
	.link_set = __cvmx_helper_xaui_link_set,
	.loopback = __cvmx_helper_xaui_configure_loopback,
};
/**
 * @INTERNAL
 * This structure specifies the interface methods used by interfaces
 * configured as rxaui using the bgx mac.
 */
static const struct iface_ops iface_ops_bgx_rxaui = {
	.mode = CVMX_HELPER_INTERFACE_MODE_RXAUI,
	.enumerate = __cvmx_helper_bgx_enumerate,
	.probe = __cvmx_helper_bgx_probe,
	.enable = __cvmx_helper_bgx_xaui_enable,
	.link_get = __cvmx_helper_bgx_xaui_link_get,
	.link_set = __cvmx_helper_bgx_xaui_link_set,
	.loopback = __cvmx_helper_bgx_xaui_configure_loopback,
};
/**
 * @INTERNAL
 * This structure specifies the interface methods used by interfaces
 * configured as xlaui.
 */
static const struct iface_ops iface_ops_bgx_xlaui = {
	.mode = CVMX_HELPER_INTERFACE_MODE_XLAUI,
	.enumerate = __cvmx_helper_bgx_enumerate,
	.probe = __cvmx_helper_bgx_probe,
	.enable = __cvmx_helper_bgx_xaui_enable,
	.link_get = __cvmx_helper_bgx_xaui_link_get,
	.link_set = __cvmx_helper_bgx_xaui_link_set,
	.loopback = __cvmx_helper_bgx_xaui_configure_loopback,
};

/**
 * @INTERNAL
 * This structure specifies the interface methods used by interfaces
 * configured as xfi.
 */
static const struct iface_ops iface_ops_bgx_xfi = {
	.mode = CVMX_HELPER_INTERFACE_MODE_XFI,
	.enumerate = __cvmx_helper_bgx_enumerate,
	.probe = __cvmx_helper_bgx_probe,
	.enable = __cvmx_helper_bgx_xaui_enable,
	.link_get = __cvmx_helper_bgx_xaui_link_get,
	.link_set = __cvmx_helper_bgx_xaui_link_set,
	.loopback = __cvmx_helper_bgx_xaui_configure_loopback,
};

static const struct iface_ops iface_ops_bgx_10G_KR = {
	.mode = CVMX_HELPER_INTERFACE_MODE_10G_KR,
	.enumerate = __cvmx_helper_bgx_enumerate,
	.probe = __cvmx_helper_bgx_probe,
	.enable = __cvmx_helper_bgx_xaui_enable,
	.link_get = __cvmx_helper_bgx_xaui_link_get,
	.link_set = __cvmx_helper_bgx_xaui_link_set,
	.loopback = __cvmx_helper_bgx_xaui_configure_loopback,
};

static const struct iface_ops iface_ops_bgx_40G_KR4 = {
	.mode = CVMX_HELPER_INTERFACE_MODE_40G_KR4,
	.enumerate = __cvmx_helper_bgx_enumerate,
	.probe = __cvmx_helper_bgx_probe,
	.enable = __cvmx_helper_bgx_xaui_enable,
	.link_get = __cvmx_helper_bgx_xaui_link_get,
	.link_set = __cvmx_helper_bgx_xaui_link_set,
	.loopback = __cvmx_helper_bgx_xaui_configure_loopback,
};
/**
 * @INTERNAL
 * This structure specifies the interface methods used by interfaces
 * configured as ilk.
 */
static const struct iface_ops iface_ops_ilk = {
	.mode = CVMX_HELPER_INTERFACE_MODE_ILK,
	.enumerate = __cvmx_helper_ilk_enumerate,
	.probe = __cvmx_helper_ilk_probe,
	.enable = __cvmx_helper_ilk_enable,
	.link_get = __cvmx_helper_ilk_link_get,
	.link_set = __cvmx_helper_ilk_link_set,
};

/**
 * @INTERNAL
 * This structure specifies the interface methods used by interfaces
 * configured as npi.
 */
static const struct iface_ops iface_ops_npi = {
	.mode = CVMX_HELPER_INTERFACE_MODE_NPI,
	.enumerate = __cvmx_helper_npi_probe,
	.probe = __cvmx_helper_npi_probe,
	.enable = __cvmx_helper_npi_enable,
};

/**
 * @INTERNAL
 * This structure specifies the interface methods used by interfaces
 * configured as srio.
 */
static const struct iface_ops iface_ops_srio = {
	.mode = CVMX_HELPER_INTERFACE_MODE_SRIO,
	.enumerate = __cvmx_helper_srio_probe,
	.probe = __cvmx_helper_srio_probe,
	.enable = __cvmx_helper_srio_enable,
	.link_get = __cvmx_helper_srio_link_get,
	.link_set = __cvmx_helper_srio_link_set,
};

/**
 * @INTERNAL
 * This structure specifies the interface methods used by interfaces
 * configured as agl.
 */
static const struct iface_ops iface_ops_agl = {
	.mode = CVMX_HELPER_INTERFACE_MODE_AGL,
	.enumerate = __cvmx_helper_agl_enumerate,
	.probe = __cvmx_helper_agl_probe,
	.enable = __cvmx_helper_agl_enable,
	.link_get = __cvmx_helper_agl_link_get,
	.link_set = __cvmx_helper_agl_link_set,
};

/**
 * @INTERNAL
 * This structure specifies the interface methods used by interfaces
 * configured as mixed mode, where some ports are sgmii and some are xfi.
 */
static const struct iface_ops iface_ops_bgx_mixed = {
	.mode = CVMX_HELPER_INTERFACE_MODE_MIXED,
	.enumerate = __cvmx_helper_bgx_enumerate,
	.probe = __cvmx_helper_bgx_probe,
	.enable = __cvmx_helper_bgx_mixed_enable,
	.link_get = __cvmx_helper_bgx_mixed_link_get,
	.link_set = __cvmx_helper_bgx_mixed_link_set,
	.loopback = __cvmx_helper_bgx_mixed_configure_loopback,
};

/**
 * @INTERNAL
 * This structure specifies the interface methods used by interfaces
 * configured as loop.
 */
static const struct iface_ops iface_ops_loop = {
	.mode = CVMX_HELPER_INTERFACE_MODE_LOOP,
	.enumerate = __cvmx_helper_loop_enumerate,
	.probe = __cvmx_helper_loop_probe,
};
const struct iface_ops *iface_node_ops[CVMX_MAX_NODES][CVMX_HELPER_MAX_IFACE];
#define iface_ops iface_node_ops[0]
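
/*
 * Note: the iface_ops macro above aliases node 0 of iface_node_ops so
 * that single-node code paths can simply index iface_ops[interface].
 */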
struct cvmx_iface {
	int cvif_ipd_nports;
	int cvif_has_fcs;	/* PKO fcs for this interface. */
	enum cvmx_pko_padding cvif_padding;
	cvmx_helper_link_info_t *cvif_ipd_port_link_info;
};

/*
 * This has to be static as u-boot expects to probe an interface and
 * get the number of its ports.
 */
static struct cvmx_iface cvmx_interfaces[CVMX_MAX_NODES][CVMX_HELPER_MAX_IFACE];
int __cvmx_helper_get_num_ipd_ports(int xiface)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	struct cvmx_iface *piface;

	if (xi.interface >= cvmx_helper_get_number_of_interfaces())
		return -1;

	piface = &cvmx_interfaces[xi.node][xi.interface];
	return piface->cvif_ipd_nports;
}

enum cvmx_pko_padding __cvmx_helper_get_pko_padding(int xiface)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	struct cvmx_iface *piface;

	if (xi.interface >= cvmx_helper_get_number_of_interfaces())
		return CVMX_PKO_PADDING_NONE;

	piface = &cvmx_interfaces[xi.node][xi.interface];
	return piface->cvif_padding;
}
int __cvmx_helper_init_interface(int xiface, int num_ipd_ports, int has_fcs,
				 enum cvmx_pko_padding pad)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	struct cvmx_iface *piface;
	cvmx_helper_link_info_t *p;
	int i;
	int sz;
	u64 addr;
	char name[32];

	if (xi.interface >= cvmx_helper_get_number_of_interfaces())
		return -1;

	piface = &cvmx_interfaces[xi.node][xi.interface];
	piface->cvif_ipd_nports = num_ipd_ports;
	piface->cvif_padding = pad;
	piface->cvif_has_fcs = has_fcs;

	/*
	 * Allocate the per-ipd_port link_info structure.
	 */
	sz = piface->cvif_ipd_nports * sizeof(cvmx_helper_link_info_t);
	snprintf(name, sizeof(name), "__int_%d_link_info", xi.interface);
	addr = CAST64(cvmx_bootmem_alloc_named_range_once(sz, 0, 0,
							  __alignof(cvmx_helper_link_info_t),
							  name, NULL));
	piface->cvif_ipd_port_link_info =
		(cvmx_helper_link_info_t *)__cvmx_phys_addr_to_ptr(addr, sz);
	if (!piface->cvif_ipd_port_link_info) {
		if (sz != 0)
			debug("iface %d failed to alloc link info\n", xi.interface);
		return -1;
	}

	/* Initialize them */
	p = piface->cvif_ipd_port_link_info;
	for (i = 0; i < piface->cvif_ipd_nports; i++) {
		(*p).u64 = 0;
		p++;
	}

	return 0;
}
/*
 * Shut down the interfaces; free the resources.
 * @INTERNAL
 */
void __cvmx_helper_shutdown_interfaces_node(unsigned int node)
{
	int i;
	int nifaces;	/* number of interfaces */
	struct cvmx_iface *piface;

	nifaces = cvmx_helper_get_number_of_interfaces();
	for (i = 0; i < nifaces; i++) {
		piface = &cvmx_interfaces[node][i];

		/*
		 * For SE apps, bootmem was meant to be allocated and never
		 * freed.
		 */
		piface->cvif_ipd_port_link_info = 0;
	}
}

void __cvmx_helper_shutdown_interfaces(void)
{
	unsigned int node = cvmx_get_node_num();

	__cvmx_helper_shutdown_interfaces_node(node);
}
int __cvmx_helper_set_link_info(int xiface, int index,
				cvmx_helper_link_info_t link_info)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	struct cvmx_iface *piface;

	if (xi.interface >= cvmx_helper_get_number_of_interfaces())
		return -1;

	piface = &cvmx_interfaces[xi.node][xi.interface];
	if (piface->cvif_ipd_port_link_info) {
		piface->cvif_ipd_port_link_info[index] = link_info;
		return 0;
	}

	return -1;
}

cvmx_helper_link_info_t __cvmx_helper_get_link_info(int xiface, int port)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	struct cvmx_iface *piface;
	cvmx_helper_link_info_t err;

	err.u64 = 0;

	if (xi.interface >= cvmx_helper_get_number_of_interfaces())
		return err;

	piface = &cvmx_interfaces[xi.node][xi.interface];
	if (piface->cvif_ipd_port_link_info)
		return piface->cvif_ipd_port_link_info[port];

	return err;
}
/**
 * Returns whether FCS is enabled for the specified interface.
 *
 * @param xiface - interface to check
 *
 * Return: zero if FCS is not used, otherwise FCS is used.
 */
int __cvmx_helper_get_has_fcs(int xiface)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	return cvmx_interfaces[xi.node][xi.interface].cvif_has_fcs;
}
u64 cvmx_rgmii_backpressure_dis = 1;

typedef int (*cvmx_export_config_t)(void);
cvmx_export_config_t cvmx_export_app_config;

void cvmx_rgmii_set_back_pressure(uint64_t backpressure_dis)
{
	cvmx_rgmii_backpressure_dis = backpressure_dis;
}

/*
 * Internal functions that are not exported in the .h file but must be
 * declared to make gcc happy.
 */
extern cvmx_helper_link_info_t __cvmx_helper_get_link_info(int interface, int port);
/**
 * cvmx_override_iface_phy_mode(int interface, int index) is a function
 * pointer. It is meant to allow customization of interfaces which do not
 * have a PHY.
 *
 * @returns 0 if MAC decides TX_CONFIG_REG or 1 if PHY decides TX_CONFIG_REG.
 *
 * If this function pointer is NULL then it defaults to the MAC.
 */
int (*cvmx_override_iface_phy_mode)(int interface, int index);

/**
 * cvmx_override_ipd_port_setup(int ipd_port) is a function
 * pointer. It is meant to allow customization of the IPD
 * port/port kind setup before packet input/output comes online.
 * It is called after cvmx-helper does the default IPD configuration,
 * but before IPD is enabled. Users should set this pointer to a
 * function before calling any cvmx-helper operations.
 */
void (*cvmx_override_ipd_port_setup)(int ipd_port) = NULL;
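
/*
 * Example (hypothetical, for illustration): a board file can install an
 * override before calling any cvmx-helper operations. board_ipd_fixup()
 * below is an invented name, not an existing symbol:
 *
 *	static void board_ipd_fixup(int ipd_port)
 *	{
 *		// adjust the port/port-kind setup for ipd_port here
 *	}
 *
 *	cvmx_override_ipd_port_setup = board_ipd_fixup;
 */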
/**
 * Return the number of interfaces the chip has. Each interface
 * may have multiple ports. Most chips support two interfaces,
 * but the CNX0XX and CNX1XX are exceptions. These only support
 * one interface.
 *
 * Return: Number of interfaces on chip
 */
int cvmx_helper_get_number_of_interfaces(void)
{
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		return 9;
	else if (OCTEON_IS_MODEL(OCTEON_CN66XX))
		if (OCTEON_IS_MODEL(OCTEON_CN66XX_PASS1_0))
			return 7;
		else
			return 8;
	else if (OCTEON_IS_MODEL(OCTEON_CN63XX))
		return 6;
	else if (OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX))
		return 4;
	else if (OCTEON_IS_MODEL(OCTEON_CN70XX))
		return 5;
	else if (OCTEON_IS_MODEL(OCTEON_CN78XX))
		return 10;
	else if (OCTEON_IS_MODEL(OCTEON_CNF75XX))
		return 5;
	else if (OCTEON_IS_MODEL(OCTEON_CN73XX))
		return 5;
	else
		return 3;
}
int __cvmx_helper_early_ports_on_interface(int interface)
{
	int ports;

	if (octeon_has_feature(OCTEON_FEATURE_PKND))
		return cvmx_helper_interface_enumerate(interface);

	ports = cvmx_helper_interface_enumerate(interface);
	ports = __cvmx_helper_board_interface_probe(interface, ports);

	return ports;
}
/**
 * Return the number of ports on an interface. Depending on the
 * chip and configuration, this can be 1-16. A value of 0
 * specifies that the interface doesn't exist or isn't usable.
 *
 * @param xiface xiface to get the port count for
 *
 * Return: Number of ports on interface. Can be zero.
 */
int cvmx_helper_ports_on_interface(int xiface)
{
	if (octeon_has_feature(OCTEON_FEATURE_PKND))
		return cvmx_helper_interface_enumerate(xiface);
	else
		return __cvmx_helper_get_num_ipd_ports(xiface);
}
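
/*
 * Example (illustrative sketch): walking every port on every interface
 * with the helpers above:
 *
 *	int node = cvmx_get_node_num();
 *	int i, p;
 *
 *	for (i = 0; i < cvmx_helper_get_number_of_interfaces(); i++) {
 *		int xiface = cvmx_helper_node_interface_to_xiface(node, i);
 *		int nports = cvmx_helper_ports_on_interface(xiface);
 *
 *		for (p = 0; p < nports; p++)
 *			; // per-port work goes here
 *	}
 */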
/**
 * @INTERNAL
 * Return interface mode for CN70XX.
 */
static cvmx_helper_interface_mode_t __cvmx_get_mode_cn70xx(int interface)
{
	/* SGMII/RXAUI/QSGMII */
	if (interface < 2) {
		enum cvmx_qlm_mode qlm_mode =
			cvmx_qlm_get_dlm_mode(0, interface);

		if (qlm_mode == CVMX_QLM_MODE_SGMII)
			iface_ops[interface] = &iface_ops_sgmii;
		else if (qlm_mode == CVMX_QLM_MODE_QSGMII)
			iface_ops[interface] = &iface_ops_qsgmii;
		else if (qlm_mode == CVMX_QLM_MODE_RXAUI)
			iface_ops[interface] = &iface_ops_rxaui;
		else
			iface_ops[interface] = &iface_ops_dis;
	} else if (interface == 2) { /* DPI */
		iface_ops[interface] = &iface_ops_npi;
	} else if (interface == 3) { /* LOOP */
		iface_ops[interface] = &iface_ops_loop;
	} else if (interface == 4) { /* RGMII (AGL) */
		cvmx_agl_prtx_ctl_t prtx_ctl;

		prtx_ctl.u64 = csr_rd(CVMX_AGL_PRTX_CTL(0));
		if (prtx_ctl.s.mode == 0)
			iface_ops[interface] = &iface_ops_agl;
		else
			iface_ops[interface] = &iface_ops_dis;
	} else {
		iface_ops[interface] = &iface_ops_dis;
	}

	return iface_ops[interface]->mode;
}
/**
 * @INTERNAL
 * Return interface mode for CN78XX.
 */
static cvmx_helper_interface_mode_t __cvmx_get_mode_cn78xx(int xiface)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	/* SGMII/RXAUI/XAUI */
	if (xi.interface < 6) {
		int qlm = cvmx_qlm_lmac(xiface, 0);
		enum cvmx_qlm_mode qlm_mode;

		if (qlm == -1) {
			iface_node_ops[xi.node][xi.interface] = &iface_ops_dis;
			return iface_node_ops[xi.node][xi.interface]->mode;
		}
		qlm_mode = cvmx_qlm_get_mode_cn78xx(xi.node, qlm);

		if (qlm_mode == CVMX_QLM_MODE_SGMII)
			iface_node_ops[xi.node][xi.interface] = &iface_ops_bgx_sgmii;
		else if (qlm_mode == CVMX_QLM_MODE_XAUI)
			iface_node_ops[xi.node][xi.interface] = &iface_ops_bgx_xaui;
		else if (qlm_mode == CVMX_QLM_MODE_XLAUI)
			iface_node_ops[xi.node][xi.interface] = &iface_ops_bgx_xlaui;
		else if (qlm_mode == CVMX_QLM_MODE_XFI)
			iface_node_ops[xi.node][xi.interface] = &iface_ops_bgx_xfi;
		else if (qlm_mode == CVMX_QLM_MODE_RXAUI)
			iface_node_ops[xi.node][xi.interface] = &iface_ops_bgx_rxaui;
		else
			iface_node_ops[xi.node][xi.interface] = &iface_ops_dis;
	} else if (xi.interface < 8) {
		enum cvmx_qlm_mode qlm_mode;
		int found = 0;
		int i;
		int intf, lane_mask;

		if (xi.interface == 6) {
			intf = 6;
			lane_mask = cvmx_ilk_lane_mask[xi.node][0];
		} else {
			intf = 7;
			lane_mask = cvmx_ilk_lane_mask[xi.node][1];
		}
		switch (lane_mask) {
		default:
		case 0x0:
			iface_node_ops[xi.node][intf] = &iface_ops_dis;
			break;
		case 0xf:
			qlm_mode = cvmx_qlm_get_mode_cn78xx(xi.node, 4);
			if (qlm_mode == CVMX_QLM_MODE_ILK)
				iface_node_ops[xi.node][intf] = &iface_ops_ilk;
			else
				iface_node_ops[xi.node][intf] = &iface_ops_dis;
			break;
		case 0xff:
			found = 0;
			for (i = 4; i < 6; i++) {
				qlm_mode = cvmx_qlm_get_mode_cn78xx(xi.node, i);
				if (qlm_mode == CVMX_QLM_MODE_ILK)
					found++;
			}
			if (found == 2)
				iface_node_ops[xi.node][intf] = &iface_ops_ilk;
			else
				iface_node_ops[xi.node][intf] = &iface_ops_dis;
			break;
		case 0xfff:
			found = 0;
			for (i = 4; i < 7; i++) {
				qlm_mode = cvmx_qlm_get_mode_cn78xx(xi.node, i);
				if (qlm_mode == CVMX_QLM_MODE_ILK)
					found++;
			}
			if (found == 3)
				iface_node_ops[xi.node][intf] = &iface_ops_ilk;
			else
				iface_node_ops[xi.node][intf] = &iface_ops_dis;
			break;
		case 0xff00:
			found = 0;
			for (i = 6; i < 8; i++) {
				qlm_mode = cvmx_qlm_get_mode_cn78xx(xi.node, i);
				if (qlm_mode == CVMX_QLM_MODE_ILK)
					found++;
			}
			if (found == 2)
				iface_node_ops[xi.node][intf] = &iface_ops_ilk;
			else
				iface_node_ops[xi.node][intf] = &iface_ops_dis;
			break;
		case 0xf0:
			qlm_mode = cvmx_qlm_get_mode_cn78xx(xi.node, 5);
			if (qlm_mode == CVMX_QLM_MODE_ILK)
				iface_node_ops[xi.node][intf] = &iface_ops_ilk;
			else
				iface_node_ops[xi.node][intf] = &iface_ops_dis;
			break;
		case 0xf00:
			qlm_mode = cvmx_qlm_get_mode_cn78xx(xi.node, 6);
			if (qlm_mode == CVMX_QLM_MODE_ILK)
				iface_node_ops[xi.node][intf] = &iface_ops_ilk;
			else
				iface_node_ops[xi.node][intf] = &iface_ops_dis;
			break;
		case 0xf000:
			qlm_mode = cvmx_qlm_get_mode_cn78xx(xi.node, 7);
			if (qlm_mode == CVMX_QLM_MODE_ILK)
				iface_node_ops[xi.node][intf] = &iface_ops_ilk;
			else
				iface_node_ops[xi.node][intf] = &iface_ops_dis;
			break;
		case 0xfff0:
			found = 0;
			for (i = 5; i < 8; i++) {
				qlm_mode = cvmx_qlm_get_mode_cn78xx(xi.node, i);
				if (qlm_mode == CVMX_QLM_MODE_ILK)
					found++;
			}
			if (found == 3)
				iface_node_ops[xi.node][intf] = &iface_ops_ilk;
			else
				iface_node_ops[xi.node][intf] = &iface_ops_dis;
			break;
		}
	} else if (xi.interface == 8) { /* DPI */
		int qlm = 0;

		for (qlm = 0; qlm < 5; qlm++) {
			/* if GSERX_CFG[pcie] == 1, then enable npi */
			if (csr_rd_node(xi.node, CVMX_GSERX_CFG(qlm)) & 0x1) {
				iface_node_ops[xi.node][xi.interface] =
					&iface_ops_npi;
				return iface_node_ops[xi.node][xi.interface]->mode;
			}
		}
		iface_node_ops[xi.node][xi.interface] = &iface_ops_dis;
	} else if (xi.interface == 9) { /* LOOP */
		iface_node_ops[xi.node][xi.interface] = &iface_ops_loop;
	} else {
		iface_node_ops[xi.node][xi.interface] = &iface_ops_dis;
	}

	return iface_node_ops[xi.node][xi.interface]->mode;
}
/**
 * @INTERNAL
 * Return interface mode for CN73XX.
 */
static cvmx_helper_interface_mode_t __cvmx_get_mode_cn73xx(int xiface)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	int interface = xi.interface;

	/* SGMII/XAUI/XLAUI/XFI */
	if (interface < 3) {
		int qlm = cvmx_qlm_lmac(xiface, 0);
		enum cvmx_qlm_mode qlm_mode;

		if (qlm == -1) {
			iface_ops[interface] = &iface_ops_dis;
			return iface_ops[interface]->mode;
		}
		qlm_mode = cvmx_qlm_get_mode(qlm);

		switch (qlm_mode) {
		case CVMX_QLM_MODE_SGMII:
		case CVMX_QLM_MODE_SGMII_2X1:
		case CVMX_QLM_MODE_RGMII_SGMII:
		case CVMX_QLM_MODE_RGMII_SGMII_1X1:
			iface_ops[interface] = &iface_ops_bgx_sgmii;
			break;
		case CVMX_QLM_MODE_XAUI:
		case CVMX_QLM_MODE_RGMII_XAUI:
			iface_ops[interface] = &iface_ops_bgx_xaui;
			break;
		case CVMX_QLM_MODE_RXAUI:
		case CVMX_QLM_MODE_RXAUI_1X2:
		case CVMX_QLM_MODE_RGMII_RXAUI:
			iface_ops[interface] = &iface_ops_bgx_rxaui;
			break;
		case CVMX_QLM_MODE_XLAUI:
		case CVMX_QLM_MODE_RGMII_XLAUI:
			iface_ops[interface] = &iface_ops_bgx_xlaui;
			break;
		case CVMX_QLM_MODE_XFI:
		case CVMX_QLM_MODE_XFI_1X2:
		case CVMX_QLM_MODE_RGMII_XFI:
			iface_ops[interface] = &iface_ops_bgx_xfi;
			break;
		case CVMX_QLM_MODE_10G_KR:
		case CVMX_QLM_MODE_10G_KR_1X2:
		case CVMX_QLM_MODE_RGMII_10G_KR:
			iface_ops[interface] = &iface_ops_bgx_10G_KR;
			break;
		case CVMX_QLM_MODE_40G_KR4:
		case CVMX_QLM_MODE_RGMII_40G_KR4:
			iface_ops[interface] = &iface_ops_bgx_40G_KR4;
			break;
		case CVMX_QLM_MODE_MIXED:
			iface_ops[interface] = &iface_ops_bgx_mixed;
			break;
		default:
			iface_ops[interface] = &iface_ops_dis;
			break;
		}
	} else if (interface == 3) { /* DPI */
		iface_ops[interface] = &iface_ops_npi;
	} else if (interface == 4) { /* LOOP */
		iface_ops[interface] = &iface_ops_loop;
	} else {
		iface_ops[interface] = &iface_ops_dis;
	}

	return iface_ops[interface]->mode;
}
/**
 * @INTERNAL
 * Return interface mode for CNF75XX.
 *
 * CNF75XX has a single BGX block, which is attached to two DLMs:
 * the first, GSER4, only supports SGMII mode, while the second,
 * GSER5, supports 1G/10G single lane modes, i.e. SGMII, XFI, 10G-KR.
 * Each half-BGX is thus designated as a separate interface with two
 * ports each.
 */
static cvmx_helper_interface_mode_t __cvmx_get_mode_cnf75xx(int xiface)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	int interface = xi.interface;

	/* BGX0: SGMII (DLM4/DLM5)/XFI(DLM5) */
	if (interface < 1) {
		enum cvmx_qlm_mode qlm_mode;
		int qlm = cvmx_qlm_lmac(xiface, 0);

		if (qlm == -1) {
			iface_ops[interface] = &iface_ops_dis;
			return iface_ops[interface]->mode;
		}
		qlm_mode = cvmx_qlm_get_mode(qlm);

		switch (qlm_mode) {
		case CVMX_QLM_MODE_SGMII:
		case CVMX_QLM_MODE_SGMII_2X1:
			iface_ops[interface] = &iface_ops_bgx_sgmii;
			break;
		case CVMX_QLM_MODE_XFI_1X2:
			iface_ops[interface] = &iface_ops_bgx_xfi;
			break;
		case CVMX_QLM_MODE_10G_KR_1X2:
			iface_ops[interface] = &iface_ops_bgx_10G_KR;
			break;
		case CVMX_QLM_MODE_MIXED:
			iface_ops[interface] = &iface_ops_bgx_mixed;
			break;
		default:
			iface_ops[interface] = &iface_ops_dis;
			break;
		}
	} else if ((interface < 3) && OCTEON_IS_MODEL(OCTEON_CNF75XX)) {
		cvmx_sriox_status_reg_t sriox_status_reg;
		int srio_port = interface - 1;

		sriox_status_reg.u64 = csr_rd(CVMX_SRIOX_STATUS_REG(srio_port));
		if (sriox_status_reg.s.srio)
			iface_ops[interface] = &iface_ops_srio;
		else
			iface_ops[interface] = &iface_ops_dis;
	} else if (interface == 3) { /* DPI */
		iface_ops[interface] = &iface_ops_npi;
	} else if (interface == 4) { /* LOOP */
		iface_ops[interface] = &iface_ops_loop;
	} else {
		iface_ops[interface] = &iface_ops_dis;
	}

	return iface_ops[interface]->mode;
}
/**
 * @INTERNAL
 * Return interface mode for CN68xx.
 */
static cvmx_helper_interface_mode_t __cvmx_get_mode_cn68xx(int interface)
{
	union cvmx_mio_qlmx_cfg qlm_cfg;

	switch (interface) {
	case 0:
		qlm_cfg.u64 = csr_rd(CVMX_MIO_QLMX_CFG(0));
		/* QLM is disabled when QLM SPD is 15. */
		if (qlm_cfg.s.qlm_spd == 15)
			iface_ops[interface] = &iface_ops_dis;
		else if (qlm_cfg.s.qlm_cfg == 7)
			iface_ops[interface] = &iface_ops_rxaui;
		else if (qlm_cfg.s.qlm_cfg == 2)
			iface_ops[interface] = &iface_ops_sgmii;
		else if (qlm_cfg.s.qlm_cfg == 3)
			iface_ops[interface] = &iface_ops_xaui;
		else
			iface_ops[interface] = &iface_ops_dis;
		break;
	case 1:
		qlm_cfg.u64 = csr_rd(CVMX_MIO_QLMX_CFG(0));
		/* QLM is disabled when QLM SPD is 15. */
		if (qlm_cfg.s.qlm_spd == 15)
			iface_ops[interface] = &iface_ops_dis;
		else if (qlm_cfg.s.qlm_cfg == 7)
			iface_ops[interface] = &iface_ops_rxaui;
		else
			iface_ops[interface] = &iface_ops_dis;
		break;
	case 2:
	case 3:
	case 4:
		qlm_cfg.u64 = csr_rd(CVMX_MIO_QLMX_CFG(interface));
		/* QLM is disabled when QLM SPD is 15. */
		if (qlm_cfg.s.qlm_spd == 15)
			iface_ops[interface] = &iface_ops_dis;
		else if (qlm_cfg.s.qlm_cfg == 2)
			iface_ops[interface] = &iface_ops_sgmii;
		else if (qlm_cfg.s.qlm_cfg == 3)
			iface_ops[interface] = &iface_ops_xaui;
		else
			iface_ops[interface] = &iface_ops_dis;
		break;
	case 5:
	case 6:
		qlm_cfg.u64 = csr_rd(CVMX_MIO_QLMX_CFG(interface - 4));
		/* QLM is disabled when QLM SPD is 15. */
		if (qlm_cfg.s.qlm_spd == 15)
			iface_ops[interface] = &iface_ops_dis;
		else if (qlm_cfg.s.qlm_cfg == 1)
			iface_ops[interface] = &iface_ops_ilk;
		else
			iface_ops[interface] = &iface_ops_dis;
		break;
	case 7: {
		union cvmx_mio_qlmx_cfg qlm_cfg1;

		/* Check if PCIe0/PCIe1 is configured for PCIe */
		qlm_cfg.u64 = csr_rd(CVMX_MIO_QLMX_CFG(3));
		qlm_cfg1.u64 = csr_rd(CVMX_MIO_QLMX_CFG(1));
		/* QLM is disabled when QLM SPD is 15. */
		if ((qlm_cfg.s.qlm_spd != 15 && qlm_cfg.s.qlm_cfg == 0) ||
		    (qlm_cfg1.s.qlm_spd != 15 && qlm_cfg1.s.qlm_cfg == 0))
			iface_ops[interface] = &iface_ops_npi;
		else
			iface_ops[interface] = &iface_ops_dis;
	} break;
	case 8:
		iface_ops[interface] = &iface_ops_loop;
		break;
	default:
		iface_ops[interface] = &iface_ops_dis;
		break;
	}

	return iface_ops[interface]->mode;
}
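
/*
 * For reference, the MIO_QLMX_CFG[qlm_cfg] encodings decoded above are:
 * 0 = PCIe, 1 = ILK, 2 = SGMII, 3 = XAUI and 7 = RXAUI, with
 * qlm_spd == 15 marking the QLM as disabled.
 */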
/**
 * @INTERNAL
 * Return interface mode for an Octeon II.
 */
static cvmx_helper_interface_mode_t __cvmx_get_mode_octeon2(int interface)
{
	union cvmx_gmxx_inf_mode mode;

	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		return __cvmx_get_mode_cn68xx(interface);

	if (interface == 2) {
		iface_ops[interface] = &iface_ops_npi;
	} else if (interface == 3) {
		iface_ops[interface] = &iface_ops_loop;
	} else if ((OCTEON_IS_MODEL(OCTEON_CN63XX) &&
		    (interface == 4 || interface == 5)) ||
		   (OCTEON_IS_MODEL(OCTEON_CN66XX) && interface >= 4 &&
		    interface <= 7)) {
		/* Only present in CN63XX & CN66XX Octeon model */
		union cvmx_sriox_status_reg sriox_status_reg;

		/* cn66xx pass1.0 has only 2 SRIO interfaces. */
		if ((interface == 5 || interface == 7) &&
		    OCTEON_IS_MODEL(OCTEON_CN66XX_PASS1_0)) {
			iface_ops[interface] = &iface_ops_dis;
		} else if (interface == 5 && OCTEON_IS_MODEL(OCTEON_CN66XX)) {
			/*
			 * Later passes of cn66xx support SRIO0 - x4/x2/x1,
			 * SRIO2 - x2/x1, SRIO3 - x1
			 */
			iface_ops[interface] = &iface_ops_dis;
		} else {
			sriox_status_reg.u64 =
				csr_rd(CVMX_SRIOX_STATUS_REG(interface - 4));
			if (sriox_status_reg.s.srio)
				iface_ops[interface] = &iface_ops_srio;
			else
				iface_ops[interface] = &iface_ops_dis;
		}
	} else if (OCTEON_IS_MODEL(OCTEON_CN66XX)) {
		union cvmx_mio_qlmx_cfg mio_qlm_cfg;

		/* QLM2 is SGMII0 and QLM1 is SGMII1 */
		if (interface == 0) {
			mio_qlm_cfg.u64 = csr_rd(CVMX_MIO_QLMX_CFG(2));
		} else if (interface == 1) {
			mio_qlm_cfg.u64 = csr_rd(CVMX_MIO_QLMX_CFG(1));
		} else {
			iface_ops[interface] = &iface_ops_dis;
			return iface_ops[interface]->mode;
		}

		if (mio_qlm_cfg.s.qlm_spd == 15)
			iface_ops[interface] = &iface_ops_dis;
		else if (mio_qlm_cfg.s.qlm_cfg == 9)
			iface_ops[interface] = &iface_ops_sgmii;
		else if (mio_qlm_cfg.s.qlm_cfg == 11)
			iface_ops[interface] = &iface_ops_xaui;
		else
			iface_ops[interface] = &iface_ops_dis;
	} else if (OCTEON_IS_MODEL(OCTEON_CN61XX)) {
		union cvmx_mio_qlmx_cfg qlm_cfg;

		if (interface == 0) {
			qlm_cfg.u64 = csr_rd(CVMX_MIO_QLMX_CFG(2));
		} else if (interface == 1) {
			qlm_cfg.u64 = csr_rd(CVMX_MIO_QLMX_CFG(0));
		} else {
			iface_ops[interface] = &iface_ops_dis;
			return iface_ops[interface]->mode;
		}

		if (qlm_cfg.s.qlm_spd == 15)
			iface_ops[interface] = &iface_ops_dis;
		else if (qlm_cfg.s.qlm_cfg == 2)
			iface_ops[interface] = &iface_ops_sgmii;
		else if (qlm_cfg.s.qlm_cfg == 3)
			iface_ops[interface] = &iface_ops_xaui;
		else
			iface_ops[interface] = &iface_ops_dis;
	} else if (OCTEON_IS_MODEL(OCTEON_CNF71XX)) {
		if (interface == 0) {
			union cvmx_mio_qlmx_cfg qlm_cfg;

			qlm_cfg.u64 = csr_rd(CVMX_MIO_QLMX_CFG(0));
			if (qlm_cfg.s.qlm_cfg == 2)
				iface_ops[interface] = &iface_ops_sgmii;
			else
				iface_ops[interface] = &iface_ops_dis;
		} else {
			iface_ops[interface] = &iface_ops_dis;
		}
	} else if (interface == 1 && OCTEON_IS_MODEL(OCTEON_CN63XX)) {
		iface_ops[interface] = &iface_ops_dis;
	} else {
		mode.u64 = csr_rd(CVMX_GMXX_INF_MODE(interface));

		if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
			switch (mode.cn63xx.mode) {
			case 0:
				iface_ops[interface] = &iface_ops_sgmii;
				break;
			case 1:
				iface_ops[interface] = &iface_ops_xaui;
				break;
			default:
				iface_ops[interface] = &iface_ops_dis;
				break;
			}
		} else {
			if (!mode.s.en)
				iface_ops[interface] = &iface_ops_dis;
			else if (mode.s.type)
				iface_ops[interface] = &iface_ops_gmii;
			else
				iface_ops[interface] = &iface_ops_rgmii;
		}
	}

	return iface_ops[interface]->mode;
}
/**
 * Get the operating mode of an interface. Depending on the Octeon
 * chip and configuration, this function returns an enumeration
 * of the type of packet I/O supported by an interface.
 *
 * @param xiface Interface to probe
 *
 * Return: Mode of the interface. Unknown or unsupported interfaces return
 * DISABLED.
 */
cvmx_helper_interface_mode_t cvmx_helper_interface_get_mode(int xiface)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (xi.interface < 0 ||
	    xi.interface >= cvmx_helper_get_number_of_interfaces())
		return CVMX_HELPER_INTERFACE_MODE_DISABLED;

	/*
	 * Check if the interface mode has already been cached. If it has,
	 * simply return it. Otherwise, fall through the rest of the code to
	 * determine the interface mode and cache it in iface_ops.
	 */
	if (iface_node_ops[xi.node][xi.interface]) {
		cvmx_helper_interface_mode_t mode;

		mode = iface_node_ops[xi.node][xi.interface]->mode;
		return mode;
	}

	/*
	 * OCTEON III models
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN70XX))
		return __cvmx_get_mode_cn70xx(xi.interface);

	if (OCTEON_IS_MODEL(OCTEON_CN78XX))
		return __cvmx_get_mode_cn78xx(xiface);

	if (OCTEON_IS_MODEL(OCTEON_CNF75XX)) {
		cvmx_helper_interface_mode_t mode;

		mode = __cvmx_get_mode_cnf75xx(xiface);
		return mode;
	}

	if (OCTEON_IS_MODEL(OCTEON_CN73XX)) {
		cvmx_helper_interface_mode_t mode;

		mode = __cvmx_get_mode_cn73xx(xiface);
		return mode;
	}

	/*
	 * Octeon II models
	 */
	if (OCTEON_IS_OCTEON2())
		return __cvmx_get_mode_octeon2(xi.interface);

	/*
	 * Octeon and Octeon Plus models
	 */
	if (xi.interface == 2) {
		iface_ops[xi.interface] = &iface_ops_npi;
	} else if (xi.interface == 3) {
		iface_ops[xi.interface] = &iface_ops_dis;
	} else {
		union cvmx_gmxx_inf_mode mode;

		mode.u64 = csr_rd(CVMX_GMXX_INF_MODE(xi.interface));

		if (!mode.s.en)
			iface_ops[xi.interface] = &iface_ops_dis;
		else if (mode.s.type)
			iface_ops[xi.interface] = &iface_ops_gmii;
		else
			iface_ops[xi.interface] = &iface_ops_rgmii;
	}

	return iface_ops[xi.interface]->mode;
}
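
/*
 * Example (illustrative): callers typically switch on the returned mode,
 * e.g.:
 *
 *	switch (cvmx_helper_interface_get_mode(xiface)) {
 *	case CVMX_HELPER_INTERFACE_MODE_SGMII:
 *		// per-lane 1G setup
 *		break;
 *	case CVMX_HELPER_INTERFACE_MODE_DISABLED:
 *	default:
 *		break;
 *	}
 */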
/**
 * Determine the actual number of hardware ports connected to an
 * interface. It doesn't set up the ports or enable them.
 *
 * @param xiface Interface to enumerate
 *
 * Return: The number of ports on the interface, negative on failure
 */
int cvmx_helper_interface_enumerate(int xiface)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	int result = 0;

	cvmx_helper_interface_get_mode(xiface);
	if (iface_node_ops[xi.node][xi.interface]->enumerate)
		result = iface_node_ops[xi.node][xi.interface]->enumerate(xiface);

	return result;
}
/**
 * This function probes an interface to determine the actual number of
 * hardware ports connected to it. It does some setup of the ports but
 * doesn't enable them. The main goal here is to set the global
 * interface_port_count[interface] correctly. Final hardware setup of
 * the ports will be performed later.
 *
 * @param xiface Interface to probe
 *
 * Return: Zero on success, negative on failure
 */
int cvmx_helper_interface_probe(int xiface)
{
	/*
	 * At this stage in the game we don't want packets to be
	 * moving yet. The following probe calls should perform
	 * hardware setup needed to determine port counts. Receive
	 * must still be disabled.
	 */
	int nports;
	int has_fcs;
	enum cvmx_pko_padding padding = CVMX_PKO_PADDING_NONE;
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	nports = -1;
	has_fcs = 0;

	cvmx_helper_interface_get_mode(xiface);
	if (iface_node_ops[xi.node][xi.interface]->probe)
		nports = iface_node_ops[xi.node][xi.interface]->probe(xiface);

	switch (iface_node_ops[xi.node][xi.interface]->mode) {
	/* These types don't support ports to IPD/PKO */
	case CVMX_HELPER_INTERFACE_MODE_DISABLED:
	case CVMX_HELPER_INTERFACE_MODE_PCIE:
		nports = 0;
		break;
	/* XAUI is a single high speed port */
	case CVMX_HELPER_INTERFACE_MODE_XAUI:
	case CVMX_HELPER_INTERFACE_MODE_RXAUI:
	case CVMX_HELPER_INTERFACE_MODE_XLAUI:
	case CVMX_HELPER_INTERFACE_MODE_XFI:
	case CVMX_HELPER_INTERFACE_MODE_10G_KR:
	case CVMX_HELPER_INTERFACE_MODE_40G_KR4:
	case CVMX_HELPER_INTERFACE_MODE_MIXED:
		has_fcs = 1;
		padding = CVMX_PKO_PADDING_60;
		break;
	/*
	 * RGMII/GMII/MII are all treated about the same. Most
	 * functions refer to these ports as RGMII.
	 */
	case CVMX_HELPER_INTERFACE_MODE_RGMII:
	case CVMX_HELPER_INTERFACE_MODE_GMII:
		padding = CVMX_PKO_PADDING_60;
		break;
	/*
	 * SPI4 can have 1-16 ports depending on the device at
	 * the other end.
	 */
	case CVMX_HELPER_INTERFACE_MODE_SPI:
		padding = CVMX_PKO_PADDING_60;
		break;
	/*
	 * SGMII can have 1-4 ports depending on how many are
	 * hooked up.
	 */
	case CVMX_HELPER_INTERFACE_MODE_SGMII:
	case CVMX_HELPER_INTERFACE_MODE_QSGMII:
		padding = CVMX_PKO_PADDING_60;
	case CVMX_HELPER_INTERFACE_MODE_PICMG:
		has_fcs = 1;
		break;
	/* PCI target Network Packet Interface */
	case CVMX_HELPER_INTERFACE_MODE_NPI:
		break;
	/*
	 * Special loopback only ports. These are not the same
	 * as other ports in loopback mode.
	 */
	case CVMX_HELPER_INTERFACE_MODE_LOOP:
		break;
	/* SRIO has 2^N ports, where N is number of interfaces */
	case CVMX_HELPER_INTERFACE_MODE_SRIO:
		break;
	case CVMX_HELPER_INTERFACE_MODE_ILK:
		padding = CVMX_PKO_PADDING_60;
		has_fcs = 1;
		break;
	case CVMX_HELPER_INTERFACE_MODE_AGL:
		has_fcs = 1;
		break;
	}

	if (nports == -1)
		return -1;

	if (!octeon_has_feature(OCTEON_FEATURE_PKND))
		has_fcs = 0;

	nports = __cvmx_helper_board_interface_probe(xiface, nports);
	__cvmx_helper_init_interface(xiface, nports, has_fcs, padding);
	/* Make sure all global variables propagate to other cores */
	CVMX_SYNCWS;

	return 0;
}
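
/*
 * Typical bring-up order (sketch, assuming a single node): probe every
 * interface first, then enable packet input once the whole path is
 * configured:
 *
 *	int i;
 *
 *	for (i = 0; i < cvmx_helper_get_number_of_interfaces(); i++)
 *		cvmx_helper_interface_probe(i);
 *
 *	// ... PKO / queue configuration goes here ...
 *	cvmx_helper_ipd_and_packet_input_enable();
 */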
/**
 * @INTERNAL
 * Setup backpressure.
 *
 * Return: Zero on success, negative on failure
 */
static int __cvmx_helper_global_setup_backpressure(int node)
{
	cvmx_qos_proto_t qos_proto;
	cvmx_qos_pkt_mode_t qos_mode;
	int port, xipdport;
	unsigned int bpmask;
	int interface, xiface, ports;
	int num_interfaces = cvmx_helper_get_number_of_interfaces();

	if (cvmx_rgmii_backpressure_dis) {
		qos_proto = CVMX_QOS_PROTO_NONE;
		qos_mode = CVMX_QOS_PKT_MODE_DROP;
	} else {
		qos_proto = CVMX_QOS_PROTO_PAUSE;
		qos_mode = CVMX_QOS_PKT_MODE_HWONLY;
	}

	for (interface = 0; interface < num_interfaces; interface++) {
		xiface = cvmx_helper_node_interface_to_xiface(node, interface);
		ports = cvmx_helper_ports_on_interface(xiface);

		switch (cvmx_helper_interface_get_mode(xiface)) {
		case CVMX_HELPER_INTERFACE_MODE_DISABLED:
		case CVMX_HELPER_INTERFACE_MODE_PCIE:
		case CVMX_HELPER_INTERFACE_MODE_SRIO:
		case CVMX_HELPER_INTERFACE_MODE_ILK:
		case CVMX_HELPER_INTERFACE_MODE_NPI:
		case CVMX_HELPER_INTERFACE_MODE_PICMG:
			break;
		case CVMX_HELPER_INTERFACE_MODE_LOOP:
		case CVMX_HELPER_INTERFACE_MODE_XAUI:
		case CVMX_HELPER_INTERFACE_MODE_RXAUI:
		case CVMX_HELPER_INTERFACE_MODE_XLAUI:
		case CVMX_HELPER_INTERFACE_MODE_XFI:
		case CVMX_HELPER_INTERFACE_MODE_10G_KR:
		case CVMX_HELPER_INTERFACE_MODE_40G_KR4:
			bpmask = (cvmx_rgmii_backpressure_dis) ? 0xF : 0;
			if (octeon_has_feature(OCTEON_FEATURE_BGX)) {
				for (port = 0; port < ports; port++) {
					xipdport = cvmx_helper_get_ipd_port(xiface, port);
					cvmx_bgx_set_flowctl_mode(xipdport, qos_proto, qos_mode);
				}
				cvmx_bgx_set_backpressure_override(xiface, bpmask);
			}
			break;
		case CVMX_HELPER_INTERFACE_MODE_RGMII:
		case CVMX_HELPER_INTERFACE_MODE_GMII:
		case CVMX_HELPER_INTERFACE_MODE_SPI:
		case CVMX_HELPER_INTERFACE_MODE_SGMII:
		case CVMX_HELPER_INTERFACE_MODE_QSGMII:
		case CVMX_HELPER_INTERFACE_MODE_MIXED:
			bpmask = (cvmx_rgmii_backpressure_dis) ? 0xF : 0;
			if (octeon_has_feature(OCTEON_FEATURE_BGX)) {
				for (port = 0; port < ports; port++) {
					xipdport = cvmx_helper_get_ipd_port(xiface, port);
					cvmx_bgx_set_flowctl_mode(xipdport, qos_proto, qos_mode);
				}
				cvmx_bgx_set_backpressure_override(xiface, bpmask);
			} else {
				cvmx_gmx_set_backpressure_override(interface, bpmask);
			}
			break;
		case CVMX_HELPER_INTERFACE_MODE_AGL:
			bpmask = (cvmx_rgmii_backpressure_dis) ? 0x1 : 0;
			cvmx_agl_set_backpressure_override(interface, bpmask);
			break;
		}
	}

	return 0;
}
/**
 * @INTERNAL
 * Verify the per port IPD backpressure is aligned properly.
 *
 * Return: Zero if working, non-zero if misaligned
 */
int __cvmx_helper_backpressure_is_misaligned(void)
{
	return 0;
}
/**
 * @INTERNAL
 * Enable packet input/output from the hardware. This function is
 * called after all internal setup is complete and IPD is enabled.
 * After this function completes, packets will be accepted from the
 * hardware ports. PKO should still be disabled to make sure packets
 * aren't sent out of partially set-up hardware.
 *
 * @param xiface Interface to enable
 *
 * Return: Zero on success, negative on failure
 */
int __cvmx_helper_packet_hardware_enable(int xiface)
{
	int result = 0;
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (iface_node_ops[xi.node][xi.interface]->enable)
		result = iface_node_ops[xi.node][xi.interface]->enable(xiface);
	result |= __cvmx_helper_board_hardware_enable(xiface);

	return result;
}
int cvmx_helper_ipd_and_packet_input_enable(void)
{
	return cvmx_helper_ipd_and_packet_input_enable_node(cvmx_get_node_num());
}
/**
 * Called after all internal packet IO paths are setup. This
 * function enables IPD/PIP and begins packet input and output.
 *
 * Return: Zero on success, negative on failure
 */
int cvmx_helper_ipd_and_packet_input_enable_node(int node)
{
	int num_interfaces;
	int interface;
	int num_ports;

	if (octeon_has_feature(OCTEON_FEATURE_PKI)) {
		cvmx_helper_pki_enable(node);
	} else {
		/* Enable IPD */
		cvmx_ipd_enable();
	}

	/*
	 * Time to enable hardware ports packet input and output. Note
	 * that at this point IPD/PIP must be fully functional and PKO
	 * must be disabled.
	 */
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		int xiface = cvmx_helper_node_interface_to_xiface(node, interface);

		num_ports = cvmx_helper_ports_on_interface(xiface);
		if (num_ports > 0)
			__cvmx_helper_packet_hardware_enable(xiface);
	}

	/*
	 * Finally enable PKO now that the entire path is up and running.
	 * On CN78XX-class (PKO3) chips PKO was already enabled by
	 * cvmx_pko_enable_78xx() during global init.
	 */
	if (!octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))
		cvmx_pko_enable();

	return 0;
}
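
/*
 * Usage sketch (illustrative, not part of this driver): when IPD is left
 * disabled during global init (cvmx_ipd_cfg.ipd_enable == 0), an application
 * can turn on packet input explicitly once its own setup is done. The error
 * handling shown here is a hypothetical example, not an API requirement.
 *
 *	if (cvmx_helper_ipd_and_packet_input_enable() != 0)
 *		printf("ERROR: could not enable packet input\n");
 */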
/**
 * Initialize the PIP, IPD, and PKO hardware to support
 * simple priority based queues for the ethernet ports. Each
 * port is configured with a number of priority queues based
 * on CVMX_PKO_QUEUES_PER_PORT_* where each queue is lower
 * priority than the previous.
 *
 * Return: Zero on success, non-zero on failure
 */
int cvmx_helper_initialize_packet_io_node(unsigned int node)
{
	int result = 0;
	int interface;
	int xiface;
	union cvmx_l2c_cfg l2c_cfg;
	union cvmx_smix_en smix_en;
	const int num_interfaces = cvmx_helper_get_number_of_interfaces();

	/*
	 * Tell L2 to give the IOB statically higher priority compared
	 * to the cores. This avoids conditions where IO blocks might
	 * be starved under very high L2 loads.
	 */
	if (OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3()) {
		union cvmx_l2c_ctl l2c_ctl;

		l2c_ctl.u64 = csr_rd_node(node, CVMX_L2C_CTL);
		l2c_ctl.s.rsp_arb_mode = 1;
		l2c_ctl.s.xmc_arb_mode = 0;
		csr_wr_node(node, CVMX_L2C_CTL, l2c_ctl.u64);
	} else {
		l2c_cfg.u64 = csr_rd(CVMX_L2C_CFG);
		l2c_cfg.s.lrf_arb_mode = 0;
		l2c_cfg.s.rfb_arb_mode = 0;
		csr_wr(CVMX_L2C_CFG, l2c_cfg.u64);
	}

	int smi_inf;
	int i;

	/* Newer chips have more than one SMI/MDIO interface */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CN78XX))
		smi_inf = 4;
	else
		smi_inf = 2;

	for (i = 0; i < smi_inf; i++) {
		/* Make sure SMI/MDIO is enabled so we can query PHYs */
		smix_en.u64 = csr_rd_node(node, CVMX_SMIX_EN(i));
		if (!smix_en.s.en) {
			smix_en.s.en = 1;
			csr_wr_node(node, CVMX_SMIX_EN(i), smix_en.u64);
		}
	}

	/* TODO: check whether this needs changes for multi-node systems */
	__cvmx_helper_init_port_valid();

	for (interface = 0; interface < num_interfaces; interface++) {
		xiface = cvmx_helper_node_interface_to_xiface(node, interface);
		result |= cvmx_helper_interface_probe(xiface);
	}

	/* PKO3 init precedes that of interfaces */
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		__cvmx_helper_init_port_config_data(node);
		result = cvmx_helper_pko3_init_global(node);
	} else {
		result = cvmx_helper_pko_init();
	}

	/* Errata SSO-29000: disable the SSO's power-saving conditional clocking */
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		cvmx_sso_ws_cfg_t cfg;

		cfg.u64 = csr_rd_node(node, CVMX_SSO_WS_CFG);
		cfg.s.sso_cclk_dis = 1;
		csr_wr_node(node, CVMX_SSO_WS_CFG, cfg.u64);
	}

	if (result < 0)
		return result;

	for (interface = 0; interface < num_interfaces; interface++) {
		xiface = cvmx_helper_node_interface_to_xiface(node, interface);
		/* Skip invalid/disabled interfaces */
		if (cvmx_helper_ports_on_interface(xiface) <= 0)
			continue;
		printf("Node %d Interface %d has %d ports (%s)\n", node, interface,
		       cvmx_helper_ports_on_interface(xiface),
		       cvmx_helper_interface_mode_to_string(
			       cvmx_helper_interface_get_mode(xiface)));

		result |= __cvmx_helper_ipd_setup_interface(xiface);
		if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))
			result |= cvmx_helper_pko3_init_interface(xiface);
		else
			result |= __cvmx_helper_interface_setup_pko(interface);
	}

	if (octeon_has_feature(OCTEON_FEATURE_PKI))
		result |= __cvmx_helper_pki_global_setup(node);
	else
		result |= __cvmx_helper_ipd_global_setup();

	/* Enable any flow control and backpressure */
	result |= __cvmx_helper_global_setup_backpressure(node);

	/* Export the application config if a hook is set */
	if (cvmx_export_app_config)
		result |= (*cvmx_export_app_config)();

	if (cvmx_ipd_cfg.ipd_enable && cvmx_pki_dflt_init[node])
		result |= cvmx_helper_ipd_and_packet_input_enable_node(node);

	return result;
}
/**
 * Initialize the PIP, IPD, and PKO hardware to support
 * simple priority based queues for the ethernet ports. Each
 * port is configured with a number of priority queues based
 * on CVMX_PKO_QUEUES_PER_PORT_* where each queue is lower
 * priority than the previous.
 *
 * Return: Zero on success, non-zero on failure
 */
int cvmx_helper_initialize_packet_io_global(void)
{
	unsigned int node = cvmx_get_node_num();

	return cvmx_helper_initialize_packet_io_node(node);
}
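
/*
 * Usage sketch (illustrative): the expected bring-up order is one call to
 * the global init on a single core, followed by the local init on every
 * core. The first-core test and the barrier are hypothetical application
 * code, standing in for whatever synchronization the application uses.
 *
 *	if (app_is_first_core())		// hypothetical
 *		cvmx_helper_initialize_packet_io_global();
 *	app_core_barrier();			// hypothetical
 *	cvmx_helper_initialize_packet_io_local();
 */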
/**
 * Does core local initialization for packet io
 *
 * Return: Zero on success, non-zero on failure
 */
int cvmx_helper_initialize_packet_io_local(void)
{
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))
		__cvmx_pko3_dq_table_setup();

	return 0;
}

struct cvmx_buffer_list {
	struct cvmx_buffer_list *next;
};
/**
 * Disables the sending of flow control (pause) frames on the specified
 * GMX port(s).
 *
 * @param interface Which interface (0 or 1)
 * @param port_mask Mask (4 bits) of which ports on the interface to disable
 *                  backpressure on.
 *                  1 => disable backpressure
 *                  0 => enable backpressure
 *
 * Return: 0 on success
 *         -1 on error
 */
int cvmx_gmx_set_backpressure_override(u32 interface, uint32_t port_mask)
{
	union cvmx_gmxx_tx_ovr_bp gmxx_tx_ovr_bp;

	/* Check for valid arguments */
	if (port_mask & ~0xf || interface & ~0x1)
		return -1;
	if (interface >= CVMX_HELPER_MAX_GMX)
		return -1;

	gmxx_tx_ovr_bp.u64 = 0;
	gmxx_tx_ovr_bp.s.en = port_mask;	/* Per port Enable back pressure override */
	gmxx_tx_ovr_bp.s.ign_full = port_mask;	/* Ignore the RX FIFO full when computing BP */
	csr_wr(CVMX_GMXX_TX_OVR_BP(interface), gmxx_tx_ovr_bp.u64);

	return 0;
}
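
/*
 * Example (sketch): stop pause frames on ports 0 and 1 of GMX interface 0.
 * A mask bit of 1 disables backpressure for that port, a bit of 0 enables
 * it, so ports 2 and 3 get backpressure enabled here.
 *
 *	if (cvmx_gmx_set_backpressure_override(0, 0x3) != 0)
 *		printf("ERROR: bad interface or port mask\n");
 */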
/**
 * Disables the sending of flow control (pause) frames on the specified
 * AGL (RGMII) port(s).
 *
 * @param interface Which interface (0 or 1)
 * @param port_mask Mask (4 bits) of which ports on the interface to disable
 *                  backpressure on.
 *                  1 => disable backpressure
 *                  0 => enable backpressure
 *
 * Return: 0 on success
 *         -1 on error
 */
int cvmx_agl_set_backpressure_override(u32 interface, uint32_t port_mask)
{
	union cvmx_agl_gmx_tx_ovr_bp agl_gmx_tx_ovr_bp;
	int port = cvmx_helper_agl_get_port(interface);

	/* Check for valid arguments */
	if (port == -1)
		return -1;

	agl_gmx_tx_ovr_bp.u64 = 0;
	/* Per port Enable back pressure override */
	agl_gmx_tx_ovr_bp.s.en = port_mask;
	/* Ignore the RX FIFO full when computing BP */
	agl_gmx_tx_ovr_bp.s.ign_full = port_mask;
	csr_wr(CVMX_GMXX_TX_OVR_BP(port), agl_gmx_tx_ovr_bp.u64);

	return 0;
}
/**
 * Helper function for global packet IO shutdown
 */
int cvmx_helper_shutdown_packet_io_global_cn78xx(int node)
{
	int num_interfaces = cvmx_helper_get_number_of_interfaces();
	cvmx_wqe_t *work;
	int interface;
	int result = 0;

	/* Shut down all interfaces and disable TX and RX on all ports */
	for (interface = 0; interface < num_interfaces; interface++) {
		int xiface = cvmx_helper_node_interface_to_xiface(node, interface);
		int index;
		int num_ports = cvmx_helper_ports_on_interface(xiface);

		if (num_ports > 4)
			num_ports = 4;

		cvmx_bgx_set_backpressure_override(xiface, 0);
		for (index = 0; index < num_ports; index++) {
			cvmx_helper_link_info_t link_info;

			if (!cvmx_helper_is_port_valid(xiface, index))
				continue;

			cvmx_helper_bgx_shutdown_port(xiface, index);

			/* Turn off link LEDs */
			link_info.u64 = 0;
			cvmx_helper_update_link_led(xiface, index, link_info);
		}
	}

	/* Stop input first */
	cvmx_helper_pki_shutdown(node);

	/* Retrieve all packets from the SSO and free them */
	result = 0;
	while ((work = cvmx_pow_work_request_sync(CVMX_POW_WAIT))) {
		cvmx_helper_free_pki_pkt_data(work);
		cvmx_wqe_pki_free(work);
		result++;
	}

	if (result > 0)
		debug("%s: Purged %d packets from SSO\n", __func__, result);

	/*
	 * No need to wait for PKO queues to drain,
	 * dq_close() drains the queues to NULL.
	 */

	/* Shutdown PKO interfaces */
	for (interface = 0; interface < num_interfaces; interface++) {
		int xiface = cvmx_helper_node_interface_to_xiface(node, interface);

		cvmx_helper_pko3_shut_interface(xiface);
	}

	/* Disable MAC address filtering */
	for (interface = 0; interface < num_interfaces; interface++) {
		int xiface = cvmx_helper_node_interface_to_xiface(node, interface);

		switch (cvmx_helper_interface_get_mode(xiface)) {
		case CVMX_HELPER_INTERFACE_MODE_XAUI:
		case CVMX_HELPER_INTERFACE_MODE_RXAUI:
		case CVMX_HELPER_INTERFACE_MODE_XLAUI:
		case CVMX_HELPER_INTERFACE_MODE_XFI:
		case CVMX_HELPER_INTERFACE_MODE_10G_KR:
		case CVMX_HELPER_INTERFACE_MODE_40G_KR4:
		case CVMX_HELPER_INTERFACE_MODE_SGMII:
		case CVMX_HELPER_INTERFACE_MODE_MIXED: {
			int index;
			int num_ports = cvmx_helper_ports_on_interface(xiface);

			for (index = 0; index < num_ports; index++) {
				if (!cvmx_helper_is_port_valid(xiface, index))
					continue;
				/* Reset MAC filtering */
				cvmx_helper_bgx_rx_adr_ctl(node, interface, index, 0, 0, 0);
			}
			break;
		}
		default:
			break;
		}
	}

	for (interface = 0; interface < num_interfaces; interface++) {
		int index;
		int xiface = cvmx_helper_node_interface_to_xiface(node, interface);
		int num_ports = cvmx_helper_ports_on_interface(xiface);

		for (index = 0; index < num_ports; index++) {
			/*
			 * Doing this twice should clear it since no packets
			 * can be received.
			 */
			cvmx_update_rx_activity_led(xiface, index, false);
			cvmx_update_rx_activity_led(xiface, index, false);
		}
	}

	/* Shutdown the PKO unit */
	result = cvmx_helper_pko3_shutdown(node);

	/* Release interface structures */
	__cvmx_helper_shutdown_interfaces();

	return result;
}
/**
 * Undo the initialization performed in
 * cvmx_helper_initialize_packet_io_global(). After calling this routine and the
 * local version on each core, packet IO for Octeon will be disabled and placed
 * in the initial reset state. It will then be safe to call the initialization
 * routines again later on. Note that this routine does not empty the FPA pools.
 * It frees all buffers used by the packet IO hardware to the FPA, so a function
 * emptying the FPA after shutdown should find all packet buffers in the FPA.
 *
 * Return: Zero on success, negative on failure.
 */
int cvmx_helper_shutdown_packet_io_global(void)
{
	const int timeout = 5; /* Wait up to 5 seconds for timeouts */
	int result = 0;
	int num_interfaces = cvmx_helper_get_number_of_interfaces();
	int interface;
	int num_ports;
	int index;
	struct cvmx_buffer_list *pool0_buffers;
	struct cvmx_buffer_list *pool0_buffers_tail;
	cvmx_wqe_t *work;
	union cvmx_ipd_ctl_status ipd_ctl_status;
	int wqe_pool = (int)cvmx_fpa_get_wqe_pool();
	int node = cvmx_get_node_num();
	cvmx_pcsx_mrx_control_reg_t control_reg;

	if (octeon_has_feature(OCTEON_FEATURE_BGX))
		return cvmx_helper_shutdown_packet_io_global_cn78xx(node);

	/* Step 1: Disable all backpressure */
	for (interface = 0; interface < num_interfaces; interface++) {
		cvmx_helper_interface_mode_t mode =
			cvmx_helper_interface_get_mode(interface);

		if (mode == CVMX_HELPER_INTERFACE_MODE_AGL)
			cvmx_agl_set_backpressure_override(interface, 0x1);
		else if (mode != CVMX_HELPER_INTERFACE_MODE_DISABLED)
			cvmx_gmx_set_backpressure_override(interface, 0xf);
	}

	/* Step 2: Wait for the PKO queues to drain */
	result = __cvmx_helper_pko_drain();
	if (result < 0) {
		debug("WARNING: %s: Failed to drain some PKO queues\n",
		      __func__);
	}
	/* Step 3: Disable TX and RX on all ports */
	for (interface = 0; interface < num_interfaces; interface++) {
		int xiface = cvmx_helper_node_interface_to_xiface(node,
								  interface);

		switch (cvmx_helper_interface_get_mode(interface)) {
		case CVMX_HELPER_INTERFACE_MODE_DISABLED:
		case CVMX_HELPER_INTERFACE_MODE_PCIE:
			/* Not a packet interface */
			break;
		case CVMX_HELPER_INTERFACE_MODE_NPI:
		case CVMX_HELPER_INTERFACE_MODE_SRIO:
		case CVMX_HELPER_INTERFACE_MODE_ILK:
			/*
			 * We don't handle the NPI/NPEI/SRIO packet
			 * engines. The caller must know these are
			 * idle.
			 */
			break;
		case CVMX_HELPER_INTERFACE_MODE_LOOP:
			/*
			 * Nothing needed. Once PKO is idle, the
			 * loopback devices must be idle.
			 */
			break;
		case CVMX_HELPER_INTERFACE_MODE_SPI:
			/*
			 * SPI cannot be disabled from Octeon. It is
			 * the responsibility of the caller to make
			 * sure SPI is idle before doing shutdown.
			 *
			 * Fall through and do the same processing as
			 * RGMII/GMII.
			 */
			fallthrough;
		case CVMX_HELPER_INTERFACE_MODE_GMII:
		case CVMX_HELPER_INTERFACE_MODE_RGMII:
			/* Disable outermost RX at the ASX block */
			csr_wr(CVMX_ASXX_RX_PRT_EN(interface), 0);
			num_ports = cvmx_helper_ports_on_interface(xiface);
			if (num_ports > 4)
				num_ports = 4;
			for (index = 0; index < num_ports; index++) {
				union cvmx_gmxx_prtx_cfg gmx_cfg;

				if (!cvmx_helper_is_port_valid(interface, index))
					continue;

				gmx_cfg.u64 = csr_rd(CVMX_GMXX_PRTX_CFG(index, interface));
				gmx_cfg.s.en = 0;
				csr_wr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
				/* Poll the GMX state machine waiting for it to become idle */
				csr_wr(CVMX_NPI_DBG_SELECT,
				       interface * 0x800 + index * 0x100 + 0x880);
				if (CVMX_WAIT_FOR_FIELD64(CVMX_DBG_DATA, union cvmx_dbg_data,
							  data & 7, ==, 0, timeout * 1000000)) {
					debug("GMX RX path timeout waiting for idle\n");
					result = -1;
				}
				if (CVMX_WAIT_FOR_FIELD64(CVMX_DBG_DATA, union cvmx_dbg_data,
							  data & 0xf, ==, 0, timeout * 1000000)) {
					debug("GMX TX path timeout waiting for idle\n");
					result = -1;
				}
			}
			/* Disable outermost TX at the ASX block */
			csr_wr(CVMX_ASXX_TX_PRT_EN(interface), 0);
			/* Disable interrupts for interface */
			csr_wr(CVMX_ASXX_INT_EN(interface), 0);
			csr_wr(CVMX_GMXX_TX_INT_EN(interface), 0);
			break;
		case CVMX_HELPER_INTERFACE_MODE_XAUI:
		case CVMX_HELPER_INTERFACE_MODE_RXAUI:
		case CVMX_HELPER_INTERFACE_MODE_SGMII:
		case CVMX_HELPER_INTERFACE_MODE_QSGMII:
		case CVMX_HELPER_INTERFACE_MODE_PICMG:
			num_ports = cvmx_helper_ports_on_interface(xiface);
			if (num_ports > 4)
				num_ports = 4;
			for (index = 0; index < num_ports; index++) {
				union cvmx_gmxx_prtx_cfg gmx_cfg;

				if (!cvmx_helper_is_port_valid(interface, index))
					continue;

				gmx_cfg.u64 = csr_rd(CVMX_GMXX_PRTX_CFG(index, interface));
				gmx_cfg.s.en = 0;
				csr_wr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
				if (CVMX_WAIT_FOR_FIELD64(CVMX_GMXX_PRTX_CFG(index, interface),
							  union cvmx_gmxx_prtx_cfg, rx_idle, ==, 1,
							  timeout * 1000000)) {
					debug("GMX RX path timeout waiting for idle\n");
					result = -1;
				}
				if (CVMX_WAIT_FOR_FIELD64(CVMX_GMXX_PRTX_CFG(index, interface),
							  union cvmx_gmxx_prtx_cfg, tx_idle, ==, 1,
							  timeout * 1000000)) {
					debug("GMX TX path timeout waiting for idle\n");
					result = -1;
				}
				/*
				 * For SGMII some PHYs require that the PCS
				 * interface be powered down and reset (i.e.
				 * Atheros/Qualcomm PHYs).
				 */
				if (cvmx_helper_interface_get_mode(interface) ==
				    CVMX_HELPER_INTERFACE_MODE_SGMII) {
					u64 reg;

					reg = CVMX_PCSX_MRX_CONTROL_REG(index, interface);
					/* Power down the interface */
					control_reg.u64 = csr_rd(reg);
					control_reg.s.pwr_dn = 1;
					csr_wr(reg, control_reg.u64);
					csr_rd(reg);
				}
			}
			break;
		case CVMX_HELPER_INTERFACE_MODE_AGL: {
			int port = cvmx_helper_agl_get_port(interface);
			union cvmx_agl_gmx_prtx_cfg agl_gmx_cfg;

			agl_gmx_cfg.u64 = csr_rd(CVMX_AGL_GMX_PRTX_CFG(port));
			agl_gmx_cfg.s.en = 0;
			csr_wr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_cfg.u64);
			if (CVMX_WAIT_FOR_FIELD64(CVMX_AGL_GMX_PRTX_CFG(port),
						  union cvmx_agl_gmx_prtx_cfg, rx_idle, ==, 1,
						  timeout * 1000000)) {
				debug("AGL RX path timeout waiting for idle\n");
				result = -1;
			}
			if (CVMX_WAIT_FOR_FIELD64(CVMX_AGL_GMX_PRTX_CFG(port),
						  union cvmx_agl_gmx_prtx_cfg, tx_idle, ==, 1,
						  timeout * 1000000)) {
				debug("AGL TX path timeout waiting for idle\n");
				result = -1;
			}
			break;
		}
		default:
			break;
		}
	}
	/* Step 4: Retrieve all packets from the POW and free them */
	while ((work = cvmx_pow_work_request_sync(CVMX_POW_WAIT))) {
		cvmx_helper_free_packet_data(work);
		cvmx_fpa1_free(work, wqe_pool, 0);
	}

	/* Step 5: Disable IPD */
	cvmx_ipd_disable();

	/*
	 * Step 6: Drain all prefetched buffers from IPD/PIP. Note that
	 * IPD/PIP have not been reset yet
	 */
	__cvmx_ipd_free_ptr();

	/* Step 7: Free the PKO command buffers and put PKO in reset */
	cvmx_pko_shutdown();

	/* Step 8: Disable MAC address filtering */
	for (interface = 0; interface < num_interfaces; interface++) {
		int xiface = cvmx_helper_node_interface_to_xiface(node, interface);

		switch (cvmx_helper_interface_get_mode(interface)) {
		case CVMX_HELPER_INTERFACE_MODE_DISABLED:
		case CVMX_HELPER_INTERFACE_MODE_PCIE:
		case CVMX_HELPER_INTERFACE_MODE_SRIO:
		case CVMX_HELPER_INTERFACE_MODE_ILK:
		case CVMX_HELPER_INTERFACE_MODE_NPI:
		case CVMX_HELPER_INTERFACE_MODE_LOOP:
			break;
		case CVMX_HELPER_INTERFACE_MODE_XAUI:
		case CVMX_HELPER_INTERFACE_MODE_RXAUI:
		case CVMX_HELPER_INTERFACE_MODE_GMII:
		case CVMX_HELPER_INTERFACE_MODE_RGMII:
		case CVMX_HELPER_INTERFACE_MODE_SPI:
		case CVMX_HELPER_INTERFACE_MODE_SGMII:
		case CVMX_HELPER_INTERFACE_MODE_QSGMII:
		case CVMX_HELPER_INTERFACE_MODE_PICMG:
			num_ports = cvmx_helper_ports_on_interface(xiface);
			if (num_ports > 4)
				num_ports = 4;
			for (index = 0; index < num_ports; index++) {
				if (!cvmx_helper_is_port_valid(interface, index))
					continue;
				csr_wr(CVMX_GMXX_RXX_ADR_CTL(index, interface), 1);
				csr_wr(CVMX_GMXX_RXX_ADR_CAM_EN(index, interface), 0);
				csr_wr(CVMX_GMXX_RXX_ADR_CAM0(index, interface), 0);
				csr_wr(CVMX_GMXX_RXX_ADR_CAM1(index, interface), 0);
				csr_wr(CVMX_GMXX_RXX_ADR_CAM2(index, interface), 0);
				csr_wr(CVMX_GMXX_RXX_ADR_CAM3(index, interface), 0);
				csr_wr(CVMX_GMXX_RXX_ADR_CAM4(index, interface), 0);
				csr_wr(CVMX_GMXX_RXX_ADR_CAM5(index, interface), 0);
			}
			break;
		case CVMX_HELPER_INTERFACE_MODE_AGL: {
			int port = cvmx_helper_agl_get_port(interface);

			csr_wr(CVMX_AGL_GMX_RXX_ADR_CTL(port), 1);
			csr_wr(CVMX_AGL_GMX_RXX_ADR_CAM_EN(port), 0);
			csr_wr(CVMX_AGL_GMX_RXX_ADR_CAM0(port), 0);
			csr_wr(CVMX_AGL_GMX_RXX_ADR_CAM1(port), 0);
			csr_wr(CVMX_AGL_GMX_RXX_ADR_CAM2(port), 0);
			csr_wr(CVMX_AGL_GMX_RXX_ADR_CAM3(port), 0);
			csr_wr(CVMX_AGL_GMX_RXX_ADR_CAM4(port), 0);
			csr_wr(CVMX_AGL_GMX_RXX_ADR_CAM5(port), 0);
			break;
		}
		default:
			break;
		}
	}

	/*
	 * Step 9: Drain all FPA buffers out of pool 0 before we reset
	 * IPD/PIP. This is needed to keep IPD_QUE0_FREE_PAGE_CNT in
	 * sync. We temporarily keep the buffers in the pool0_buffers
	 * list.
	 */
	pool0_buffers = NULL;
	pool0_buffers_tail = NULL;
	while (1) {
		struct cvmx_buffer_list *buffer = cvmx_fpa1_alloc(0);

		if (!buffer)
			break;
		buffer->next = NULL;
		if (!pool0_buffers)
			pool0_buffers = buffer;
		else
			pool0_buffers_tail->next = buffer;
		pool0_buffers_tail = buffer;
	}

	/* Step 10: Reset IPD and PIP */
	ipd_ctl_status.u64 = csr_rd(CVMX_IPD_CTL_STATUS);
	ipd_ctl_status.s.reset = 1;
	csr_wr(CVMX_IPD_CTL_STATUS, ipd_ctl_status.u64);

	/* Make sure IPD has finished reset. */
	if (OCTEON_IS_OCTEON2() || OCTEON_IS_MODEL(OCTEON_CN70XX)) {
		if (CVMX_WAIT_FOR_FIELD64(CVMX_IPD_CTL_STATUS, union cvmx_ipd_ctl_status,
					  rst_done, ==, 0, 1000)) {
			debug("IPD reset timeout waiting for idle\n");
			result = -1;
		}
	}

	/* Step 11: Restore the FPA buffers into pool 0 */
	while (pool0_buffers) {
		struct cvmx_buffer_list *n = pool0_buffers->next;

		cvmx_fpa1_free(pool0_buffers, 0, 0);
		pool0_buffers = n;
	}

	/* Step 12: Release interface structures */
	__cvmx_helper_shutdown_interfaces();

	return result;
}
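
/*
 * Usage sketch (illustrative): teardown mirrors the init order. Assuming the
 * application has already quiesced its own traffic, each core runs the local
 * shutdown, then one core runs the global shutdown; the barrier and the
 * first-core test are hypothetical application code.
 *
 *	cvmx_helper_shutdown_packet_io_local();		// on every core
 *	app_core_barrier();				// hypothetical
 *	if (app_is_first_core())			// hypothetical
 *		cvmx_helper_shutdown_packet_io_global();
 */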
/**
 * Does core local shutdown of packet io
 *
 * Return: Zero on success, non-zero on failure
 */
int cvmx_helper_shutdown_packet_io_local(void)
{
	/*
	 * Currently there is nothing to do per core. This may change
	 * in the future.
	 */
	return 0;
}
/**
 * Auto configure an IPD/PKO port link state and speed. This
 * function basically does the equivalent of:
 * cvmx_helper_link_set(ipd_port, cvmx_helper_link_get(ipd_port));
 *
 * @param xipd_port IPD/PKO port to auto configure
 *
 * Return: Link state after configure
 */
cvmx_helper_link_info_t cvmx_helper_link_autoconf(int xipd_port)
{
	cvmx_helper_link_info_t link_info;
	int xiface = cvmx_helper_get_interface_num(xipd_port);
	int index = cvmx_helper_get_interface_index_num(xipd_port);
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	int interface = xi.interface;

	if (interface == -1 || index == -1 ||
	    index >= cvmx_helper_ports_on_interface(xiface)) {
		link_info.u64 = 0;
		return link_info;
	}

	link_info = cvmx_helper_link_get(xipd_port);
	if (link_info.u64 == (__cvmx_helper_get_link_info(xiface, index)).u64)
		return link_info;

	if (!link_info.s.link_up)
		cvmx_error_disable_group(CVMX_ERROR_GROUP_ETHERNET, xipd_port);

	/* If we fail to set the link speed, port_link_info will not change */
	cvmx_helper_link_set(xipd_port, link_info);

	if (link_info.s.link_up)
		cvmx_error_enable_group(CVMX_ERROR_GROUP_ETHERNET, xipd_port);

	return link_info;
}
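
/*
 * Example (sketch): a periodic link poller built on autoconf; the ipd_port
 * value and the polling context are assumed to come from the caller.
 *
 *	cvmx_helper_link_info_t li = cvmx_helper_link_autoconf(ipd_port);
 *
 *	if (li.s.link_up)
 *		printf("Port %d: %d Mbps, %s duplex\n", ipd_port, li.s.speed,
 *		       li.s.full_duplex ? "full" : "half");
 */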
/**
 * Return the link state of an IPD/PKO port as returned by
 * auto negotiation. The result of this function may not match
 * Octeon's link config if auto negotiation has changed since
 * the last call to cvmx_helper_link_set().
 *
 * @param xipd_port IPD/PKO port to query
 *
 * Return: Link state
 */
cvmx_helper_link_info_t cvmx_helper_link_get(int xipd_port)
{
	cvmx_helper_link_info_t result;
	int xiface = cvmx_helper_get_interface_num(xipd_port);
	int index = cvmx_helper_get_interface_index_num(xipd_port);
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	struct cvmx_fdt_sfp_info *sfp_info;

	/*
	 * The default result will be a down link unless the code
	 * below changes it.
	 */
	result.u64 = 0;

	if (__cvmx_helper_xiface_is_null(xiface) || index == -1 ||
	    index >= cvmx_helper_ports_on_interface(xiface)) {
		return result;
	}

	if (iface_node_ops[xi.node][xi.interface]->link_get)
		result = iface_node_ops[xi.node][xi.interface]->link_get(xipd_port);

	if (xipd_port >= 0) {
		cvmx_helper_update_link_led(xiface, index, result);

		sfp_info = cvmx_helper_cfg_get_sfp_info(xiface, index);
		while (sfp_info) {
			/*
			 * Re-check module absence when the link is down or
			 * the module was previously absent.
			 */
			if (!result.s.link_up || sfp_info->last_mod_abs)
				cvmx_sfp_check_mod_abs(sfp_info, sfp_info->mod_abs_data);
			sfp_info = sfp_info->next_iface_sfp;
		}
	}

	return result;
}
/**
 * Configure an IPD/PKO port for the specified link state. This
 * function does not influence auto negotiation at the PHY level.
 * The passed link state must always match the link state returned
 * by cvmx_helper_link_get(). It is normally best to use
 * cvmx_helper_link_autoconf() instead.
 *
 * @param xipd_port IPD/PKO port to configure
 * @param link_info The new link state
 *
 * Return: Zero on success, negative on failure
 */
int cvmx_helper_link_set(int xipd_port, cvmx_helper_link_info_t link_info)
{
	int result = -1;
	int xiface = cvmx_helper_get_interface_num(xipd_port);
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	int index = cvmx_helper_get_interface_index_num(xipd_port);

	if (__cvmx_helper_xiface_is_null(xiface) || index == -1 ||
	    index >= cvmx_helper_ports_on_interface(xiface))
		return -1;

	if (iface_node_ops[xi.node][xi.interface]->link_set)
		result = iface_node_ops[xi.node][xi.interface]->link_set(xipd_port, link_info);

	/*
	 * Set the port_link_info here so that the link status is
	 * updated no matter how cvmx_helper_link_set is called. We
	 * don't change the value if link_set failed.
	 */
	if (result == 0)
		__cvmx_helper_set_link_info(xiface, index, link_info);

	return result;
}
/**
 * Configure a port for internal and/or external loopback. Internal loopback
 * causes packets sent by the port to be received by Octeon. External loopback
 * causes packets received from the wire to be sent out again.
 *
 * @param xipd_port IPD/PKO port to loopback.
 * @param enable_internal
 *                  Non zero if you want internal loopback
 * @param enable_external
 *                  Non zero if you want external loopback
 *
 * Return: Zero on success, negative on failure.
 */
int cvmx_helper_configure_loopback(int xipd_port, int enable_internal, int enable_external)
{
	int result = -1;
	int xiface = cvmx_helper_get_interface_num(xipd_port);
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	int index = cvmx_helper_get_interface_index_num(xipd_port);

	if (index >= cvmx_helper_ports_on_interface(xiface))
		return -1;

	cvmx_helper_interface_get_mode(xiface);

	if (iface_node_ops[xi.node][xi.interface]->loopback)
		result = iface_node_ops[xi.node][xi.interface]->loopback(xipd_port, enable_internal,
									 enable_external);

	return result;
}
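
/*
 * Example (sketch): run a port self-test in internal loopback, then restore
 * normal operation. xipd_port is assumed to be a valid IPD/PKO port.
 *
 *	cvmx_helper_configure_loopback(xipd_port, 1, 0);
 *	...send and verify test packets...
 *	cvmx_helper_configure_loopback(xipd_port, 0, 0);
 */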
void cvmx_helper_setup_simulator_io_buffer_counts(int node, int num_packet_buffers, int pko_buffers)
{
	if (octeon_has_feature(OCTEON_FEATURE_PKI)) {
		cvmx_helper_pki_set_dflt_pool_buffer(node, num_packet_buffers);
		cvmx_helper_pki_set_dflt_aura_buffer(node, num_packet_buffers);
	} else {
		cvmx_ipd_set_packet_pool_buffer_count(num_packet_buffers);
		cvmx_ipd_set_wqe_pool_buffer_count(num_packet_buffers);
		cvmx_pko_set_cmd_queue_pool_buffer_count(pko_buffers);
	}
}
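
/*
 * Example (sketch): shrink the simulator's buffer pools before the global
 * init to cut simulation time; the counts below are arbitrary.
 *
 *	cvmx_helper_setup_simulator_io_buffer_counts(0, 128, 64);
 *	cvmx_helper_initialize_packet_io_global();
 */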
void *cvmx_helper_mem_alloc(int node, uint64_t alloc_size, uint64_t align)
{
	s64 paddr;

	paddr = cvmx_bootmem_phy_alloc_range(alloc_size, align, cvmx_addr_on_node(node, 0ull),
					     cvmx_addr_on_node(node, 0xffffffffff));
	if (paddr <= 0ll) {
		printf("ERROR: %s failed size %u\n", __func__, (unsigned int)alloc_size);
		return NULL;
	}

	return cvmx_phys_to_ptr(paddr);
}

void cvmx_helper_mem_free(void *buffer, uint64_t size)
{
	__cvmx_bootmem_phy_free(cvmx_ptr_to_phys(buffer), size, 0);
}
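
/*
 * Example (sketch): grab a cache-line aligned scratch buffer from node-local
 * bootmem and hand it back when finished.
 *
 *	void *buf = cvmx_helper_mem_alloc(node, 4096, CVMX_CACHE_LINE_SIZE);
 *
 *	if (buf) {
 *		...use buf...
 *		cvmx_helper_mem_free(buf, 4096);
 *	}
 */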
int cvmx_helper_qos_config_init(cvmx_qos_proto_t qos_proto, cvmx_qos_config_t *qos_cfg)
{
	int i;

	memset(qos_cfg, 0, sizeof(cvmx_qos_config_t));
	qos_cfg->pkt_mode = CVMX_QOS_PKT_MODE_HWONLY;	/* Process PAUSEs in hardware only. */
	qos_cfg->pool_mode = CVMX_QOS_POOL_PER_PORT;	/* One Pool per BGX:LMAC. */
	qos_cfg->pktbuf_size = 2048;	/* Fit WQE + MTU in one buffer. */
	qos_cfg->aura_size = 1024;	/* 1K buffers typically enough for any application. */
	qos_cfg->pko_pfc_en = 1;	/* Enable PKO layout for PFC feature. */
	qos_cfg->vlan_num = 1;		/* For Stacked VLAN, use 2nd VLAN in the QPG algorithm. */
	qos_cfg->qos_proto = qos_proto;	/* Use PFC flow-control protocol. */
	qos_cfg->qpg_base = -1;		/* QPG Table index is undefined. */
	qos_cfg->p_time = 0x60;		/* PAUSE packets time window. */
	qos_cfg->p_interval = 0x10;	/* PAUSE packets interval. */
	for (i = 0; i < CVMX_QOS_NUM; i++) {
		qos_cfg->groups[i] = i;		/* SSO Groups = 0...7 */
		qos_cfg->group_prio[i] = i;	/* SSO Group priority = QOS. */
		qos_cfg->drop_thresh[i] = 99;	/* 99% of the Aura size. */
		qos_cfg->red_thresh[i] = 90;	/* 90% of the Aura size. */
		qos_cfg->bp_thresh[i] = 70;	/* 70% of the Aura size. */
	}

	return 0;
}
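
/*
 * Usage sketch (illustrative): a plausible PFC bring-up for one port using
 * the QOS helpers in this file; CVMX_QOS_PROTO_PFC is assumed from the
 * comments above, and error checking is omitted for brevity.
 *
 *	cvmx_qos_config_t qcfg;
 *
 *	cvmx_helper_qos_config_init(CVMX_QOS_PROTO_PFC, &qcfg);
 *	cvmx_helper_qos_port_setup(xipdport, &qcfg);
 *	cvmx_helper_qos_sso_setup(xipdport, &qcfg);
 *	cvmx_helper_qos_port_config_update(xipdport, &qcfg);
 */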
int cvmx_helper_qos_port_config_update(int xipdport, cvmx_qos_config_t *qos_cfg)
{
	cvmx_user_static_pko_queue_config_t pkocfg;
	cvmx_xport_t xp = cvmx_helper_ipd_port_to_xport(xipdport);
	int xiface = cvmx_helper_get_interface_num(xipdport);
	cvmx_xiface_t xi = cvmx_helper_xiface_to_node_interface(xiface);

	/* Configure PKO port for PFC SQ layout: */
	cvmx_helper_pko_queue_config_get(xp.node, &pkocfg);
	pkocfg.pknd.pko_cfg_iface[xi.interface].pfc_enable = 1;
	cvmx_helper_pko_queue_config_set(xp.node, &pkocfg);

	return 0;
}
int cvmx_helper_qos_port_setup(int xipdport, cvmx_qos_config_t *qos_cfg)
{
	const int channels = CVMX_QOS_NUM;
	int bufsize = qos_cfg->pktbuf_size;
	int aura_size = qos_cfg->aura_size;
	cvmx_xport_t xp = cvmx_helper_ipd_port_to_xport(xipdport);
	int node = xp.node;
	int ipdport = xp.port;
	int port = cvmx_helper_get_interface_index_num(xp.port);
	int xiface = cvmx_helper_get_interface_num(xipdport);
	cvmx_xiface_t xi = cvmx_helper_xiface_to_node_interface(xiface);
	cvmx_fpa3_pool_t gpool;
	cvmx_fpa3_gaura_t gaura;
	cvmx_bgxx_cmr_rx_ovr_bp_t ovrbp;
	struct cvmx_pki_qpg_config qpgcfg;
	struct cvmx_pki_style_config stcfg, stcfg_dflt;
	struct cvmx_pki_pkind_config pkcfg;
	int chan, bpid, group, qpg;
	int bpen, reden, dropen, passthr, dropthr, bpthr;
	int nbufs, pkind, style;
	char name[32];

	if (qos_cfg->pool_mode == CVMX_QOS_POOL_PER_PORT) {
		/* Allocate and setup packet Pool: */
		nbufs = aura_size * channels;
		sprintf(name, "QOS.P%d", ipdport);
		gpool = cvmx_fpa3_setup_fill_pool(node, -1 /*auto*/, name, bufsize, nbufs, NULL);
		if (!__cvmx_fpa3_pool_valid(gpool)) {
			printf("%s: Failed to setup FPA Pool\n", __func__);
			return -1;
		}
		for (chan = 0; chan < channels; chan++)
			qos_cfg->gpools[chan] = gpool;
	} else {
		printf("%s: Invalid pool_mode %d\n", __func__, qos_cfg->pool_mode);
		return -1;
	}

	/* Allocate QPG entries: */
	qos_cfg->qpg_base = cvmx_pki_qpg_entry_alloc(node, -1 /*auto*/, channels);
	if (qos_cfg->qpg_base < 0) {
		printf("%s: Failed to allocate QPG entry\n", __func__);
		return -1;
	}

	for (chan = 0; chan < channels; chan++) {
		/* Allocate and setup Aura, setup BP threshold: */
		gpool = qos_cfg->gpools[chan];
		sprintf(name, "QOS.A%d", ipdport + chan);
		gaura = cvmx_fpa3_set_aura_for_pool(gpool, -1 /*auto*/, name, bufsize, aura_size);
		if (!__cvmx_fpa3_aura_valid(gaura)) {
			printf("%s: Failed to setup FPA Aura for Channel %d\n", __func__, chan);
			return -1;
		}
		qos_cfg->gauras[chan] = gaura;
		bpen = 1;
		reden = 1;
		dropen = 1;
		dropthr = (qos_cfg->drop_thresh[chan] * 10 * aura_size) / 1000;
		passthr = (qos_cfg->red_thresh[chan] * 10 * aura_size) / 1000;
		bpthr = (qos_cfg->bp_thresh[chan] * 10 * aura_size) / 1000;
		cvmx_fpa3_setup_aura_qos(gaura, reden, passthr, dropthr, bpen, bpthr);
		cvmx_pki_enable_aura_qos(node, gaura.laura, reden, dropen, bpen);

		/* Allocate BPID, link Aura and Channel using BPID: */
		bpid = cvmx_pki_bpid_alloc(node, -1 /*auto*/);
		if (bpid < 0) {
			printf("%s: Failed to allocate BPID for channel %d\n",
			       __func__, chan);
			return -1;
		}
		qos_cfg->bpids[chan] = bpid;
		cvmx_pki_write_aura_bpid(node, gaura.laura, bpid);
		cvmx_pki_write_channel_bpid(node, ipdport + chan, bpid);

		/* Setup QPG entries: */
		group = qos_cfg->groups[chan];
		qpg = qos_cfg->qpg_base + chan;
		cvmx_pki_read_qpg_entry(node, qpg, &qpgcfg);
		qpgcfg.port_add = chan;
		qpgcfg.aura_num = gaura.laura;
		qpgcfg.grp_ok = (node << CVMX_WQE_GRP_NODE_SHIFT) | group;
		qpgcfg.grp_bad = (node << CVMX_WQE_GRP_NODE_SHIFT) | group;
		qpgcfg.grptag_ok = (node << CVMX_WQE_GRP_NODE_SHIFT) | 0;
		qpgcfg.grptag_bad = (node << CVMX_WQE_GRP_NODE_SHIFT) | 0;
		cvmx_pki_write_qpg_entry(node, qpg, &qpgcfg);
	}

	/* Allocate and setup STYLE: */
	cvmx_helper_pki_get_dflt_style(node, &stcfg_dflt);
	style = cvmx_pki_style_alloc(node, -1 /*auto*/);
	cvmx_pki_read_style_config(node, style, CVMX_PKI_CLUSTER_ALL, &stcfg);
	stcfg.tag_cfg = stcfg_dflt.tag_cfg;
	stcfg.parm_cfg.tag_type = CVMX_POW_TAG_TYPE_ORDERED;
	stcfg.parm_cfg.qpg_qos = CVMX_PKI_QPG_QOS_VLAN;
	stcfg.parm_cfg.qpg_base = qos_cfg->qpg_base;
	stcfg.parm_cfg.qpg_port_msb = 0;
	stcfg.parm_cfg.qpg_port_sh = 0;
	stcfg.parm_cfg.qpg_dis_grptag = 1;
	stcfg.parm_cfg.fcs_strip = 1;
	stcfg.parm_cfg.mbuff_size = bufsize - 64;	/* Do not use 100% of the buffer. */
	stcfg.parm_cfg.force_drop = 0;
	stcfg.parm_cfg.nodrop = 0;
	stcfg.parm_cfg.rawdrp = 0;
	stcfg.parm_cfg.cache_mode = 2;			/* 1st buffer in L2 */
	stcfg.parm_cfg.wqe_vs = qos_cfg->vlan_num;
	cvmx_pki_write_style_config(node, style, CVMX_PKI_CLUSTER_ALL, &stcfg);

	/* Setup PKIND: */
	pkind = cvmx_helper_get_pknd(xiface, port);
	cvmx_pki_read_pkind_config(node, pkind, &pkcfg);
	pkcfg.cluster_grp = 0;	/* OCTEON3 has only one cluster group = 0 */
	pkcfg.initial_style = style;
	pkcfg.initial_parse_mode = CVMX_PKI_PARSE_LA_TO_LG;
	cvmx_pki_write_pkind_config(node, pkind, &pkcfg);

	/* Setup parameters of the QOS packet and enable QOS flow-control: */
	cvmx_bgx_set_pause_pkt_param(xipdport, 0, 0x0180c2000001, 0x8808, qos_cfg->p_time,
				     qos_cfg->p_interval);
	cvmx_bgx_set_flowctl_mode(xipdport, qos_cfg->qos_proto, qos_cfg->pkt_mode);

	/* Enable PKI channel backpressure in the BGX: */
	ovrbp.u64 = csr_rd_node(node, CVMX_BGXX_CMR_RX_OVR_BP(xi.interface));
	ovrbp.s.en &= ~(1 << port);
	ovrbp.s.ign_fifo_bp &= ~(1 << port);
	csr_wr_node(node, CVMX_BGXX_CMR_RX_OVR_BP(xi.interface), ovrbp.u64);

	return 0;
}
int cvmx_helper_qos_sso_setup(int xipdport, cvmx_qos_config_t *qos_cfg)
{
	const int channels = CVMX_QOS_NUM;
	cvmx_sso_grpx_pri_t grppri;
	int chan, qos, group;
	cvmx_xport_t xp = cvmx_helper_ipd_port_to_xport(xipdport);
	int node = xp.node;

	for (chan = 0; chan < channels; chan++) {
		qos = cvmx_helper_qos2prio(chan);
		group = qos_cfg->groups[qos];
		grppri.u64 = csr_rd_node(node, CVMX_SSO_GRPX_PRI(group));
		grppri.s.pri = qos_cfg->group_prio[chan];
		csr_wr_node(node, CVMX_SSO_GRPX_PRI(group), grppri.u64);
	}

	return 0;
}
int cvmx_helper_get_chan_e_name(int chan, char *namebuf, int buflen)
{
	int n, dpichans;

	if ((unsigned int)chan >= CVMX_PKO3_IPD_NUM_MAX) {
		printf("%s: Channel %d is out of range (0..4095)\n", __func__, chan);
		return -1;
	}

	if (OCTEON_IS_MODEL(OCTEON_CN78XX))
		dpichans = 64;
	else
		dpichans = 128;

	if (chan >= 0 && chan < 64)
		n = snprintf(namebuf, buflen, "LBK%d", chan);
	else if (chan >= 0x100 && chan < (0x100 + dpichans))
		n = snprintf(namebuf, buflen, "DPI%d", chan - 0x100);
	else if (chan == 0x200)
		n = snprintf(namebuf, buflen, "NQM");
	else if (chan >= 0x240 && chan < (0x240 + (1 << 1) + 2))
		n = snprintf(namebuf, buflen, "SRIO%d:%d", (chan - 0x240) >> 1,
			     (chan - 0x240) & 0x1);
	else if (chan >= 0x400 && chan < (0x400 + (1 << 8) + 256))
		n = snprintf(namebuf, buflen, "ILK%d:%d", (chan - 0x400) >> 8,
			     (chan - 0x400) & 0xFF);
	else if (chan >= 0x800 && chan < (0x800 + (5 << 8) + (3 << 4) + 16))
		n = snprintf(namebuf, buflen, "BGX%d:%d:%d", (chan - 0x800) >> 8,
			     ((chan - 0x800) >> 4) & 0x3, (chan - 0x800) & 0xF);
	else
		n = snprintf(namebuf, buflen, "--");

	return n;
}
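
/*
 * Example (sketch): format a PKO3 channel number for a debug trace; channel
 * 0x800 decodes as the first BGX LMAC channel.
 *
 *	char name[16];
 *
 *	if (cvmx_helper_get_chan_e_name(0x800, name, sizeof(name)) > 0)
 *		printf("chan 0x800 is %s\n", name);	// "BGX0:0:0"
 */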
#ifdef CVMX_DUMP_DIAGNOSTICS
void cvmx_helper_dump_for_diagnostics(int node)
{
	if (!(OCTEON_IS_OCTEON3() && !OCTEON_IS_MODEL(OCTEON_CN70XX))) {
		printf("Diagnostics are not implemented for this model\n");
		return;
	}
#ifdef CVMX_DUMP_GSER
	{
		int qlm, num_qlms;

		num_qlms = cvmx_qlm_get_num();
		for (qlm = 0; qlm < num_qlms; qlm++) {
			cvmx_dump_gser_config_node(node, qlm);
			cvmx_dump_gser_status_node(node, qlm);
		}
	}
#endif
#ifdef CVMX_DUMP_BGX
	{
		int bgx;

		for (bgx = 0; bgx < CVMX_HELPER_MAX_GMX; bgx++) {
			cvmx_dump_bgx_config_node(node, bgx);
			cvmx_dump_bgx_status_node(node, bgx);
		}
	}
#endif
#ifdef CVMX_DUMP_PKI
	cvmx_pki_config_dump(node);
	cvmx_pki_stats_dump(node);
#endif
#ifdef CVMX_DUMP_PKO
	cvmx_helper_pko3_config_dump(node);
	cvmx_helper_pko3_stats_dump(node);
#endif
#ifdef CVMX_DUMP_SSO
	cvmx_sso_config_dump(node);
#endif
}
#endif