cvmx-helper-cfg.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 Marvell International Ltd.
 *
 * Helper Functions for the Configuration Framework
 */

#include <log.h>
#include <linux/delay.h>

#include <mach/cvmx-regs.h>
#include <mach/cvmx-csr.h>
#include <mach/cvmx-bootmem.h>
#include <mach/octeon-model.h>
#include <mach/cvmx-fuse.h>
#include <mach/octeon-feature.h>
#include <mach/cvmx-qlm.h>
#include <mach/octeon_qlm.h>
#include <mach/cvmx-pcie.h>
#include <mach/cvmx-coremask.h>

#include <mach/cvmx-agl-defs.h>
#include <mach/cvmx-bgxx-defs.h>
#include <mach/cvmx-gmxx-defs.h>
#include <mach/cvmx-ipd-defs.h>
#include <mach/cvmx-pki-defs.h>

#include <mach/cvmx-helper.h>
#include <mach/cvmx-helper-board.h>
#include <mach/cvmx-helper-fdt.h>
#include <mach/cvmx-helper-bgx.h>
#include <mach/cvmx-helper-cfg.h>
#include <mach/cvmx-helper-util.h>
#include <mach/cvmx-helper-pki.h>

#include <mach/cvmx-global-resources.h>
#include <mach/cvmx-pko-internal-ports-range.h>
#include <mach/cvmx-ilk.h>
#include <mach/cvmx-pip.h>

DECLARE_GLOBAL_DATA_PTR;

int cvmx_npi_max_pknds;
static bool port_cfg_data_initialized;

struct cvmx_cfg_port_param cvmx_cfg_port[CVMX_MAX_NODES][CVMX_HELPER_MAX_IFACE]
	[CVMX_HELPER_CFG_MAX_PORT_PER_IFACE];
/*
 * Indexed by the pko_port number
 */
static int __cvmx_cfg_pko_highest_queue;
struct cvmx_cfg_pko_port_param
cvmx_pko_queue_table[CVMX_HELPER_CFG_MAX_PKO_PORT] = {
	[0 ... CVMX_HELPER_CFG_MAX_PKO_PORT - 1] = {
		CVMX_HELPER_CFG_INVALID_VALUE,
		CVMX_HELPER_CFG_INVALID_VALUE
	}
};

cvmx_user_static_pko_queue_config_t
__cvmx_pko_queue_static_config[CVMX_MAX_NODES];

struct cvmx_cfg_pko_port_map
cvmx_cfg_pko_port_map[CVMX_HELPER_CFG_MAX_PKO_PORT] = {
	[0 ... CVMX_HELPER_CFG_MAX_PKO_PORT - 1] = {
		CVMX_HELPER_CFG_INVALID_VALUE,
		CVMX_HELPER_CFG_INVALID_VALUE,
		CVMX_HELPER_CFG_INVALID_VALUE
	}
};

/*
 * This array assists translation from ipd_port to pko_port.
 * The ``16'' is the rounded value for the 3rd 4-bit value of
 * ipd_port, used to differentiate ``interfaces.''
 */
static struct cvmx_cfg_pko_port_pair
ipd2pko_port_cache[16][CVMX_HELPER_CFG_MAX_PORT_PER_IFACE] = {
	[0 ... 15] = {
		[0 ... CVMX_HELPER_CFG_MAX_PORT_PER_IFACE - 1] = {
			CVMX_HELPER_CFG_INVALID_VALUE,
			CVMX_HELPER_CFG_INVALID_VALUE
		}
	}
};

/*
 * Options
 *
 * Each array element's initial value is also the option's default value.
 */
static u64 cvmx_cfg_opts[CVMX_HELPER_CFG_OPT_MAX] = {
	[0 ... CVMX_HELPER_CFG_OPT_MAX - 1] = 1
};

/*
 * MISC
 */
static int cvmx_cfg_max_pko_engines; /* # of PKO DMA engines allocated */
static int cvmx_pko_queue_alloc(u64 port, int count);
static void cvmx_init_port_cfg(void);
static const int dbg;

int __cvmx_helper_cfg_pknd(int xiface, int index)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	int pkind;

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();

	/*
	 * Only 8 PKNDs are assigned to ILK channels. The channels wrap
	 * if more than 8 are configured, so fix the index accordingly.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
		if (cvmx_helper_interface_get_mode(xiface) ==
		    CVMX_HELPER_INTERFACE_MODE_ILK)
			index %= 8;
	}

	pkind = cvmx_cfg_port[xi.node][xi.interface][index].ccpp_pknd;
	return pkind;
}

int __cvmx_helper_cfg_bpid(int xiface, int index)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();

	/*
	 * Only 8 BPIDs are assigned to ILK channels. The channels wrap
	 * if more than 8 are configured, so fix the index accordingly.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
		if (cvmx_helper_interface_get_mode(xiface) ==
		    CVMX_HELPER_INTERFACE_MODE_ILK)
			index %= 8;
	}

	return cvmx_cfg_port[xi.node][xi.interface][index].ccpp_bpid;
}

int __cvmx_helper_cfg_pko_port_base(int xiface, int index)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();

	return cvmx_cfg_port[xi.node][xi.interface][index].ccpp_pko_port_base;
}

int __cvmx_helper_cfg_pko_port_num(int xiface, int index)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();

	return cvmx_cfg_port[xi.node][xi.interface][index].ccpp_pko_num_ports;
}

int __cvmx_helper_cfg_pko_queue_num(int pko_port)
{
	return cvmx_pko_queue_table[pko_port].ccppp_num_queues;
}

int __cvmx_helper_cfg_pko_queue_base(int pko_port)
{
	return cvmx_pko_queue_table[pko_port].ccppp_queue_base;
}

int __cvmx_helper_cfg_pko_max_queue(void)
{
	return __cvmx_cfg_pko_highest_queue;
}

int __cvmx_helper_cfg_pko_max_engine(void)
{
	return cvmx_cfg_max_pko_engines;
}

int cvmx_helper_cfg_opt_set(cvmx_helper_cfg_option_t opt, uint64_t val)
{
	if (opt >= CVMX_HELPER_CFG_OPT_MAX)
		return -1;

	cvmx_cfg_opts[opt] = val;
	return 0;
}

uint64_t cvmx_helper_cfg_opt_get(cvmx_helper_cfg_option_t opt)
{
	if (opt >= CVMX_HELPER_CFG_OPT_MAX)
		return (uint64_t)CVMX_HELPER_CFG_INVALID_VALUE;

	return cvmx_cfg_opts[opt];
}
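
/*
 * Usage sketch (illustrative, not called in this file): set an option
 * before the helper init path runs, then read it back. The option name
 * CVMX_HELPER_CFG_OPT_USE_DWB is assumed to come from cvmx-helper-cfg.h.
 *
 *	cvmx_helper_cfg_opt_set(CVMX_HELPER_CFG_OPT_USE_DWB, 0);
 *	if (cvmx_helper_cfg_opt_get(CVMX_HELPER_CFG_OPT_USE_DWB) == 0)
 *		debug("DWB use disabled\n");
 */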
/*
 * Initialize the queue allocation list. The existing static allocation
 * result is used as a starting point to ensure backward compatibility.
 *
 * Return: 0 on success
 *         -1 on failure
 */
int cvmx_pko_queue_grp_alloc(u64 start, uint64_t end, uint64_t count)
{
	u64 port;
	int ret_val;

	for (port = start; port < end; port++) {
		ret_val = cvmx_pko_queue_alloc(port, count);
		if (ret_val == -1) {
			printf("ERROR: %s: Failed to allocate queue for port=%d count=%d\n",
			       __func__, (int)port, (int)count);
			return ret_val;
		}
	}

	return 0;
}

int cvmx_pko_queue_init_from_cvmx_config_non_pknd(void)
{
	int ret_val = -1;
	u64 count, start, end;

	start = 0;
	end = __cvmx_pko_queue_static_config[0].non_pknd.pko_ports_per_interface[0];
	count = __cvmx_pko_queue_static_config[0].non_pknd.pko_queues_per_port_interface[0];
	ret_val = cvmx_pko_queue_grp_alloc(start, end, count);
	if (ret_val != 0)
		return -1;

	start = 16;
	end = start + __cvmx_pko_queue_static_config[0].non_pknd.pko_ports_per_interface[1];
	count = __cvmx_pko_queue_static_config[0].non_pknd.pko_queues_per_port_interface[1];
	ret_val = cvmx_pko_queue_grp_alloc(start, end, count);
	if (ret_val != 0)
		return -1;

	if (OCTEON_IS_MODEL(OCTEON_CN70XX)) {
		/* Interface 4: AGL, PKO port 24 only, DPI 32-35 */
		start = 24;
		end = start + 1;
		count = __cvmx_pko_queue_static_config[0].non_pknd.pko_queues_per_port_interface[4];
		ret_val = cvmx_pko_queue_grp_alloc(start, end, count);
		if (ret_val != 0)
			return -1;

		end = 32; /* DPI first PKO port */
	}

	start = end;
	end = 36;
	count = __cvmx_pko_queue_static_config[0].non_pknd.pko_queues_per_port_pci;
	ret_val = cvmx_pko_queue_grp_alloc(start, end, count);
	if (ret_val != 0)
		return -1;

	start = end;
	end = 40;
	count = __cvmx_pko_queue_static_config[0].non_pknd.pko_queues_per_port_loop;
	ret_val = cvmx_pko_queue_grp_alloc(start, end, count);
	if (ret_val != 0)
		return -1;

	start = end;
	end = 42;
	count = __cvmx_pko_queue_static_config[0].non_pknd.pko_queues_per_port_srio[0];
	ret_val = cvmx_pko_queue_grp_alloc(start, end, count);
	if (ret_val != 0)
		return -1;

	start = end;
	end = 44;
	count = __cvmx_pko_queue_static_config[0].non_pknd.pko_queues_per_port_srio[1];
	ret_val = cvmx_pko_queue_grp_alloc(start, end, count);
	if (ret_val != 0)
		return -1;

	start = end;
	end = 46;
	count = __cvmx_pko_queue_static_config[0].non_pknd.pko_queues_per_port_srio[2];
	ret_val = cvmx_pko_queue_grp_alloc(start, end, count);
	if (ret_val != 0)
		return -1;

	start = end;
	end = 48;
	count = __cvmx_pko_queue_static_config[0].non_pknd.pko_queues_per_port_srio[3];
	ret_val = cvmx_pko_queue_grp_alloc(start, end, count);
	if (ret_val != 0)
		return -1;

	return 0;
}

int cvmx_helper_pko_queue_config_get(int node, cvmx_user_static_pko_queue_config_t *cfg)
{
	*cfg = __cvmx_pko_queue_static_config[node];
	return 0;
}

int cvmx_helper_pko_queue_config_set(int node, cvmx_user_static_pko_queue_config_t *cfg)
{
	__cvmx_pko_queue_static_config[node] = *cfg;
	return 0;
}
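
/*
 * Typical read-modify-write of the static queue configuration
 * (illustrative sketch; the pko_cfg_loop field is referenced elsewhere
 * in this file, and the full layout lives in cvmx-helper-cfg.h):
 *
 *	cvmx_user_static_pko_queue_config_t cfg;
 *
 *	cvmx_helper_pko_queue_config_get(0, &cfg);
 *	cfg.pknd.pko_cfg_loop.queues_per_port = 2;
 *	cvmx_helper_pko_queue_config_set(0, &cfg);
 */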
static int queue_range_init;

int init_cvmx_pko_que_range(void)
{
	int rv = 0;

	if (queue_range_init)
		return 0;

	queue_range_init = 1;
	rv = cvmx_create_global_resource_range(CVMX_GR_TAG_PKO_QUEUES,
					       CVMX_HELPER_CFG_MAX_PKO_QUEUES);
	if (rv != 0)
		printf("ERROR: %s: Failed to initialize pko queues range\n",
		       __func__);

	return rv;
}

/*
 * Get a block of "count" queues for "port"
 *
 * @param port  the port for which the queues are requested
 * @param count the number of queues requested
 *
 * Return: 0 on success
 *         -1 on failure
 */
static int cvmx_pko_queue_alloc(u64 port, int count)
{
	int ret_val = -1;
	int highest_queue;

	init_cvmx_pko_que_range();

	if (cvmx_pko_queue_table[port].ccppp_num_queues == count)
		return cvmx_pko_queue_table[port].ccppp_queue_base;

	if (cvmx_pko_queue_table[port].ccppp_num_queues > 0) {
		printf("WARNING: %s port=%d already has %d queues\n",
		       __func__, (int)port,
		       (int)cvmx_pko_queue_table[port].ccppp_num_queues);
		return -1;
	}

	if (port >= CVMX_HELPER_CFG_MAX_PKO_QUEUES) {
		printf("ERROR: %s port=%d > %d\n", __func__, (int)port,
		       CVMX_HELPER_CFG_MAX_PKO_QUEUES);
		return -1;
	}

	ret_val = cvmx_allocate_global_resource_range(CVMX_GR_TAG_PKO_QUEUES,
						      port, count, 1);
	debug("%s: pko_e_port=%i q_base=%i q_count=%i\n",
	      __func__, (int)port, ret_val, (int)count);

	if (ret_val == -1)
		return ret_val;

	cvmx_pko_queue_table[port].ccppp_queue_base = ret_val;
	cvmx_pko_queue_table[port].ccppp_num_queues = count;

	highest_queue = ret_val + count - 1;
	if (highest_queue > __cvmx_cfg_pko_highest_queue)
		__cvmx_cfg_pko_highest_queue = highest_queue;

	return 0;
}
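
/*
 * Illustrative alloc/free pairing (assumed caller within this file,
 * since cvmx_pko_queue_alloc() is static): request four queues for a
 * PKO port and release them later. Note the function above returns 0
 * (or the existing base on a repeat call with the same count), so only
 * -1 signals failure.
 *
 *	if (cvmx_pko_queue_alloc(port, 4) == -1)
 *		printf("queue alloc failed for port=%d\n", (int)port);
 *	...
 *	cvmx_pko_queue_free(port);
 */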
/*
 * Return the queues for "port"
 *
 * @param port the port for which the queues are returned
 *
 * Return: 0 on success
 *         -1 on failure
 */
int cvmx_pko_queue_free(uint64_t port)
{
	int ret_val = -1;

	init_cvmx_pko_que_range();

	if (port >= CVMX_HELPER_CFG_MAX_PKO_QUEUES) {
		debug("ERROR: %s port=%d > %d", __func__, (int)port,
		      CVMX_HELPER_CFG_MAX_PKO_QUEUES);
		return -1;
	}

	ret_val = cvmx_free_global_resource_range_with_base(
		CVMX_GR_TAG_PKO_QUEUES,
		cvmx_pko_queue_table[port].ccppp_queue_base,
		cvmx_pko_queue_table[port].ccppp_num_queues);
	if (ret_val != 0)
		return ret_val;

	cvmx_pko_queue_table[port].ccppp_num_queues = 0;
	cvmx_pko_queue_table[port].ccppp_queue_base = CVMX_HELPER_CFG_INVALID_VALUE;

	return 0;
}

void cvmx_pko_queue_free_all(void)
{
	int i;

	for (i = 0; i < CVMX_HELPER_CFG_MAX_PKO_PORT; i++)
		if (cvmx_pko_queue_table[i].ccppp_queue_base !=
		    CVMX_HELPER_CFG_INVALID_VALUE)
			cvmx_pko_queue_free(i);
}

void cvmx_pko_queue_show(void)
{
	int i;

	cvmx_show_global_resource_range(CVMX_GR_TAG_PKO_QUEUES);

	for (i = 0; i < CVMX_HELPER_CFG_MAX_PKO_PORT; i++)
		if (cvmx_pko_queue_table[i].ccppp_queue_base !=
		    CVMX_HELPER_CFG_INVALID_VALUE)
			debug("port=%d que_base=%d que_num=%d\n", i,
			      (int)cvmx_pko_queue_table[i].ccppp_queue_base,
			      (int)cvmx_pko_queue_table[i].ccppp_num_queues);
}

void cvmx_helper_cfg_show_cfg(void)
{
	int i, j;

	for (i = 0; i < cvmx_helper_get_number_of_interfaces(); i++) {
		debug("%s: interface%d mode %10s nports%4d\n", __func__, i,
		      cvmx_helper_interface_mode_to_string(cvmx_helper_interface_get_mode(i)),
		      cvmx_helper_interface_enumerate(i));

		for (j = 0; j < cvmx_helper_interface_enumerate(i); j++) {
			debug("\tpknd[%i][%d]%d", i, j,
			      __cvmx_helper_cfg_pknd(i, j));
			debug(" pko_port_base[%i][%d]%d", i, j,
			      __cvmx_helper_cfg_pko_port_base(i, j));
			debug(" pko_port_num[%i][%d]%d\n", i, j,
			      __cvmx_helper_cfg_pko_port_num(i, j));
		}
	}

	for (i = 0; i < CVMX_HELPER_CFG_MAX_PKO_PORT; i++) {
		if (__cvmx_helper_cfg_pko_queue_base(i) !=
		    CVMX_HELPER_CFG_INVALID_VALUE) {
			debug("%s: pko_port%d qbase%d nqueues%d interface%d index%d\n",
			      __func__, i, __cvmx_helper_cfg_pko_queue_base(i),
			      __cvmx_helper_cfg_pko_queue_num(i),
			      __cvmx_helper_cfg_pko_port_interface(i),
			      __cvmx_helper_cfg_pko_port_index(i));
		}
	}
}

/*
 * Initialize cvmx_cfg_pko_port_map
 */
void cvmx_helper_cfg_init_pko_port_map(void)
{
	int i, j, k;
	int pko_eid;
	int pko_port_base, pko_port_max;
	cvmx_helper_interface_mode_t mode;

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();

	/*
	 * One pko_eid is allocated to each port except for ILK, NPI, and
	 * LOOP. Each of the three has one eid.
	 */
	pko_eid = 0;
	for (i = 0; i < cvmx_helper_get_number_of_interfaces(); i++) {
		mode = cvmx_helper_interface_get_mode(i);
		for (j = 0; j < cvmx_helper_interface_enumerate(i); j++) {
			pko_port_base = cvmx_cfg_port[0][i][j].ccpp_pko_port_base;
			pko_port_max = pko_port_base + cvmx_cfg_port[0][i][j].ccpp_pko_num_ports;
			if (!octeon_has_feature(OCTEON_FEATURE_PKO3)) {
				cvmx_helper_cfg_assert(pko_port_base !=
						       CVMX_HELPER_CFG_INVALID_VALUE);
				cvmx_helper_cfg_assert(pko_port_max >= pko_port_base);
			}

			for (k = pko_port_base; k < pko_port_max; k++) {
				cvmx_cfg_pko_port_map[k].ccppl_interface = i;
				cvmx_cfg_pko_port_map[k].ccppl_index = j;
				cvmx_cfg_pko_port_map[k].ccppl_eid = pko_eid;
			}

			if (!(mode == CVMX_HELPER_INTERFACE_MODE_NPI ||
			      mode == CVMX_HELPER_INTERFACE_MODE_LOOP ||
			      mode == CVMX_HELPER_INTERFACE_MODE_ILK))
				pko_eid++;
		}

		if (mode == CVMX_HELPER_INTERFACE_MODE_NPI ||
		    mode == CVMX_HELPER_INTERFACE_MODE_LOOP ||
		    mode == CVMX_HELPER_INTERFACE_MODE_ILK)
			pko_eid++;
	}

	/*
	 * Legal pko_eids [0, 0x13] should not be exhausted.
	 */
	if (!octeon_has_feature(OCTEON_FEATURE_PKO3))
		cvmx_helper_cfg_assert(pko_eid <= 0x14);

	cvmx_cfg_max_pko_engines = pko_eid;
}

void cvmx_helper_cfg_set_jabber_and_frame_max(void)
{
	int interface, port;
	/* Set the frame max size and jabber size to 65535 */
	const unsigned int max_frame = 65535;

	// FIXME: should support node argument for remote node init
	if (octeon_has_feature(OCTEON_FEATURE_BGX)) {
		int ipd_port;
		int node = cvmx_get_node_num();

		for (interface = 0;
		     interface < cvmx_helper_get_number_of_interfaces();
		     interface++) {
			int xiface = cvmx_helper_node_interface_to_xiface(node, interface);
			cvmx_helper_interface_mode_t imode = cvmx_helper_interface_get_mode(xiface);
			int num_ports = cvmx_helper_ports_on_interface(xiface);

			// FIXME: should be an easier way to determine
			// that an interface is Ethernet/BGX
			switch (imode) {
			case CVMX_HELPER_INTERFACE_MODE_SGMII:
			case CVMX_HELPER_INTERFACE_MODE_XAUI:
			case CVMX_HELPER_INTERFACE_MODE_RXAUI:
			case CVMX_HELPER_INTERFACE_MODE_XLAUI:
			case CVMX_HELPER_INTERFACE_MODE_XFI:
			case CVMX_HELPER_INTERFACE_MODE_10G_KR:
			case CVMX_HELPER_INTERFACE_MODE_40G_KR4:
				for (port = 0; port < num_ports; port++) {
					ipd_port = cvmx_helper_get_ipd_port(xiface, port);
					cvmx_pki_set_max_frm_len(ipd_port, max_frame);
					cvmx_helper_bgx_set_jabber(xiface, port, max_frame);
				}
				break;
			default:
				break;
			}
		}
	} else {
		/*
		 * Set the frame max size and jabber size to 65535, as the
		 * defaults are too small.
		 */
		for (interface = 0;
		     interface < cvmx_helper_get_number_of_interfaces();
		     interface++) {
			int xiface = cvmx_helper_node_interface_to_xiface(cvmx_get_node_num(),
									  interface);
			cvmx_helper_interface_mode_t imode = cvmx_helper_interface_get_mode(xiface);
			int num_ports = cvmx_helper_ports_on_interface(xiface);

			switch (imode) {
			case CVMX_HELPER_INTERFACE_MODE_SGMII:
			case CVMX_HELPER_INTERFACE_MODE_QSGMII:
			case CVMX_HELPER_INTERFACE_MODE_XAUI:
			case CVMX_HELPER_INTERFACE_MODE_RXAUI:
				for (port = 0; port < num_ports; port++)
					csr_wr(CVMX_GMXX_RXX_JABBER(port, interface), 65535);
				/* Set max and min value for frame check */
				cvmx_pip_set_frame_check(interface, -1);
				break;

			case CVMX_HELPER_INTERFACE_MODE_RGMII:
			case CVMX_HELPER_INTERFACE_MODE_GMII:
				/* Set max and min value for frame check */
				cvmx_pip_set_frame_check(interface, -1);
				for (port = 0; port < num_ports; port++) {
					csr_wr(CVMX_GMXX_RXX_FRM_MAX(port, interface), 65535);
					csr_wr(CVMX_GMXX_RXX_JABBER(port, interface), 65535);
				}
				break;

			case CVMX_HELPER_INTERFACE_MODE_ILK:
				/* Set max and min value for frame check */
				cvmx_pip_set_frame_check(interface, -1);
				for (port = 0; port < num_ports; port++) {
					int ipd_port = cvmx_helper_get_ipd_port(interface, port);

					cvmx_ilk_enable_la_header(ipd_port, 0);
				}
				break;

			case CVMX_HELPER_INTERFACE_MODE_SRIO:
				/* Set max and min value for frame check */
				cvmx_pip_set_frame_check(interface, -1);
				break;

			case CVMX_HELPER_INTERFACE_MODE_AGL:
				/* Set max and min value for frame check */
				cvmx_pip_set_frame_check(interface, -1);
				csr_wr(CVMX_AGL_GMX_RXX_FRM_MAX(0), 65535);
				csr_wr(CVMX_AGL_GMX_RXX_JABBER(0), 65535);
				break;

			default:
				break;
			}
		}
	}
}

/**
 * Enable storing short packets only in the WQE,
 * unless NO_WPTR is set, which already has the same effect
 */
void cvmx_helper_cfg_store_short_packets_in_wqe(void)
{
	int interface, port;
	cvmx_ipd_ctl_status_t ipd_ctl_status;
	unsigned int dyn_rs = 1;

	if (octeon_has_feature(OCTEON_FEATURE_PKI))
		return;

	/* NO_WPTR combines WQE with 1st MBUF, RS is redundant */
	ipd_ctl_status.u64 = csr_rd(CVMX_IPD_CTL_STATUS);
	if (ipd_ctl_status.s.no_wptr) {
		dyn_rs = 0;
		/* Note: consider also setting 'ignrs' when NO_WPTR is set */
	}

	for (interface = 0; interface < cvmx_helper_get_number_of_interfaces(); interface++) {
		int num_ports = cvmx_helper_ports_on_interface(interface);

		for (port = 0; port < num_ports; port++) {
			cvmx_pip_port_cfg_t port_cfg;
			int pknd = port;

			if (octeon_has_feature(OCTEON_FEATURE_PKND))
				pknd = cvmx_helper_get_pknd(interface, port);
			else
				pknd = cvmx_helper_get_ipd_port(interface, port);

			port_cfg.u64 = csr_rd(CVMX_PIP_PRT_CFGX(pknd));
			port_cfg.s.dyn_rs = dyn_rs;
			csr_wr(CVMX_PIP_PRT_CFGX(pknd), port_cfg.u64);
		}
	}
}

int __cvmx_helper_cfg_pko_port_interface(int pko_port)
{
	return cvmx_cfg_pko_port_map[pko_port].ccppl_interface;
}

int __cvmx_helper_cfg_pko_port_index(int pko_port)
{
	return cvmx_cfg_pko_port_map[pko_port].ccppl_index;
}

int __cvmx_helper_cfg_pko_port_eid(int pko_port)
{
	return cvmx_cfg_pko_port_map[pko_port].ccppl_eid;
}

#define IPD2PKO_CACHE_Y(ipd_port)	((ipd_port) >> 8)
#define IPD2PKO_CACHE_X(ipd_port)	((ipd_port) & 0xff)

static inline int __cvmx_helper_cfg_ipd2pko_cachex(int ipd_port)
{
	int ipd_x = IPD2PKO_CACHE_X(ipd_port);

	if (ipd_port & 0x800)
		ipd_x = (ipd_x >> 4) & 3;
	return ipd_x;
}
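
/*
 * Worked example of the indexing above (values for illustration only):
 * ipd_port 0x103 gives Y = 0x103 >> 8 = 1 and X = 0x03. For a port with
 * bit 11 set, e.g. 0x836, X starts as 0x36 and is folded down to
 * (0x36 >> 4) & 3 = 3.
 */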
/*
 * ipd_port to pko_port translation cache
 */
int __cvmx_helper_cfg_init_ipd2pko_cache(void)
{
	int i, j, n;
	int ipd_y, ipd_x, ipd_port;

	for (i = 0; i < cvmx_helper_get_number_of_interfaces(); i++) {
		n = cvmx_helper_interface_enumerate(i);

		for (j = 0; j < n; j++) {
			ipd_port = cvmx_helper_get_ipd_port(i, j);
			ipd_y = IPD2PKO_CACHE_Y(ipd_port);
			ipd_x = __cvmx_helper_cfg_ipd2pko_cachex(ipd_port);
			ipd2pko_port_cache[ipd_y][ipd_x] =
				(struct cvmx_cfg_pko_port_pair){
					__cvmx_helper_cfg_pko_port_base(i, j),
					__cvmx_helper_cfg_pko_port_num(i, j)
				};
		}
	}

	return 0;
}

int cvmx_helper_cfg_ipd2pko_port_base(int ipd_port)
{
	int ipd_y, ipd_x;

	/* Internal PKO ports are not present in PKO3 */
	if (octeon_has_feature(OCTEON_FEATURE_PKI))
		return ipd_port;

	ipd_y = IPD2PKO_CACHE_Y(ipd_port);
	ipd_x = __cvmx_helper_cfg_ipd2pko_cachex(ipd_port);

	return ipd2pko_port_cache[ipd_y][ipd_x].ccppp_base_port;
}

int cvmx_helper_cfg_ipd2pko_port_num(int ipd_port)
{
	int ipd_y, ipd_x;

	ipd_y = IPD2PKO_CACHE_Y(ipd_port);
	ipd_x = __cvmx_helper_cfg_ipd2pko_cachex(ipd_port);

	return ipd2pko_port_cache[ipd_y][ipd_x].ccppp_nports;
}

/**
 * Return the number of queues to be assigned to this pko_port
 *
 * @param pko_port
 * Return: the number of queues for this pko_port
 */
static int cvmx_helper_cfg_dft_nqueues(int pko_port)
{
	cvmx_helper_interface_mode_t mode;
	int interface;
	int n;
	int ret;

	interface = __cvmx_helper_cfg_pko_port_interface(pko_port);
	mode = cvmx_helper_interface_get_mode(interface);

	n = NUM_ELEMENTS(__cvmx_pko_queue_static_config[0].pknd.pko_cfg_iface);

	if (mode == CVMX_HELPER_INTERFACE_MODE_LOOP) {
		ret = __cvmx_pko_queue_static_config[0].pknd.pko_cfg_loop.queues_per_port;
	} else if (mode == CVMX_HELPER_INTERFACE_MODE_NPI) {
		ret = __cvmx_pko_queue_static_config[0].pknd.pko_cfg_npi.queues_per_port;
	} else if (interface >= 0 && interface < n) {
		ret = __cvmx_pko_queue_static_config[0].pknd.pko_cfg_iface[interface].queues_per_port;
	} else {
		/* Should never be reached */
		ret = 1;
	}

	/* Override for sanity in case of an empty static config table */
	if (ret == 0)
		ret = 1;

	return ret;
}

static int cvmx_helper_cfg_init_pko_iports_and_queues_using_static_config(void)
{
	int pko_port_base = 0;
	int cvmx_cfg_default_pko_nports = 1;
	int i, j, n, k;
	int rv = 0;

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();

	/* When not using a config file, each port is assigned one internal pko port */
	for (i = 0; i < cvmx_helper_get_number_of_interfaces(); i++) {
		n = cvmx_helper_interface_enumerate(i);

		for (j = 0; j < n; j++) {
			cvmx_cfg_port[0][i][j].ccpp_pko_port_base = pko_port_base;
			cvmx_cfg_port[0][i][j].ccpp_pko_num_ports = cvmx_cfg_default_pko_nports;
			/*
			 * Initialize the interface early here so that
			 * cvmx_helper_cfg_dft_nqueues() below can get the
			 * interface number corresponding to the pko port
			 */
			for (k = pko_port_base;
			     k < pko_port_base + cvmx_cfg_default_pko_nports; k++)
				cvmx_cfg_pko_port_map[k].ccppl_interface = i;

			pko_port_base += cvmx_cfg_default_pko_nports;
		}
	}

	cvmx_helper_cfg_assert(pko_port_base <= CVMX_HELPER_CFG_MAX_PKO_PORT);

	/* Assign queues per pko port */
	for (i = 0; i < pko_port_base; i++) {
		int base;

		n = cvmx_helper_cfg_dft_nqueues(i);
		base = cvmx_pko_queue_alloc(i, n);
		if (base == -1) {
			printf("ERROR: %s: failed to alloc %d queues for pko port=%d\n",
			       __func__, n, i);
			rv = -1;
		}
	}

	return rv;
}

/**
 * Returns whether a port is valid for a given interface
 *
 * @param xiface interface to check
 * @param index  port index in the interface
 *
 * Return: non-zero if the port is present, zero if not.
 */
int cvmx_helper_is_port_valid(int xiface, int index)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();

	return cvmx_cfg_port[xi.node][xi.interface][index].valid;
}

void cvmx_helper_set_port_valid(int xiface, int index, bool valid)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();

	cvmx_cfg_port[xi.node][xi.interface][index].valid = valid;
}

void cvmx_helper_set_mac_phy_mode(int xiface, int index, bool valid)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();

	cvmx_cfg_port[xi.node][xi.interface][index].sgmii_phy_mode = valid;
}

bool cvmx_helper_get_mac_phy_mode(int xiface, int index)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();

	return cvmx_cfg_port[xi.node][xi.interface][index].sgmii_phy_mode;
}

void cvmx_helper_set_1000x_mode(int xiface, int index, bool valid)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();

	cvmx_cfg_port[xi.node][xi.interface][index].sgmii_1000x_mode = valid;
}

bool cvmx_helper_get_1000x_mode(int xiface, int index)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();

	return cvmx_cfg_port[xi.node][xi.interface][index].sgmii_1000x_mode;
}

void cvmx_helper_set_agl_rx_clock_delay_bypass(int xiface, int index, bool valid)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();

	cvmx_cfg_port[xi.node][xi.interface][index].agl_rx_clk_delay_bypass = valid;
}

bool cvmx_helper_get_agl_rx_clock_delay_bypass(int xiface, int index)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();

	return cvmx_cfg_port[xi.node][xi.interface][index].agl_rx_clk_delay_bypass;
}

void cvmx_helper_set_agl_rx_clock_skew(int xiface, int index, uint8_t value)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();

	cvmx_cfg_port[xi.node][xi.interface][index].agl_rx_clk_skew = value;
}

uint8_t cvmx_helper_get_agl_rx_clock_skew(int xiface, int index)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();

	return cvmx_cfg_port[xi.node][xi.interface][index].agl_rx_clk_skew;
}

void cvmx_helper_set_agl_refclk_sel(int xiface, int index, uint8_t value)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();

	cvmx_cfg_port[xi.node][xi.interface][index].agl_refclk_sel = value;
}

uint8_t cvmx_helper_get_agl_refclk_sel(int xiface, int index)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();

	return cvmx_cfg_port[xi.node][xi.interface][index].agl_refclk_sel;
}

void cvmx_helper_set_port_force_link_up(int xiface, int index, bool value)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();

	cvmx_cfg_port[xi.node][xi.interface][index].force_link_up = value;
}

bool cvmx_helper_get_port_force_link_up(int xiface, int index)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();

	return cvmx_cfg_port[xi.node][xi.interface][index].force_link_up;
}

void cvmx_helper_set_port_phy_present(int xiface, int index, bool value)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();

	cvmx_cfg_port[xi.node][xi.interface][index].phy_present = value;
}

bool cvmx_helper_get_port_phy_present(int xiface, int index)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();

	return cvmx_cfg_port[xi.node][xi.interface][index].phy_present;
}

int __cvmx_helper_init_port_valid(void)
{
	int i, j, node;
	bool valid;
	static void *fdt_addr;
	int rc;
	struct cvmx_coremask pcm;

	octeon_get_available_coremask(&pcm);

	if (!fdt_addr)
		fdt_addr = __cvmx_phys_addr_to_ptr((u64)gd->fdt_blob, 128 * 1024);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();

	if (octeon_has_feature(OCTEON_FEATURE_BGX)) {
		rc = __cvmx_helper_parse_bgx_dt(fdt_addr);
		if (!rc)
			rc = __cvmx_fdt_parse_vsc7224(fdt_addr);
		if (!rc)
			rc = __cvmx_fdt_parse_avsp5410(fdt_addr);
		if (!rc && octeon_has_feature(OCTEON_FEATURE_BGX_XCV))
			rc = __cvmx_helper_parse_bgx_rgmii_dt(fdt_addr);

		/*
		 * Some ports are not in sequence and the device tree does
		 * not clear them.
		 *
		 * Also clear any ports that are not defined in the device
		 * tree. Apply this to each node.
		 */
		for (node = 0; node < CVMX_MAX_NODES; node++) {
			if (!cvmx_coremask_get64_node(&pcm, node))
				continue;

			for (i = 0; i < CVMX_HELPER_MAX_GMX; i++) {
				int xiface = cvmx_helper_node_interface_to_xiface(node, i);

				for (j = 0; j < cvmx_helper_interface_enumerate(i); j++) {
					cvmx_bgxx_cmrx_config_t cmr_config;

					cmr_config.u64 =
						csr_rd_node(node, CVMX_BGXX_CMRX_CONFIG(j, i));
					if ((cmr_config.s.lane_to_sds == 0xe4 &&
					     cmr_config.s.lmac_type != 4 &&
					     cmr_config.s.lmac_type != 1 &&
					     cmr_config.s.lmac_type != 5) ||
					    (cvmx_helper_get_port_fdt_node_offset(xiface, j) ==
					     CVMX_HELPER_CFG_INVALID_VALUE))
						cvmx_helper_set_port_valid(xiface, j, false);
				}
			}
		}
		return rc;
	}

	/* TODO: Update this to behave more like 78XX */
	for (i = 0; i < cvmx_helper_get_number_of_interfaces(); i++) {
		int n = cvmx_helper_interface_enumerate(i);

		for (j = 0; j < n; j++) {
			int ipd_port = cvmx_helper_get_ipd_port(i, j);

			valid = (__cvmx_helper_board_get_port_from_dt(fdt_addr, ipd_port) == 1);
			cvmx_helper_set_port_valid(i, j, valid);
		}
	}
	return 0;
}

typedef int (*cvmx_import_config_t)(void);
cvmx_import_config_t cvmx_import_app_config;

int __cvmx_helper_init_port_config_data_local(void)
{
	int rv = 0;
	int dbg = 0;

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();

	if (octeon_has_feature(OCTEON_FEATURE_PKND)) {
		if (cvmx_import_app_config) {
			rv = (*cvmx_import_app_config)();
			if (rv != 0) {
				debug("failed to import config\n");
				return -1;
			}
		}

		cvmx_helper_cfg_init_pko_port_map();
		__cvmx_helper_cfg_init_ipd2pko_cache();
	} else {
		if (cvmx_import_app_config) {
			rv = (*cvmx_import_app_config)();
			if (rv != 0) {
				debug("failed to import config\n");
				return -1;
			}
		}
	}

	if (dbg) {
		cvmx_helper_cfg_show_cfg();
		cvmx_pko_queue_show();
	}
	return rv;
}

/*
 * This call is made from the Linux octeon_ethernet driver
 * to set up the PKO with a specific queue count and
 * internal port count configuration.
 */
int cvmx_pko_alloc_iport_and_queues(int interface, int port, int port_cnt, int queue_cnt)
{
	int rv, p, port_start, cnt;

	if (dbg)
		debug("%s: intf %d/%d pcnt %d qcnt %d\n", __func__, interface,
		      port, port_cnt, queue_cnt);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();

	if (octeon_has_feature(OCTEON_FEATURE_PKND)) {
		rv = cvmx_pko_internal_ports_alloc(interface, port, port_cnt);
		if (rv < 0) {
			printf("ERROR: %s: failed to allocate internal ports for interface=%d port=%d cnt=%d\n",
			       __func__, interface, port, port_cnt);
			return -1;
		}
		port_start = __cvmx_helper_cfg_pko_port_base(interface, port);
		cnt = __cvmx_helper_cfg_pko_port_num(interface, port);
	} else {
		port_start = cvmx_helper_get_ipd_port(interface, port);
		cnt = 1;
	}

	for (p = port_start; p < port_start + cnt; p++) {
		rv = cvmx_pko_queue_alloc(p, queue_cnt);
		if (rv < 0) {
			printf("ERROR: %s: failed to allocate queues for port=%d cnt=%d\n",
			       __func__, p, queue_cnt);
			return -1;
		}
	}
	return 0;
}
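
/*
 * Illustrative call (assumed values, mirroring the Linux octeon_ethernet
 * use described above): give port 0 of interface 1 two internal PKO
 * ports with four queues each.
 *
 *	if (cvmx_pko_alloc_iport_and_queues(1, 0, 2, 4) < 0)
 *		printf("PKO iport/queue allocation failed\n");
 */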
static void cvmx_init_port_cfg(void)
{
	int node, i, j;

	if (port_cfg_data_initialized)
		return;

	for (node = 0; node < CVMX_MAX_NODES; node++) {
		for (i = 0; i < CVMX_HELPER_MAX_IFACE; i++) {
			for (j = 0; j < CVMX_HELPER_CFG_MAX_PORT_PER_IFACE; j++) {
				struct cvmx_cfg_port_param *pcfg;
				struct cvmx_srio_port_param *sr;

				pcfg = &cvmx_cfg_port[node][i][j];
				memset(pcfg, 0, sizeof(*pcfg));

				pcfg->port_fdt_node = CVMX_HELPER_CFG_INVALID_VALUE;
				pcfg->phy_fdt_node = CVMX_HELPER_CFG_INVALID_VALUE;
				pcfg->phy_info = NULL;
				pcfg->ccpp_pknd = CVMX_HELPER_CFG_INVALID_VALUE;
				pcfg->ccpp_bpid = CVMX_HELPER_CFG_INVALID_VALUE;
				pcfg->ccpp_pko_port_base = CVMX_HELPER_CFG_INVALID_VALUE;
				pcfg->ccpp_pko_num_ports = CVMX_HELPER_CFG_INVALID_VALUE;
				pcfg->agl_rx_clk_skew = 0;
				pcfg->valid = true;
				pcfg->sgmii_phy_mode = false;
				pcfg->sgmii_1000x_mode = false;
				pcfg->agl_rx_clk_delay_bypass = false;
				pcfg->force_link_up = false;
				pcfg->disable_an = false;
				pcfg->link_down_pwr_dn = false;
				pcfg->phy_present = false;
				pcfg->tx_clk_delay_bypass = false;
				pcfg->rgmii_tx_clk_delay = 0;
				pcfg->enable_fec = false;

				sr = &pcfg->srio_short;
				sr->srio_rx_ctle_agc_override = false;
				sr->srio_rx_ctle_zero = 0x6;
				sr->srio_rx_agc_pre_ctle = 0x5;
				sr->srio_rx_agc_post_ctle = 0x4;
				sr->srio_tx_swing_override = false;
				sr->srio_tx_swing = 0x7;
				sr->srio_tx_premptap_override = false;
				sr->srio_tx_premptap_pre = 0;
				sr->srio_tx_premptap_post = 0xF;
				sr->srio_tx_gain_override = false;
				sr->srio_tx_gain = 0x3;
				sr->srio_tx_vboost_override = 0;
				sr->srio_tx_vboost = true;

				sr = &pcfg->srio_long;
				sr->srio_rx_ctle_agc_override = false;
				sr->srio_rx_ctle_zero = 0x6;
				sr->srio_rx_agc_pre_ctle = 0x5;
				sr->srio_rx_agc_post_ctle = 0x4;
				sr->srio_tx_swing_override = false;
				sr->srio_tx_swing = 0x7;
				sr->srio_tx_premptap_override = false;
				sr->srio_tx_premptap_pre = 0;
				sr->srio_tx_premptap_post = 0xF;
				sr->srio_tx_gain_override = false;
				sr->srio_tx_gain = 0x3;
				sr->srio_tx_vboost_override = 0;
				sr->srio_tx_vboost = true;

				pcfg->agl_refclk_sel = 0;
				pcfg->sfp_of_offset = -1;
				pcfg->vsc7224_chan = NULL;
			}
		}
	}
	port_cfg_data_initialized = true;
}

int __cvmx_helper_init_port_config_data(int node)
{
	int rv = 0;
	int i, j, n;
	int num_interfaces, interface;
	int pknd = 0, bpid = 0;
	const int use_static_config = 1;

	if (dbg)
		printf("%s:\n", __func__);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();

	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		/* PKO3: only BPID and PKND need to be set up here,
		 * while the rest of the PKO3 init is done in
		 * cvmx-helper-pko3.c
		 */
		pknd = 0;
		bpid = 0;
		for (i = 0; i < cvmx_helper_get_number_of_interfaces(); i++) {
			int xiface = cvmx_helper_node_interface_to_xiface(node, i);

			n = cvmx_helper_interface_enumerate(xiface);
			/*
			 * Assign 8 pknds to the ILK interface; these pknds
			 * will be distributed among the channels configured
			 */
			if (cvmx_helper_interface_get_mode(xiface) ==
			    CVMX_HELPER_INTERFACE_MODE_ILK) {
				if (n > 8)
					n = 8;
			}
			if (cvmx_helper_interface_get_mode(xiface) !=
			    CVMX_HELPER_INTERFACE_MODE_NPI) {
				for (j = 0; j < n; j++) {
					struct cvmx_cfg_port_param *pcfg;

					pcfg = &cvmx_cfg_port[node][i][j];
					pcfg->ccpp_pknd = pknd++;
					pcfg->ccpp_bpid = bpid++;
				}
			} else {
				for (j = 0; j < n; j++) {
					if (j == n / cvmx_npi_max_pknds) {
						pknd++;
						bpid++;
					}
					cvmx_cfg_port[node][i][j].ccpp_pknd = pknd;
					cvmx_cfg_port[node][i][j].ccpp_bpid = bpid;
				}
				pknd++;
				bpid++;
			}
		} /* for i = 0 */
		cvmx_helper_cfg_assert(pknd <= CVMX_HELPER_CFG_MAX_PIP_PKND);
		cvmx_helper_cfg_assert(bpid <= CVMX_HELPER_CFG_MAX_PIP_BPID);
	} else if (octeon_has_feature(OCTEON_FEATURE_PKND)) {
		if (use_static_config)
			cvmx_helper_cfg_init_pko_iports_and_queues_using_static_config();

		/* Initialize pknd and bpid */
		for (i = 0; i < cvmx_helper_get_number_of_interfaces(); i++) {
			n = cvmx_helper_interface_enumerate(i);
			for (j = 0; j < n; j++) {
				cvmx_cfg_port[0][i][j].ccpp_pknd = pknd++;
				cvmx_cfg_port[0][i][j].ccpp_bpid = bpid++;
			}
		}
		cvmx_helper_cfg_assert(pknd <= CVMX_HELPER_CFG_MAX_PIP_PKND);
		cvmx_helper_cfg_assert(bpid <= CVMX_HELPER_CFG_MAX_PIP_BPID);
	} else {
		if (use_static_config)
			cvmx_pko_queue_init_from_cvmx_config_non_pknd();
	}

	/* The remainder is not used for PKO3 */
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))
		return 0;

	/* Initialize any ports and queues that are not yet initialized */
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		int num_ports = __cvmx_helper_early_ports_on_interface(interface);
		int port, port_base, queue;

		for (port = 0; port < num_ports; port++) {
			bool init_req = false;

			if (octeon_has_feature(OCTEON_FEATURE_PKND)) {
				port_base = __cvmx_helper_cfg_pko_port_base(interface, port);
				if (port_base == CVMX_HELPER_CFG_INVALID_VALUE)
					init_req = true;
			} else {
				port_base = cvmx_helper_get_ipd_port(interface, port);
				queue = __cvmx_helper_cfg_pko_queue_base(port_base);
				if (queue == CVMX_HELPER_CFG_INVALID_VALUE)
					init_req = true;
			}

			if (init_req) {
				rv = cvmx_pko_alloc_iport_and_queues(interface, port, 1, 1);
				if (rv < 0) {
					debug("cvmx_pko_alloc_iport_and_queues failed.\n");
					return rv;
				}
			}
		}
	}

	if (octeon_has_feature(OCTEON_FEATURE_PKND)) {
		cvmx_helper_cfg_init_pko_port_map();
		__cvmx_helper_cfg_init_ipd2pko_cache();
	}

	if (dbg) {
		cvmx_helper_cfg_show_cfg();
		cvmx_pko_queue_show();
	}
	return rv;
}

/**
 * @INTERNAL
 * Store the FDT node offset in the device tree of a port
 *
 * @param xiface      node and interface
 * @param index       port index
 * @param node_offset node offset to store
 */
void cvmx_helper_set_port_fdt_node_offset(int xiface, int index, int node_offset)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();

	cvmx_cfg_port[xi.node][xi.interface][index].port_fdt_node = node_offset;
}

/**
 * @INTERNAL
 * Return the FDT node offset in the device tree of a port
 *
 * @param xiface node and interface
 * @param index  port index
 * Return: node offset of port or -1 if invalid
 */
int cvmx_helper_get_port_fdt_node_offset(int xiface, int index)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();

	return cvmx_cfg_port[xi.node][xi.interface][index].port_fdt_node;
}

/**
 * Search for a port based on its FDT node offset
 *
 * @param of_offset   Node offset of port to search for
 * @param[out] xiface xinterface of match
 * @param[out] index  port index of match
 *
 * Return: 0 if found, -1 if not found
 */
int cvmx_helper_cfg_get_xiface_index_by_fdt_node_offset(int of_offset, int *xiface, int *index)
{
	int iface;
	int i;
	int node;
	struct cvmx_cfg_port_param *pcfg = NULL;

	*xiface = -1;
	*index = -1;

	for (node = 0; node < CVMX_MAX_NODES; node++) {
		for (iface = 0; iface < CVMX_HELPER_MAX_IFACE; iface++) {
			for (i = 0; i < CVMX_HELPER_CFG_MAX_PORT_PER_IFACE; i++) {
				pcfg = &cvmx_cfg_port[node][iface][i];
				if (pcfg->valid && pcfg->port_fdt_node == of_offset) {
					*xiface = cvmx_helper_node_interface_to_xiface(node, iface);
					*index = i;
					return 0;
				}
			}
		}
	}
	return -1;
}
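
/*
 * Usage sketch (of_offset is a hypothetical node offset obtained from
 * libfdt): map an FDT node back to its port.
 *
 *	int xiface, index;
 *
 *	if (cvmx_helper_cfg_get_xiface_index_by_fdt_node_offset(of_offset,
 *								&xiface,
 *								&index) == 0)
 *		debug("matched interface %d index %d\n", xiface, index);
 */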
  1172. /**
  1173. * @INTERNAL
  1174. * Store the FDT node offset in the device tree of a phy
  1175. *
  1176. * @param xiface node and interface
  1177. * @param index port index
  1178. * @param node_offset node offset to store
  1179. */
  1180. void cvmx_helper_set_phy_fdt_node_offset(int xiface, int index, int node_offset)
  1181. {
  1182. struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
  1183. if (!port_cfg_data_initialized)
  1184. cvmx_init_port_cfg();
  1185. cvmx_cfg_port[xi.node][xi.interface][index].phy_fdt_node = node_offset;
  1186. }
  1187. /**
  1188. * @INTERNAL
  1189. * Return the FDT node offset in the device tree of a phy
  1190. *
  1191. * @param xiface node and interface
  1192. * @param index port index
  1193. * Return: node offset of phy or -1 if invalid
  1194. */
  1195. int cvmx_helper_get_phy_fdt_node_offset(int xiface, int index)
  1196. {
  1197. struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
  1198. if (!port_cfg_data_initialized)
  1199. cvmx_init_port_cfg();
  1200. return cvmx_cfg_port[xi.node][xi.interface][index].phy_fdt_node;
  1201. }
  1202. /**
  1203. * @INTERNAL
  1204. * Override default autonegotiation for a port
  1205. *
  1206. * @param xiface node and interface
  1207. * @param index port index
  1208. * @param enable true to enable autonegotiation, false to force full
  1209. * duplex, full speed.
  1210. */
  1211. void cvmx_helper_set_port_autonegotiation(int xiface, int index, bool enable)
  1212. {
  1213. struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
  1214. if (!port_cfg_data_initialized)
  1215. cvmx_init_port_cfg();
  1216. cvmx_cfg_port[xi.node][xi.interface][index].disable_an = !enable;
  1217. }
  1218. /**
  1219. * @INTERNAL
  1220. * Returns if autonegotiation is enabled or not.
  1221. *
  1222. * @param xiface node and interface
  1223. * @param index port index
  1224. *
  1225. * Return: 0 if autonegotiation is disabled, 1 if enabled.
  1226. */
  1227. bool cvmx_helper_get_port_autonegotiation(int xiface, int index)
  1228. {
  1229. struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
  1230. if (!port_cfg_data_initialized)
  1231. cvmx_init_port_cfg();
  1232. return !cvmx_cfg_port[xi.node][xi.interface][index].disable_an;
  1233. }
  1234. /**
  1235. * @INTERNAL
  1236. * Override default forward error correction for a port
  1237. *
  1238. * @param xiface node and interface
  1239. * @param index port index
  1240. * @param enable true to enable fec, false to disable it
  1241. */
  1242. void cvmx_helper_set_port_fec(int xiface, int index, bool enable)
  1243. {
  1244. struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
  1245. if (!port_cfg_data_initialized)
  1246. cvmx_init_port_cfg();
  1247. cvmx_cfg_port[xi.node][xi.interface][index].enable_fec = enable;
  1248. }
  1249. /**
  1250. * @INTERNAL
  1251. * Returns if forward error correction is enabled or not.
  1252. *
  1253. * @param xiface node and interface
  1254. * @param index port index
  1255. *
  1256. * Return: false if fec is disabled, true if enabled.
  1257. */
  1258. bool cvmx_helper_get_port_fec(int xiface, int index)
  1259. {
  1260. struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
  1261. if (!port_cfg_data_initialized)
  1262. cvmx_init_port_cfg();
  1263. return cvmx_cfg_port[xi.node][xi.interface][index].enable_fec;
  1264. }
  1265. /**
  1266. * @INTERNAL
  1267. * Configure the SRIO RX interface AGC settings for host mode
  1268. *
  1269. * @param xiface node and interface
  1270. * @param index lane
  1271. * @param long_run true for long run, false for short run
  1272. * @param agc_override true to put AGC in manual mode
  1273. * @param ctle_zero RX equalizer peaking control (default 0x6)
  1274. * @param agc_pre_ctle AGC pre-CTLE gain (default 0x5)
  1275. * @param agc_post_ctle AGC post-CTLE gain (default 0x4)
  1276. *
  1277. * NOTE: This must be called before SRIO is initialized to take effect
  1278. */
  1279. void cvmx_helper_set_srio_rx(int xiface, int index, bool long_run, bool ctle_zero_override,
  1280. u8 ctle_zero, bool agc_override, uint8_t agc_pre_ctle,
  1281. uint8_t agc_post_ctle)
  1282. {
  1283. struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
  1284. struct cvmx_cfg_port_param *pcfg = &cvmx_cfg_port[xi.node][xi.interface][index];
  1285. struct cvmx_srio_port_param *sr = long_run ? &pcfg->srio_long : &pcfg->srio_short;
  1286. if (!port_cfg_data_initialized)
  1287. cvmx_init_port_cfg();
  1288. sr->srio_rx_ctle_zero_override = ctle_zero_override;
  1289. sr->srio_rx_ctle_zero = ctle_zero;
  1290. sr->srio_rx_ctle_agc_override = agc_override;
  1291. sr->srio_rx_agc_pre_ctle = agc_pre_ctle;
  1292. sr->srio_rx_agc_post_ctle = agc_post_ctle;
  1293. }

/**
 * @INTERNAL
 * Get the SRIO RX interface AGC settings for host mode
 *
 * @param xiface node and interface
 * @param index lane
 * @param long_run true for long run, false for short run
 * @param[out] ctle_zero_override true if the RX equalizer peaking control is overridden
 * @param[out] ctle_zero RX equalizer peaking control (default 0x6)
 * @param[out] agc_override true if AGC is in manual mode
 * @param[out] agc_pre_ctle AGC pre-CTLE gain (default 0x5)
 * @param[out] agc_post_ctle AGC post-CTLE gain (default 0x4)
 */
void cvmx_helper_get_srio_rx(int xiface, int index, bool long_run, bool *ctle_zero_override,
			     u8 *ctle_zero, bool *agc_override, u8 *agc_pre_ctle,
			     u8 *agc_post_ctle)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	struct cvmx_cfg_port_param *pcfg;
	struct cvmx_srio_port_param *sr;

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	pcfg = &cvmx_cfg_port[xi.node][xi.interface][index];
	sr = long_run ? &pcfg->srio_long : &pcfg->srio_short;
	if (ctle_zero_override)
		*ctle_zero_override = sr->srio_rx_ctle_zero_override;
	if (ctle_zero)
		*ctle_zero = sr->srio_rx_ctle_zero;
	if (agc_override)
		*agc_override = sr->srio_rx_ctle_agc_override;
	if (agc_pre_ctle)
		*agc_pre_ctle = sr->srio_rx_agc_pre_ctle;
	if (agc_post_ctle)
		*agc_post_ctle = sr->srio_rx_agc_post_ctle;
}
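
/*
 * Example usage (a minimal sketch; the interface number and register values
 * are illustrative, not board defaults): override the long-run RX CTLE zero
 * and AGC gains on lane 0. Per the NOTE above, this must run before SRIO
 * initialization, since the values are only applied at init time.
 *
 *	int xiface = cvmx_helper_node_interface_to_xiface(0, 4);
 *
 *	// Manual CTLE zero of 0x6, manual AGC pre/post-CTLE gains of 0x5/0x4
 *	cvmx_helper_set_srio_rx(xiface, 0, true, true, 0x6, true, 0x5, 0x4);
 */
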
/**
 * @INTERNAL
 * Configure the SRIO TX interface for host mode
 *
 * @param xiface node and interface
 * @param index lane
 * @param long_run true for long run, false for short run
 * @param tx_swing tx swing value to use (default 0x7), -1 to not
 *                 override.
 * @param tx_gain PCS SDS TX gain (default 0x3), -1 to not
 *                override
 * @param tx_premptap_override true to override preemphasis control
 * @param tx_premptap_pre preemphasis pre tap value (default 0x0)
 * @param tx_premptap_post preemphasis post tap value (default 0xF)
 * @param tx_vboost vboost enable (1 = enable, -1 = don't override)
 *                  hardware default is 1.
 *
 * NOTE: This must be called before SRIO is initialized to take effect
 */
void cvmx_helper_set_srio_tx(int xiface, int index, bool long_run, int tx_swing, int tx_gain,
			     bool tx_premptap_override, u8 tx_premptap_pre,
			     u8 tx_premptap_post, int tx_vboost)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	struct cvmx_cfg_port_param *pcfg;
	struct cvmx_srio_port_param *sr;

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	pcfg = &cvmx_cfg_port[xi.node][xi.interface][index];
	sr = long_run ? &pcfg->srio_long : &pcfg->srio_short;
	sr->srio_tx_swing_override = (tx_swing != -1);
	sr->srio_tx_swing = tx_swing != -1 ? tx_swing : 0x7;
	sr->srio_tx_gain_override = (tx_gain != -1);
	sr->srio_tx_gain = tx_gain != -1 ? tx_gain : 0x3;
	sr->srio_tx_premptap_override = tx_premptap_override;
	sr->srio_tx_premptap_pre = tx_premptap_override ? tx_premptap_pre : 0;
	sr->srio_tx_premptap_post = tx_premptap_override ? tx_premptap_post : 0xF;
	sr->srio_tx_vboost_override = tx_vboost != -1;
	sr->srio_tx_vboost = (tx_vboost != -1) ? tx_vboost : 1;
}

/**
 * @INTERNAL
 * Get the SRIO TX interface settings for host mode
 *
 * @param xiface node and interface
 * @param index lane
 * @param long_run true for long run, false for short run
 * @param[out] tx_swing_override true to override pcs_sds_txX_swing
 * @param[out] tx_swing tx swing value to use (default 0x7)
 * @param[out] tx_gain_override true to override default gain
 * @param[out] tx_gain PCS SDS TX gain (default 0x3)
 * @param[out] tx_premptap_override true to override preemphasis control
 * @param[out] tx_premptap_pre preemphasis pre tap value (default 0x0)
 * @param[out] tx_premptap_post preemphasis post tap value (default 0xF)
 * @param[out] tx_vboost_override override vboost setting
 * @param[out] tx_vboost vboost enable (default true)
 */
void cvmx_helper_get_srio_tx(int xiface, int index, bool long_run, bool *tx_swing_override,
			     u8 *tx_swing, bool *tx_gain_override, u8 *tx_gain,
			     bool *tx_premptap_override, u8 *tx_premptap_pre,
			     u8 *tx_premptap_post, bool *tx_vboost_override, bool *tx_vboost)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	struct cvmx_cfg_port_param *pcfg;
	struct cvmx_srio_port_param *sr;

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	pcfg = &cvmx_cfg_port[xi.node][xi.interface][index];
	sr = long_run ? &pcfg->srio_long : &pcfg->srio_short;
	if (tx_swing_override)
		*tx_swing_override = sr->srio_tx_swing_override;
	if (tx_swing)
		*tx_swing = sr->srio_tx_swing;
	if (tx_gain_override)
		*tx_gain_override = sr->srio_tx_gain_override;
	if (tx_gain)
		*tx_gain = sr->srio_tx_gain;
	if (tx_premptap_override)
		*tx_premptap_override = sr->srio_tx_premptap_override;
	if (tx_premptap_pre)
		*tx_premptap_pre = sr->srio_tx_premptap_pre;
	if (tx_premptap_post)
		*tx_premptap_post = sr->srio_tx_premptap_post;
	if (tx_vboost_override)
		*tx_vboost_override = sr->srio_tx_vboost_override;
	if (tx_vboost)
		*tx_vboost = sr->srio_tx_vboost;
}
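
/*
 * Example usage (a minimal sketch; values are illustrative): set a short-run
 * TX swing of 0x7 and gain of 0x3 on lane 0, while leaving the preemphasis
 * taps un-overridden and passing -1 so vboost keeps its hardware default.
 *
 *	int xiface = cvmx_helper_node_interface_to_xiface(0, 4);
 *
 *	cvmx_helper_set_srio_tx(xiface, 0, false, 0x7, 0x3,
 *				false, 0, 0, -1);
 */
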
/**
 * @INTERNAL
 * Sets the PHY info data structure
 *
 * @param xiface node and interface
 * @param index port index
 * @param[in] phy_info phy information data structure pointer
 */
void cvmx_helper_set_port_phy_info(int xiface, int index, struct cvmx_phy_info *phy_info)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	cvmx_cfg_port[xi.node][xi.interface][index].phy_info = phy_info;
}

/**
 * @INTERNAL
 * Returns the PHY information data structure for a port
 *
 * @param xiface node and interface
 * @param index port index
 *
 * Return: pointer to PHY information data structure or NULL if not set
 */
struct cvmx_phy_info *cvmx_helper_get_port_phy_info(int xiface, int index)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	return cvmx_cfg_port[xi.node][xi.interface][index].phy_info;
}

/**
 * @INTERNAL
 * Returns a pointer to the PHY LED configuration (if local GPIOs drive them)
 *
 * @param xiface node and interface
 * @param index port index
 *
 * Return: pointer to the PHY LED information data structure or NULL if not
 * present
 */
struct cvmx_phy_gpio_leds *cvmx_helper_get_port_phy_leds(int xiface, int index)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	return cvmx_cfg_port[xi.node][xi.interface][index].gpio_leds;
}

/**
 * @INTERNAL
 * Sets a pointer to the PHY LED configuration (if local GPIOs drive them)
 *
 * @param xiface node and interface
 * @param index port index
 * @param leds pointer to LED data structure
 */
void cvmx_helper_set_port_phy_leds(int xiface, int index, struct cvmx_phy_gpio_leds *leds)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	cvmx_cfg_port[xi.node][xi.interface][index].gpio_leds = leds;
}

/**
 * @INTERNAL
 * Sets the RGMII TX clock bypass and delay value
 *
 * @param xiface node and interface
 * @param index port index
 * @param bypass Set true to enable the clock bypass, false to keep
 *               clock and data synchronous. Default is false.
 * @param clk_delay Delay value to skew TXC from TXD
 */
void cvmx_helper_cfg_set_rgmii_tx_clk_delay(int xiface, int index, bool bypass, int clk_delay)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	cvmx_cfg_port[xi.node][xi.interface][index].tx_clk_delay_bypass = bypass;
	cvmx_cfg_port[xi.node][xi.interface][index].rgmii_tx_clk_delay = clk_delay;
}

/**
 * @INTERNAL
 * Gets the RGMII TX clock bypass and delay value
 *
 * @param xiface node and interface
 * @param index port index
 * @param[out] bypass true if the clock bypass is enabled, false if clock
 *                    and data are synchronous. Default is false.
 * @param[out] clk_delay Delay value to skew TXC from TXD, default is 0.
 */
void cvmx_helper_cfg_get_rgmii_tx_clk_delay(int xiface, int index, bool *bypass, int *clk_delay)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	*bypass = cvmx_cfg_port[xi.node][xi.interface][index].tx_clk_delay_bypass;
	*clk_delay = cvmx_cfg_port[xi.node][xi.interface][index].rgmii_tx_clk_delay;
}
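
/*
 * Example usage (a minimal sketch): keep the delay block in the TX clock
 * path and skew TXC from TXD, then read the setting back. The delay value
 * is hypothetical; real boards would take it from the device tree.
 *
 *	int xiface = cvmx_helper_node_interface_to_xiface(0, 0);
 *	bool bypass;
 *	int delay;
 *
 *	cvmx_helper_cfg_set_rgmii_tx_clk_delay(xiface, 0, false, 4);
 *	cvmx_helper_cfg_get_rgmii_tx_clk_delay(xiface, 0, &bypass, &delay);
 */
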
/**
 * @INTERNAL
 * Retrieve the SFP node offset in the device tree
 *
 * @param xiface node and interface
 * @param index port index
 *
 * Return: offset in device tree or -1 if error or not defined.
 */
int cvmx_helper_cfg_get_sfp_fdt_offset(int xiface, int index)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	return cvmx_cfg_port[xi.node][xi.interface][index].sfp_of_offset;
}

/**
 * @INTERNAL
 * Sets the SFP node offset
 *
 * @param xiface node and interface
 * @param index port index
 * @param sfp_of_offset Offset of SFP node in device tree
 */
void cvmx_helper_cfg_set_sfp_fdt_offset(int xiface, int index, int sfp_of_offset)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	cvmx_cfg_port[xi.node][xi.interface][index].sfp_of_offset = sfp_of_offset;
}

/**
 * Get data structure defining the Microsemi VSC7224 channel info
 * or NULL if not present
 *
 * @param xiface node and interface
 * @param index port index
 *
 * Return: pointer to vsc7224 data structure or NULL if not present
 */
struct cvmx_vsc7224_chan *cvmx_helper_cfg_get_vsc7224_chan_info(int xiface, int index)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	return cvmx_cfg_port[xi.node][xi.interface][index].vsc7224_chan;
}

/**
 * Sets the Microsemi VSC7224 channel info data structure
 *
 * @param xiface node and interface
 * @param index port index
 * @param[in] vsc7224_chan_info Microsemi VSC7224 channel data structure
 */
void cvmx_helper_cfg_set_vsc7224_chan_info(int xiface, int index,
					   struct cvmx_vsc7224_chan *vsc7224_chan_info)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	cvmx_cfg_port[xi.node][xi.interface][index].vsc7224_chan = vsc7224_chan_info;
}

/**
 * Get data structure defining the Avago AVSP5410 PHY info
 * or NULL if not present
 *
 * @param xiface node and interface
 * @param index port index
 *
 * Return: pointer to avsp5410 data structure or NULL if not present
 */
struct cvmx_avsp5410 *cvmx_helper_cfg_get_avsp5410_info(int xiface, int index)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	return cvmx_cfg_port[xi.node][xi.interface][index].avsp5410;
}

/**
 * Sets the Avago AVSP5410 PHY info data structure
 *
 * @param xiface node and interface
 * @param index port index
 * @param[in] avsp5410_info Avago AVSP5410 data structure
 */
void cvmx_helper_cfg_set_avsp5410_info(int xiface, int index, struct cvmx_avsp5410 *avsp5410_info)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	cvmx_cfg_port[xi.node][xi.interface][index].avsp5410 = avsp5410_info;
}

/**
 * Gets the SFP data associated with a port
 *
 * @param xiface node and interface
 * @param index port index
 *
 * Return: pointer to SFP data structure or NULL if none
 */
struct cvmx_fdt_sfp_info *cvmx_helper_cfg_get_sfp_info(int xiface, int index)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	return cvmx_cfg_port[xi.node][xi.interface][index].sfp_info;
}

/**
 * Sets the SFP data associated with a port
 *
 * @param xiface node and interface
 * @param index port index
 * @param[in] sfp_info port SFP data or NULL for none
 */
void cvmx_helper_cfg_set_sfp_info(int xiface, int index, struct cvmx_fdt_sfp_info *sfp_info)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	cvmx_cfg_port[xi.node][xi.interface][index].sfp_info = sfp_info;
}
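
/*
 * Example usage (a minimal sketch): the SFP device-tree offset and the
 * parsed SFP data are typically cached together while board code walks the
 * device tree. The xiface, index and of_offset values here are hypothetical
 * and would come from that walk.
 *
 *	struct cvmx_fdt_sfp_info *sfp;
 *
 *	cvmx_helper_cfg_set_sfp_fdt_offset(xiface, index, of_offset);
 *	sfp = cvmx_helper_cfg_get_sfp_info(xiface, index);
 *	if (!sfp)
 *		debug("%s: no SFP bound to this port\n", __func__);
 */
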
/**
 * Returns a pointer to the phy device associated with a port
 *
 * @param xiface node and interface
 * @param index port index
 *
 * Return: pointer to phy device or NULL if none
 */
struct phy_device *cvmx_helper_cfg_get_phy_device(int xiface, int index)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	return cvmx_cfg_port[xi.node][xi.interface][index].phydev;
}

/**
 * Sets the phy device associated with a port
 *
 * @param xiface node and interface
 * @param index port index
 * @param[in] phydev phy device to associate
 */
void cvmx_helper_cfg_set_phy_device(int xiface, int index, struct phy_device *phydev)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	cvmx_cfg_port[xi.node][xi.interface][index].phydev = phydev;
}
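
/*
 * Example usage (a minimal sketch; assumes a struct phy_device obtained from
 * the U-Boot PHY framework, e.g. via phy_connect(), which is hypothetical
 * here): bind the PHY to the port so later helpers can find it, then
 * read it back.
 *
 *	struct phy_device *phydev = ...; // e.g. from phy_connect()
 *
 *	cvmx_helper_cfg_set_phy_device(xiface, index, phydev);
 *	if (cvmx_helper_cfg_get_phy_device(xiface, index) == phydev)
 *		debug("%s: PHY bound\n", __func__);
 */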