// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 Marvell International Ltd.
 *
 * Small helper utilities.
 */

#include <log.h>
#include <time.h>
#include <linux/delay.h>

#include <mach/cvmx-regs.h>
#include <mach/cvmx-csr-enums.h>
#include <mach/octeon-model.h>
#include <mach/octeon-feature.h>

#include <mach/cvmx-gmxx-defs.h>
#include <mach/cvmx-ipd-defs.h>
#include <mach/cvmx-pko-defs.h>

#include <mach/cvmx-ipd.h>
#include <mach/cvmx-hwpko.h>
#include <mach/cvmx-pki.h>
#include <mach/cvmx-pip.h>
#include <mach/cvmx-helper.h>
#include <mach/cvmx-helper-util.h>
#include <mach/cvmx-helper-pki.h>

/**
 * @INTERNAL
 * These are the interface types needed to convert interface numbers to IPD
 * ports.
 *
 * @param GMII
 *	This type is used for sgmii, rgmii, xaui and rxaui interfaces.
 * @param ILK
 *	This type is used for ilk interfaces.
 * @param SRIO
 *	This type is used for serial RapidIO interfaces.
 * @param NPI
 *	This type is used for npi interfaces.
 * @param LB
 *	This type is used for loopback interfaces.
 * @param INVALID_IF_TYPE
 *	This type indicates the interface hasn't been configured.
 */
enum port_map_if_type { INVALID_IF_TYPE = 0, GMII, ILK, SRIO, NPI, LB };

/**
 * @INTERNAL
 * This structure is used to map interface numbers to IPD ports.
 *
 * @param type
 *	Interface type
 * @param first_ipd_port
 *	First IPD port number assigned to this interface.
 * @param last_ipd_port
 *	Last IPD port number assigned to this interface.
 * @param ipd_port_adj
 *	Different Octeon chips require different IPD ports for the
 *	same interface port/mode configuration. This value is used
 *	to account for that difference.
 */
struct ipd_port_map {
	enum port_map_if_type type;
	int first_ipd_port;
	int last_ipd_port;
	int ipd_port_adj;
};

/**
 * @INTERNAL
 * Interface number to IPD port map for the Octeon 68xx.
 */
static const struct ipd_port_map ipd_port_map_68xx[CVMX_HELPER_MAX_IFACE] = {
	{ GMII, 0x800, 0x8ff, 0x40 }, /* Interface 0 */
	{ GMII, 0x900, 0x9ff, 0x40 }, /* Interface 1 */
	{ GMII, 0xa00, 0xaff, 0x40 }, /* Interface 2 */
	{ GMII, 0xb00, 0xbff, 0x40 }, /* Interface 3 */
	{ GMII, 0xc00, 0xcff, 0x40 }, /* Interface 4 */
	{ ILK, 0x400, 0x4ff, 0x00 },  /* Interface 5 */
	{ ILK, 0x500, 0x5ff, 0x00 },  /* Interface 6 */
	{ NPI, 0x100, 0x120, 0x00 },  /* Interface 7 */
	{ LB, 0x000, 0x008, 0x00 },   /* Interface 8 */
};

/**
 * @INTERNAL
 * Interface number to IPD port map for the Octeon 78xx.
 *
 * This mapping corresponds to the WQE(CHAN) enumeration in
 * HRM Section 11.15 (PKI_CHAN_E) and Section 11.6.
 */
static const struct ipd_port_map ipd_port_map_78xx[CVMX_HELPER_MAX_IFACE] = {
	{ GMII, 0x800, 0x83f, 0x00 }, /* Interface 0 - BGX0 */
	{ GMII, 0x900, 0x93f, 0x00 }, /* Interface 1 - BGX1 */
	{ GMII, 0xa00, 0xa3f, 0x00 }, /* Interface 2 - BGX2 */
	{ GMII, 0xb00, 0xb3f, 0x00 }, /* Interface 3 - BGX3 */
	{ GMII, 0xc00, 0xc3f, 0x00 }, /* Interface 4 - BGX4 */
	{ GMII, 0xd00, 0xd3f, 0x00 }, /* Interface 5 - BGX5 */
	{ ILK, 0x400, 0x4ff, 0x00 },  /* Interface 6 - ILK0 */
	{ ILK, 0x500, 0x5ff, 0x00 },  /* Interface 7 - ILK1 */
	{ NPI, 0x100, 0x13f, 0x00 },  /* Interface 8 - DPI */
	{ LB, 0x000, 0x03f, 0x00 },   /* Interface 9 - LOOPBACK */
};

/**
 * @INTERNAL
 * Interface number to IPD port map for the Octeon 73xx.
 */
static const struct ipd_port_map ipd_port_map_73xx[CVMX_HELPER_MAX_IFACE] = {
	{ GMII, 0x800, 0x83f, 0x00 }, /* Interface 0 - BGX(0,0-3) */
	{ GMII, 0x900, 0x93f, 0x00 }, /* Interface 1 - BGX(1,0-3) */
	{ GMII, 0xa00, 0xa3f, 0x00 }, /* Interface 2 - BGX(2,0-3) */
	{ NPI, 0x100, 0x17f, 0x00 },  /* Interface 3 - DPI */
	{ LB, 0x000, 0x03f, 0x00 },   /* Interface 4 - LOOPBACK */
};

/**
 * @INTERNAL
 * Interface number to IPD port map for the Octeon 75xx.
 */
static const struct ipd_port_map ipd_port_map_75xx[CVMX_HELPER_MAX_IFACE] = {
	{ GMII, 0x800, 0x83f, 0x00 }, /* Interface 0 - BGX0 */
	{ SRIO, 0x240, 0x241, 0x00 }, /* Interface 1 - SRIO 0 */
	{ SRIO, 0x242, 0x243, 0x00 }, /* Interface 2 - SRIO 1 */
	{ NPI, 0x100, 0x13f, 0x00 },  /* Interface 3 - DPI */
	{ LB, 0x000, 0x03f, 0x00 },   /* Interface 4 - LOOPBACK */
};

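/*
 * Illustrative example (not part of the driver): with the CN73XX map
 * above, port index 1 on interface 0 (BGX0) resolves to IPD port
 * 0x800 + 1 * 16 = 0x810, following the GMII stride applied in
 * cvmx_helper_get_ipd_port() below.
 */
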
/**
 * Convert an interface mode into a human-readable string
 *
 * @param mode   Mode to convert
 *
 * Return: String
 */
const char *cvmx_helper_interface_mode_to_string(cvmx_helper_interface_mode_t mode)
{
	switch (mode) {
	case CVMX_HELPER_INTERFACE_MODE_DISABLED:
		return "DISABLED";
	case CVMX_HELPER_INTERFACE_MODE_RGMII:
		return "RGMII";
	case CVMX_HELPER_INTERFACE_MODE_GMII:
		return "GMII";
	case CVMX_HELPER_INTERFACE_MODE_SPI:
		return "SPI";
	case CVMX_HELPER_INTERFACE_MODE_PCIE:
		return "PCIE";
	case CVMX_HELPER_INTERFACE_MODE_XAUI:
		return "XAUI";
	case CVMX_HELPER_INTERFACE_MODE_RXAUI:
		return "RXAUI";
	case CVMX_HELPER_INTERFACE_MODE_SGMII:
		return "SGMII";
	case CVMX_HELPER_INTERFACE_MODE_QSGMII:
		return "QSGMII";
	case CVMX_HELPER_INTERFACE_MODE_PICMG:
		return "PICMG";
	case CVMX_HELPER_INTERFACE_MODE_NPI:
		return "NPI";
	case CVMX_HELPER_INTERFACE_MODE_LOOP:
		return "LOOP";
	case CVMX_HELPER_INTERFACE_MODE_SRIO:
		return "SRIO";
	case CVMX_HELPER_INTERFACE_MODE_ILK:
		return "ILK";
	case CVMX_HELPER_INTERFACE_MODE_AGL:
		return "AGL";
	case CVMX_HELPER_INTERFACE_MODE_XLAUI:
		return "XLAUI";
	case CVMX_HELPER_INTERFACE_MODE_XFI:
		return "XFI";
	case CVMX_HELPER_INTERFACE_MODE_40G_KR4:
		return "40G_KR4";
	case CVMX_HELPER_INTERFACE_MODE_10G_KR:
		return "10G_KR";
	case CVMX_HELPER_INTERFACE_MODE_MIXED:
		return "MIXED";
	}
	return "UNKNOWN";
}

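/*
 * Usage sketch (illustrative; assumes a valid xiface handle):
 *
 *	cvmx_helper_interface_mode_t mode = cvmx_helper_interface_get_mode(xiface);
 *
 *	debug("interface %d mode: %s\n", xiface,
 *	      cvmx_helper_interface_mode_to_string(mode));
 */
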
/**
 * Debug routine to dump the packet structure to the console
 *
 * @param work   Work queue entry containing the packet to dump
 *
 * Return: Zero on success, negative on failure
 */
int cvmx_helper_dump_packet(cvmx_wqe_t *work)
{
	u64 count;
	u64 remaining_bytes;
	union cvmx_buf_ptr buffer_ptr;
	cvmx_buf_ptr_pki_t bptr;
	cvmx_wqe_78xx_t *wqe = (void *)work;
	u64 start_of_buffer;
	u8 *data_address;
	u8 *end_of_data;

	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		cvmx_pki_dump_wqe(wqe);
		cvmx_wqe_pki_errata_20776(work);
	} else {
		debug("WORD0 = %lx\n", (unsigned long)work->word0.u64);
		debug("WORD1 = %lx\n", (unsigned long)work->word1.u64);
		debug("WORD2 = %lx\n", (unsigned long)work->word2.u64);
		debug("Packet Length:   %u\n", cvmx_wqe_get_len(work));
		debug("    Input Port:  %u\n", cvmx_wqe_get_port(work));
		debug("    QoS:         %u\n", cvmx_wqe_get_qos(work));
		debug("    Buffers:     %u\n", cvmx_wqe_get_bufs(work));
	}

	if (cvmx_wqe_get_bufs(work) == 0) {
		int wqe_pool;

		if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
			debug("%s: ERROR: Unexpected bufs==0 in WQE\n", __func__);
			return -1;
		}
		wqe_pool = (int)cvmx_fpa_get_wqe_pool();
		buffer_ptr.u64 = 0;
		buffer_ptr.s.pool = wqe_pool;
		buffer_ptr.s.size = 128;
		buffer_ptr.s.addr = cvmx_ptr_to_phys(work->packet_data);
		if (cvmx_likely(!work->word2.s.not_IP)) {
			union cvmx_pip_ip_offset pip_ip_offset;

			pip_ip_offset.u64 = csr_rd(CVMX_PIP_IP_OFFSET);
			buffer_ptr.s.addr +=
				(pip_ip_offset.s.offset << 3) - work->word2.s.ip_offset;
			buffer_ptr.s.addr += (work->word2.s.is_v6 ^ 1) << 2;
		} else {
			/*
			 * WARNING: This code assumes that the packet
			 * is not RAW. If it was, we would use
			 * PIP_GBL_CFG[RAW_SHF] instead of
			 * PIP_GBL_CFG[NIP_SHF].
			 */
			union cvmx_pip_gbl_cfg pip_gbl_cfg;

			pip_gbl_cfg.u64 = csr_rd(CVMX_PIP_GBL_CFG);
			buffer_ptr.s.addr += pip_gbl_cfg.s.nip_shf;
		}
	} else {
		buffer_ptr = work->packet_ptr;
	}

	remaining_bytes = cvmx_wqe_get_len(work);

	while (remaining_bytes) {
		/* native cn78xx buffer format, unless legacy-translated */
		if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE) && !wqe->pki_wqe_translated) {
			bptr.u64 = buffer_ptr.u64;
			/* XXX- assumes cache-line aligned buffer */
			start_of_buffer = (bptr.addr >> 7) << 7;
			debug("    Buffer Start:%llx\n", (unsigned long long)start_of_buffer);
			debug("    Buffer Data: %llx\n", (unsigned long long)bptr.addr);
			debug("    Buffer Size: %u\n", bptr.size);
			data_address = (uint8_t *)cvmx_phys_to_ptr(bptr.addr);
			end_of_data = data_address + bptr.size;
		} else {
			start_of_buffer = ((buffer_ptr.s.addr >> 7) - buffer_ptr.s.back) << 7;
			debug("    Buffer Start:%llx\n", (unsigned long long)start_of_buffer);
			debug("    Buffer I   : %u\n", buffer_ptr.s.i);
			debug("    Buffer Back: %u\n", buffer_ptr.s.back);
			debug("    Buffer Pool: %u\n", buffer_ptr.s.pool);
			debug("    Buffer Data: %llx\n", (unsigned long long)buffer_ptr.s.addr);
			debug("    Buffer Size: %u\n", buffer_ptr.s.size);
			data_address = (uint8_t *)cvmx_phys_to_ptr(buffer_ptr.s.addr);
			end_of_data = data_address + buffer_ptr.s.size;
		}

		debug("\t\t");
		count = 0;
		while (data_address < end_of_data) {
			if (remaining_bytes == 0)
				break;

			remaining_bytes--;
			debug("%02x", (unsigned int)*data_address);
			data_address++;
			if (remaining_bytes && count == 7) {
				debug("\n\t\t");
				count = 0;
			} else {
				count++;
			}
		}
		debug("\n");

		if (remaining_bytes) {
			if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE) &&
			    !wqe->pki_wqe_translated)
				buffer_ptr.u64 = *(uint64_t *)cvmx_phys_to_ptr(bptr.addr - 8);
			else
				buffer_ptr.u64 =
					*(uint64_t *)cvmx_phys_to_ptr(buffer_ptr.s.addr - 8);
		}
	}

	return 0;
}

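/*
 * Usage sketch (illustrative; assumes a WQE obtained from SSO/POW,
 * e.g. via cvmx_pow_work_request_sync()):
 *
 *	cvmx_wqe_t *work = cvmx_pow_work_request_sync(CVMX_POW_WAIT);
 *
 *	if (work)
 *		cvmx_helper_dump_packet(work);
 */
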
/**
 * @INTERNAL
 *
 * Extract NO_WPTR mode from PIP/IPD register
 */
static int __cvmx_ipd_mode_no_wptr(void)
{
	if (octeon_has_feature(OCTEON_FEATURE_NO_WPTR)) {
		cvmx_ipd_ctl_status_t ipd_ctl_status;

		ipd_ctl_status.u64 = csr_rd(CVMX_IPD_CTL_STATUS);
		return ipd_ctl_status.s.no_wptr;
	}
	return 0;
}

static cvmx_buf_ptr_t __cvmx_packet_short_ptr[4];
static int8_t __cvmx_wqe_pool = -1;

/**
 * @INTERNAL
 * Prepare packet pointer template for dynamic short
 * packets.
 */
static void cvmx_packet_short_ptr_calculate(void)
{
	unsigned int i, off;
	union cvmx_pip_gbl_cfg pip_gbl_cfg;
	union cvmx_pip_ip_offset pip_ip_offset;

	/* Fill in the common values for all cases */
	for (i = 0; i < 4; i++) {
		if (__cvmx_ipd_mode_no_wptr())
			/* packet pool, set to 0 in hardware */
			__cvmx_wqe_pool = 0;
		else
			/* WQE pool as configured */
			__cvmx_wqe_pool = csr_rd(CVMX_IPD_WQE_FPA_QUEUE) & 7;

		__cvmx_packet_short_ptr[i].s.pool = __cvmx_wqe_pool;
		__cvmx_packet_short_ptr[i].s.size = cvmx_fpa_get_block_size(__cvmx_wqe_pool);
		__cvmx_packet_short_ptr[i].s.size -= 32;
		__cvmx_packet_short_ptr[i].s.addr = 32;
	}

	pip_gbl_cfg.u64 = csr_rd(CVMX_PIP_GBL_CFG);
	pip_ip_offset.u64 = csr_rd(CVMX_PIP_IP_OFFSET);

	/* RAW_FULL: index = 0 */
	i = 0;
	off = pip_gbl_cfg.s.raw_shf;
	__cvmx_packet_short_ptr[i].s.addr += off;
	__cvmx_packet_short_ptr[i].s.size -= off;
	__cvmx_packet_short_ptr[i].s.back += off >> 7;

	/* NON-IP: index = 1 */
	i = 1;
	off = pip_gbl_cfg.s.nip_shf;
	__cvmx_packet_short_ptr[i].s.addr += off;
	__cvmx_packet_short_ptr[i].s.size -= off;
	__cvmx_packet_short_ptr[i].s.back += off >> 7;

	/* IPv4: index = 2 */
	i = 2;
	off = (pip_ip_offset.s.offset << 3) + 4;
	__cvmx_packet_short_ptr[i].s.addr += off;
	__cvmx_packet_short_ptr[i].s.size -= off;
	__cvmx_packet_short_ptr[i].s.back += off >> 7;

	/* IPv6: index = 3 */
	i = 3;
	off = (pip_ip_offset.s.offset << 3) + 0;
	__cvmx_packet_short_ptr[i].s.addr += off;
	__cvmx_packet_short_ptr[i].s.size -= off;
	__cvmx_packet_short_ptr[i].s.back += off >> 7;

	/* For IPv4/IPv6: subtract work->word2.s.ip_offset
	 * from addr, if it is smaller than IP_OFFSET[OFFSET]*8
	 * which is stored in __cvmx_packet_short_ptr[3].s.addr
	 */
}

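/*
 * Illustrative example (derived from the code above, not a separate API):
 * for a short IPv4 packet, template index 2 yields an address offset of
 * 32 + IP_OFFSET[OFFSET] * 8 + 4 bytes into the WQE buffer.
 * cvmx_wqe_get_packet_ptr() below then subtracts the per-packet
 * word2.s.ip_offset and adds the WQE physical address to finish the
 * buffer pointer.
 */
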
/**
 * Extract packet data buffer pointer from work queue entry.
 *
 * Returns the legacy (Octeon1/Octeon2) buffer pointer structure
 * for the linked buffer list.
 * On CN78XX, the native buffer pointer structure is converted into
 * the legacy format.
 * The legacy buf_ptr is then stored in the WQE, and the word0 reserved
 * field is set to indicate that the buffer pointers were translated.
 * If the packet data is only found inside the work queue entry,
 * a standard buffer pointer structure is created for it.
 */
cvmx_buf_ptr_t cvmx_wqe_get_packet_ptr(cvmx_wqe_t *work)
{
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		cvmx_wqe_78xx_t *wqe = (void *)work;
		cvmx_buf_ptr_t optr, lptr;
		cvmx_buf_ptr_pki_t nptr;
		unsigned int pool, bufs;
		int node = cvmx_get_node_num();

		/* In case of repeated calls of this function */
		if (wqe->pki_wqe_translated || wqe->word2.software) {
			optr.u64 = wqe->packet_ptr.u64;
			return optr;
		}

		bufs = wqe->word0.bufs;
		pool = wqe->word0.aura;
		nptr.u64 = wqe->packet_ptr.u64;

		optr.u64 = 0;
		optr.s.pool = pool;
		optr.s.addr = nptr.addr;
		if (bufs == 1) {
			optr.s.size = pki_dflt_pool[node].buffer_size -
				      pki_dflt_style[node].parm_cfg.first_skip - 8 -
				      wqe->word0.apad;
		} else {
			optr.s.size = nptr.size;
		}

		/* Calculate the "back" offset */
		if (!nptr.packet_outside_wqe) {
			optr.s.back = (nptr.addr -
				       cvmx_ptr_to_phys(wqe)) >> 7;
		} else {
			optr.s.back =
				(pki_dflt_style[node].parm_cfg.first_skip +
				 8 + wqe->word0.apad) >> 7;
		}

		lptr = optr;

		/* Follow pointer and convert all linked pointers */
		while (bufs > 1) {
			void *vptr;

			vptr = cvmx_phys_to_ptr(lptr.s.addr);
			memcpy(&nptr, vptr - 8, 8);
			/*
			 * Errata (PKI-20776): PKI_BUFLINK_S's are endian-swapped.
			 * CN78XX pass 1.x has a bug where the packet pointer
			 * in each segment is written in the opposite
			 * endianness of the configured mode. Fix these here.
			 */
			if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X))
				nptr.u64 = __builtin_bswap64(nptr.u64);
			lptr.u64 = 0;
			lptr.s.pool = pool;
			lptr.s.addr = nptr.addr;
			lptr.s.size = nptr.size;
			lptr.s.back = (pki_dflt_style[0].parm_cfg.later_skip + 8) >>
				      7; /* TBD: not guaranteed !! */
			memcpy(vptr - 8, &lptr, 8);
			bufs--;
		}

		/* Store translated bufptr in WQE, and set indicator */
		wqe->pki_wqe_translated = 1;
		wqe->packet_ptr.u64 = optr.u64;
		return optr;

	} else {
		unsigned int i;
		unsigned int off = 0;
		cvmx_buf_ptr_t bptr;

		if (cvmx_likely(work->word2.s.bufs > 0))
			return work->packet_ptr;

		if (cvmx_unlikely(work->word2.s.software))
			return work->packet_ptr;

		/* first packet, precalculate packet_ptr templates */
		if (cvmx_unlikely(__cvmx_packet_short_ptr[0].u64 == 0))
			cvmx_packet_short_ptr_calculate();

		/* calculate the template index */
		i = work->word2.s_cn38xx.not_IP | work->word2.s_cn38xx.rcv_error;
		i = 2 ^ (i << 1);

		/* IPv4/IPv6: Adjust IP offset */
		if (cvmx_likely(i & 2)) {
			i |= work->word2.s.is_v6;
			off = work->word2.s.ip_offset;
		} else {
			/* RAWFULL/RAWSCHED should be handled here */
			i = 1; /* not-IP */
			off = 0;
		}

		/* Get the right template */
		bptr = __cvmx_packet_short_ptr[i];
		bptr.s.addr -= off;
		bptr.s.back = bptr.s.addr >> 7;

		/* Add actual WQE paddr to the template offset */
		bptr.s.addr += cvmx_ptr_to_phys(work);

		/* Adjust word2.bufs so that _free_data() handles it
		 * in the same way as PKO
		 */
		work->word2.s.bufs = 1;

		/* Store the new buffer pointer back into WQE */
		work->packet_ptr = bptr;

		/* Return the synthetic buffer pointer */
		return bptr;
	}
}

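/*
 * Usage sketch (illustrative, hypothetical consumer code): walk all data
 * buffers of a WQE through the legacy-format pointer returned above. The
 * pointer to each next buffer sits 8 bytes before the current buffer's
 * data, as in cvmx_helper_dump_packet():
 *
 *	cvmx_buf_ptr_t ptr = cvmx_wqe_get_packet_ptr(work);
 *	int bufs = cvmx_wqe_get_bufs(work);
 *
 *	while (bufs-- > 0) {
 *		void *data = cvmx_phys_to_ptr(ptr.s.addr);
 *		(consume ptr.s.size bytes at data)
 *		if (bufs)
 *			ptr.u64 = *(uint64_t *)cvmx_phys_to_ptr(ptr.s.addr - 8);
 *	}
 */
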
void cvmx_wqe_free(cvmx_wqe_t *work)
{
	unsigned int bufs, ncl = 1;
	u64 paddr, paddr1;

	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		cvmx_wqe_78xx_t *wqe = (void *)work;
		cvmx_fpa3_gaura_t aura;
		cvmx_buf_ptr_pki_t bptr;

		bufs = wqe->word0.bufs;

		if (!wqe->pki_wqe_translated && bufs != 0) {
			/* Handle cn78xx native untranslated WQE */
			bptr = wqe->packet_ptr;

			/* Do nothing - first packet buffer shares WQE buffer */
			if (!bptr.packet_outside_wqe)
				return;
		} else if (cvmx_likely(bufs != 0)) {
			/* Handle translated 78XX WQE */
			paddr = (work->packet_ptr.s.addr & (~0x7full)) -
				(work->packet_ptr.s.back << 7);
			paddr1 = cvmx_ptr_to_phys(work);

			/* do not free the WQE if it contains the first data buffer */
			if (paddr == paddr1)
				return;
		}

		/* WQE is separate from packet buffer, free it */
		aura = __cvmx_fpa3_gaura(wqe->word0.aura >> 10, wqe->word0.aura & 0x3ff);
		cvmx_fpa3_free(work, aura, ncl);
	} else {
		/* handle legacy WQE */
		bufs = work->word2.s_cn38xx.bufs;

		if (cvmx_likely(bufs != 0)) {
			/* Check if the first data buffer is inside the WQE */
			paddr = (work->packet_ptr.s.addr & (~0x7full)) -
				(work->packet_ptr.s.back << 7);
			paddr1 = cvmx_ptr_to_phys(work);

			/* do not free the WQE if it contains the first data buffer */
			if (paddr == paddr1)
				return;
		}

		/* precalculate packet_ptr, WQE pool number */
		if (cvmx_unlikely(__cvmx_wqe_pool < 0))
			cvmx_packet_short_ptr_calculate();
		cvmx_fpa1_free(work, __cvmx_wqe_pool, ncl);
	}
}

/**
 * Free the packet buffers contained in a work queue entry.
 * The work queue entry is also freed if it contains packet data.
 * If however the packet starts outside the WQE, the WQE will
 * not be freed. The application should call cvmx_wqe_free()
 * to free the WQE buffer that contains no packet data.
 *
 * @param work   Work queue entry with packet to free
 */
void cvmx_helper_free_packet_data(cvmx_wqe_t *work)
{
	u64 number_buffers;
	u64 start_of_buffer;
	u64 next_buffer_ptr;
	cvmx_fpa3_gaura_t aura;
	unsigned int ncl;
	cvmx_buf_ptr_t buffer_ptr;
	cvmx_buf_ptr_pki_t bptr;
	cvmx_wqe_78xx_t *wqe = (void *)work;
	int o3_pki_wqe = 0;

	number_buffers = cvmx_wqe_get_bufs(work);

	buffer_ptr.u64 = work->packet_ptr.u64;

	/* Zero-out WQE WORD3 so that the WQE is freed by cvmx_wqe_free() */
	work->packet_ptr.u64 = 0;

	if (number_buffers == 0)
		return;

	/* Interpret PKI-style bufptr unless it has been translated */
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE) &&
	    !wqe->pki_wqe_translated) {
		o3_pki_wqe = 1;
		cvmx_wqe_pki_errata_20776(work);
		aura = __cvmx_fpa3_gaura(wqe->word0.aura >> 10,
					 wqe->word0.aura & 0x3ff);
	} else {
		start_of_buffer = ((buffer_ptr.s.addr >> 7) -
				   buffer_ptr.s.back) << 7;
		next_buffer_ptr =
			*(uint64_t *)cvmx_phys_to_ptr(buffer_ptr.s.addr - 8);
		/*
		 * Since the number of buffers is not zero, we know this is not
		 * a dynamic short packet. We need to check if it is a packet
		 * received with IPD_CTL_STATUS[NO_WPTR]. If this is true,
		 * we need to free all buffers except for the first one.
		 * The caller doesn't expect their WQE pointer to be freed.
		 */
		if (cvmx_ptr_to_phys(work) == start_of_buffer) {
			buffer_ptr.u64 = next_buffer_ptr;
			number_buffers--;
		}
	}

	while (number_buffers--) {
		if (o3_pki_wqe) {
			bptr.u64 = buffer_ptr.u64;

			ncl = (bptr.size + CVMX_CACHE_LINE_SIZE - 1) /
			      CVMX_CACHE_LINE_SIZE;

			/* XXX- assumes the buffer is cache-line aligned */
			start_of_buffer = (bptr.addr >> 7) << 7;

			/*
			 * Read pointer to next buffer before we free the
			 * current buffer.
			 */
			next_buffer_ptr = *(uint64_t *)cvmx_phys_to_ptr(bptr.addr - 8);
			/* FPA AURA comes from WQE, includes node */
			cvmx_fpa3_free(cvmx_phys_to_ptr(start_of_buffer),
				       aura, ncl);
		} else {
			ncl = (buffer_ptr.s.size + CVMX_CACHE_LINE_SIZE - 1) /
				      CVMX_CACHE_LINE_SIZE +
			      buffer_ptr.s.back;

			/*
			 * Calculate buffer start using the "back" offset.
			 * Remember the back pointer is in cache lines,
			 * not 64-bit words.
			 */
			start_of_buffer = ((buffer_ptr.s.addr >> 7) -
					   buffer_ptr.s.back) << 7;
			/*
			 * Read pointer to next buffer before we free
			 * the current buffer.
			 */
			next_buffer_ptr =
				*(uint64_t *)cvmx_phys_to_ptr(buffer_ptr.s.addr - 8);
			/* FPA pool comes from buf_ptr itself */
			if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
				aura = cvmx_fpa1_pool_to_fpa3_aura(buffer_ptr.s.pool);
				cvmx_fpa3_free(cvmx_phys_to_ptr(start_of_buffer),
					       aura, ncl);
			} else {
				cvmx_fpa1_free(cvmx_phys_to_ptr(start_of_buffer),
					       buffer_ptr.s.pool, ncl);
			}
		}

		buffer_ptr.u64 = next_buffer_ptr;
	}
}

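/*
 * Usage sketch: complete cleanup of a received packet. Free the data
 * buffers first, then the WQE itself; per the comments above,
 * cvmx_wqe_free() is a no-op when the WQE still holds the first data
 * buffer:
 *
 *	cvmx_helper_free_packet_data(work);
 *	cvmx_wqe_free(work);
 */
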
void cvmx_helper_setup_legacy_red(int pass_thresh, int drop_thresh)
{
	unsigned int node = cvmx_get_node_num();
	int aura, bpid;
	int buf_cnt;
	bool ena_red = 0, ena_drop = 0, ena_bp = 0;

#define FPA_RED_AVG_DLY 1
#define FPA_RED_LVL_DLY 3
#define FPA_QOS_AVRG	0
	/* Trying to make it backward compatible with older chips */

	/* Setting up avg_dly and prb_dly, enable bits */
	if (octeon_has_feature(OCTEON_FEATURE_FPA3)) {
		cvmx_fpa3_config_red_params(node, FPA_QOS_AVRG,
					    FPA_RED_LVL_DLY, FPA_RED_AVG_DLY);
	}

	/* Disable backpressure on queued buffers, which is an aura on 78xx */
	/*
	 * Assumption is that all packets from all interfaces and ports go
	 * into the same poolx/aurax for backward compatibility
	 */
	aura = cvmx_fpa_get_packet_pool();
	buf_cnt = cvmx_fpa_get_packet_pool_buffer_count();
	pass_thresh = buf_cnt - pass_thresh;
	drop_thresh = buf_cnt - drop_thresh;
	/* Map aura to bpid 0 */
	bpid = 0;
	cvmx_pki_write_aura_bpid(node, aura, bpid);
	/* Don't enable back pressure */
	ena_bp = 0;
	/* enable RED */
	ena_red = 1;
	/*
	 * This will enable RED on all interfaces since
	 * they all have packet buffers coming from the same aura
	 */
	cvmx_helper_setup_aura_qos(node, aura, ena_red, ena_drop, pass_thresh,
				   drop_thresh, ena_bp, 0);
}

/**
 * Setup Random Early Drop to automatically begin dropping packets.
 *
 * @param pass_thresh
 *	Packets will begin slowly dropping when there are less than
 *	this many packet buffers free in FPA 0.
 * @param drop_thresh
 *	All incoming packets will be dropped when there are less
 *	than this many free packet buffers in FPA 0.
 *
 * Return: Zero on success. Negative on failure
 */
int cvmx_helper_setup_red(int pass_thresh, int drop_thresh)
{
	if (octeon_has_feature(OCTEON_FEATURE_PKI))
		cvmx_helper_setup_legacy_red(pass_thresh, drop_thresh);
	else
		cvmx_ipd_setup_red(pass_thresh, drop_thresh);
	return 0;
}

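/*
 * Usage sketch: begin RED dropping when fewer than 1000 packet buffers
 * remain free, and drop all incoming packets below 500 free buffers
 * (threshold values are illustrative only):
 *
 *	cvmx_helper_setup_red(1000, 500);
 */
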
/**
 * @INTERNAL
 * Setup the common GMX settings that determine the number of
 * ports. These settings apply to almost all configurations of all
 * chips.
 *
 * @param xiface     Interface to configure
 * @param num_ports  Number of ports on the interface
 *
 * Return: Zero on success, negative on failure
 */
int __cvmx_helper_setup_gmx(int xiface, int num_ports)
{
	union cvmx_gmxx_tx_prts gmx_tx_prts;
	union cvmx_gmxx_rx_prts gmx_rx_prts;
	union cvmx_pko_reg_gmx_port_mode pko_mode;
	union cvmx_gmxx_txx_thresh gmx_tx_thresh;
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	int index;

	/*
	 * The common BGX settings are already done in the appropriate
	 * enable functions, nothing to do here.
	 */
	if (octeon_has_feature(OCTEON_FEATURE_BGX))
		return 0;

	/* Tell GMX the number of TX ports on this interface */
	gmx_tx_prts.u64 = csr_rd(CVMX_GMXX_TX_PRTS(xi.interface));
	gmx_tx_prts.s.prts = num_ports;
	csr_wr(CVMX_GMXX_TX_PRTS(xi.interface), gmx_tx_prts.u64);

	/*
	 * Tell GMX the number of RX ports on this interface. This only applies
	 * to *GMII and XAUI ports.
	 */
	switch (cvmx_helper_interface_get_mode(xiface)) {
	case CVMX_HELPER_INTERFACE_MODE_RGMII:
	case CVMX_HELPER_INTERFACE_MODE_SGMII:
	case CVMX_HELPER_INTERFACE_MODE_QSGMII:
	case CVMX_HELPER_INTERFACE_MODE_GMII:
	case CVMX_HELPER_INTERFACE_MODE_XAUI:
	case CVMX_HELPER_INTERFACE_MODE_RXAUI:
		if (num_ports > 4) {
			debug("%s: Illegal num_ports\n", __func__);
			return -1;
		}

		gmx_rx_prts.u64 = csr_rd(CVMX_GMXX_RX_PRTS(xi.interface));
		gmx_rx_prts.s.prts = num_ports;
		csr_wr(CVMX_GMXX_RX_PRTS(xi.interface), gmx_rx_prts.u64);
		break;

	default:
		break;
	}

	/*
	 * Skip setting CVMX_PKO_REG_GMX_PORT_MODE on 30XX, 31XX, 50XX,
	 * and 68XX.
	 */
	if (!OCTEON_IS_MODEL(OCTEON_CN68XX)) {
		/* Tell PKO the number of ports on this interface */
		pko_mode.u64 = csr_rd(CVMX_PKO_REG_GMX_PORT_MODE);
		if (xi.interface == 0) {
			if (num_ports == 1)
				pko_mode.s.mode0 = 4;
			else if (num_ports == 2)
				pko_mode.s.mode0 = 3;
			else if (num_ports <= 4)
				pko_mode.s.mode0 = 2;
			else if (num_ports <= 8)
				pko_mode.s.mode0 = 1;
			else
				pko_mode.s.mode0 = 0;
		} else {
			if (num_ports == 1)
				pko_mode.s.mode1 = 4;
			else if (num_ports == 2)
				pko_mode.s.mode1 = 3;
			else if (num_ports <= 4)
				pko_mode.s.mode1 = 2;
			else if (num_ports <= 8)
				pko_mode.s.mode1 = 1;
			else
				pko_mode.s.mode1 = 0;
		}
		csr_wr(CVMX_PKO_REG_GMX_PORT_MODE, pko_mode.u64);
	}

	/*
	 * Set GMX to buffer as much data as possible before starting
	 * transmit. This reduces the chances that we have a TX underrun
	 * due to memory contention. Any packet that fits entirely in the
	 * GMX FIFO can never have an underrun regardless of memory load.
	 */
	gmx_tx_thresh.u64 = csr_rd(CVMX_GMXX_TXX_THRESH(0, xi.interface));
	/* ccn - common cnt numerator */
	int ccn = 0x100;

	/* Choose the max value for the number of ports */
	if (num_ports <= 1)
		gmx_tx_thresh.s.cnt = ccn / 1;
	else if (num_ports == 2)
		gmx_tx_thresh.s.cnt = ccn / 2;
	else
		gmx_tx_thresh.s.cnt = ccn / 4;

	/*
	 * SPI and XAUI can have lots of ports but the GMX hardware
	 * only ever has a max of 4
	 */
	if (num_ports > 4)
		num_ports = 4;
	for (index = 0; index < num_ports; index++)
		csr_wr(CVMX_GMXX_TXX_THRESH(index, xi.interface), gmx_tx_thresh.u64);

	/*
	 * For o68, we need to setup the pipes
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX) && xi.interface < CVMX_HELPER_MAX_GMX) {
		union cvmx_gmxx_txx_pipe config;

		for (index = 0; index < num_ports; index++) {
			config.u64 = 0;

			if (__cvmx_helper_cfg_pko_port_base(xiface, index) >= 0) {
				config.u64 = csr_rd(CVMX_GMXX_TXX_PIPE(index,
								       xi.interface));
				config.s.nump = __cvmx_helper_cfg_pko_port_num(xiface,
									       index);
				config.s.base = __cvmx_helper_cfg_pko_port_base(xiface,
										index);
				csr_wr(CVMX_GMXX_TXX_PIPE(index, xi.interface),
				       config.u64);
			}
		}
	}

	return 0;
}

int cvmx_helper_get_pko_port(int interface, int port)
{
	return cvmx_pko_get_base_pko_port(interface, port);
}

int cvmx_helper_get_ipd_port(int xiface, int index)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (octeon_has_feature(OCTEON_FEATURE_PKND)) {
		const struct ipd_port_map *port_map;
		int ipd_port;

		if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
			port_map = ipd_port_map_68xx;
			ipd_port = 0;
		} else if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
			port_map = ipd_port_map_78xx;
			ipd_port = cvmx_helper_node_to_ipd_port(xi.node, 0);
		} else if (OCTEON_IS_MODEL(OCTEON_CN73XX)) {
			port_map = ipd_port_map_73xx;
			ipd_port = 0;
		} else if (OCTEON_IS_MODEL(OCTEON_CNF75XX)) {
			port_map = ipd_port_map_75xx;
			ipd_port = 0;
		} else {
			return -1;
		}

		ipd_port += port_map[xi.interface].first_ipd_port;
		if (port_map[xi.interface].type == GMII) {
			cvmx_helper_interface_mode_t mode;

			mode = cvmx_helper_interface_get_mode(xiface);
			if (mode == CVMX_HELPER_INTERFACE_MODE_XAUI ||
			    (mode == CVMX_HELPER_INTERFACE_MODE_RXAUI &&
			     OCTEON_IS_MODEL(OCTEON_CN68XX))) {
				ipd_port += port_map[xi.interface].ipd_port_adj;
				return ipd_port;
			} else {
				return ipd_port + (index * 16);
			}
		} else if (port_map[xi.interface].type == ILK) {
			return ipd_port + index;
		} else if (port_map[xi.interface].type == NPI) {
			return ipd_port + index;
		} else if (port_map[xi.interface].type == SRIO) {
			return ipd_port + index;
		} else if (port_map[xi.interface].type == LB) {
			return ipd_port + index;
		}

		debug("ERROR: %s: interface %u:%u bad mode\n",
		      __func__, xi.node, xi.interface);
		return -1;
	} else if (cvmx_helper_interface_get_mode(xiface) ==
		   CVMX_HELPER_INTERFACE_MODE_AGL) {
		return 24;
	}

	switch (xi.interface) {
	case 0:
		return index;
	case 1:
		return index + 16;
	case 2:
		return index + 32;
	case 3:
		return index + 36;
	case 4:
		return index + 40;
	case 5:
		return index + 42;
	case 6:
		return index + 44;
	case 7:
		return index + 46;
	}
	return -1;
}

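/*
 * Illustrative example (derived from the tables and code above): on
 * CN68XX, interface 0 in SGMII mode maps index 2 to IPD port
 * 0x800 + 2 * 16 = 0x820, while in XAUI mode the single port is
 * 0x800 + 0x40 = 0x840 via ipd_port_adj.
 */
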
int cvmx_helper_get_pknd(int xiface, int index)
{
	if (octeon_has_feature(OCTEON_FEATURE_PKND))
		return __cvmx_helper_cfg_pknd(xiface, index);

	return CVMX_INVALID_PKND;
}

int cvmx_helper_get_bpid(int interface, int port)
{
	if (octeon_has_feature(OCTEON_FEATURE_PKND))
		return __cvmx_helper_cfg_bpid(interface, port);

	return CVMX_INVALID_BPID;
}

/**
 * Display interface statistics.
 *
 * @param port   IPD/PKO port number
 *
 * Return: none
 */
void cvmx_helper_show_stats(int port)
{
	cvmx_pip_port_status_t status;
	cvmx_pko_port_status_t pko_status;

	/* ILK stats */
	if (octeon_has_feature(OCTEON_FEATURE_ILK))
		__cvmx_helper_ilk_show_stats();

	/* PIP stats */
	cvmx_pip_get_port_stats(port, 0, &status);
	debug("port %d: the number of packets - ipd: %d\n", port,
	      (int)status.packets);

	/* PKO stats */
	cvmx_pko_get_port_status(port, 0, &pko_status);
	debug("port %d: the number of packets - pko: %d\n", port,
	      (int)pko_status.packets);

	/* TODO: other stats */
}

/**
 * Returns the interface number for an IPD/PKO port number.
 *
 * @param ipd_port   IPD/PKO port number
 *
 * Return: Interface number
 */
int cvmx_helper_get_interface_num(int ipd_port)
{
	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
		const struct ipd_port_map *port_map;
		int i;
		struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(ipd_port);

		port_map = ipd_port_map_68xx;
		for (i = 0; i < CVMX_HELPER_MAX_IFACE; i++) {
			if (xp.port >= port_map[i].first_ipd_port &&
			    xp.port <= port_map[i].last_ipd_port)
				return i;
		}
		return -1;
	} else if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
		const struct ipd_port_map *port_map;
		int i;
		struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(ipd_port);

		port_map = ipd_port_map_78xx;
		for (i = 0; i < CVMX_HELPER_MAX_IFACE; i++) {
			if (xp.port >= port_map[i].first_ipd_port &&
			    xp.port <= port_map[i].last_ipd_port)
				return cvmx_helper_node_interface_to_xiface(xp.node, i);
		}
		return -1;
	} else if (OCTEON_IS_MODEL(OCTEON_CN73XX)) {
		const struct ipd_port_map *port_map;
		int i;
		struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(ipd_port);

		port_map = ipd_port_map_73xx;
		for (i = 0; i < CVMX_HELPER_MAX_IFACE; i++) {
			if (xp.port >= port_map[i].first_ipd_port &&
			    xp.port <= port_map[i].last_ipd_port)
				return i;
		}
		return -1;
	} else if (OCTEON_IS_MODEL(OCTEON_CNF75XX)) {
		const struct ipd_port_map *port_map;
		int i;
		struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(ipd_port);

		port_map = ipd_port_map_75xx;
		for (i = 0; i < CVMX_HELPER_MAX_IFACE; i++) {
			if (xp.port >= port_map[i].first_ipd_port &&
			    xp.port <= port_map[i].last_ipd_port)
				return i;
		}
		return -1;
	} else if (OCTEON_IS_MODEL(OCTEON_CN70XX) && ipd_port == 24) {
		return 4;
	}

	if (ipd_port < 16)
		return 0;
	else if (ipd_port < 32)
		return 1;
	else if (ipd_port < 36)
		return 2;
	else if (ipd_port < 40)
		return 3;
	else if (ipd_port < 42)
		return 4;
	else if (ipd_port < 44)
		return 5;
	else if (ipd_port < 46)
		return 6;
	else if (ipd_port < 48)
		return 7;

	debug("%s: Illegal IPD port number %d\n", __func__, ipd_port);
	return -1;
}

/**
 * Returns the interface index number for an IPD/PKO port
 * number.
 *
 * @param ipd_port   IPD/PKO port number
 *
 * Return: Interface index number
 */
int cvmx_helper_get_interface_index_num(int ipd_port)
{
	if (octeon_has_feature(OCTEON_FEATURE_PKND)) {
		const struct ipd_port_map *port_map;
		int port;
		enum port_map_if_type type = INVALID_IF_TYPE;
		int i;
		int num_interfaces;

		if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
			port_map = ipd_port_map_68xx;
		} else if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
			struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(ipd_port);

			port_map = ipd_port_map_78xx;
			ipd_port = xp.port;
		} else if (OCTEON_IS_MODEL(OCTEON_CN73XX)) {
			struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(ipd_port);

			port_map = ipd_port_map_73xx;
			ipd_port = xp.port;
		} else if (OCTEON_IS_MODEL(OCTEON_CNF75XX)) {
			struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(ipd_port);

			port_map = ipd_port_map_75xx;
			ipd_port = xp.port;
		} else {
			return -1;
		}

		num_interfaces = cvmx_helper_get_number_of_interfaces();

		/* Get the interface type of the ipd port */
		for (i = 0; i < num_interfaces; i++) {
			if (ipd_port >= port_map[i].first_ipd_port &&
			    ipd_port <= port_map[i].last_ipd_port) {
				type = port_map[i].type;
				break;
			}
		}

		/* Convert the ipd port to the interface port */
		switch (type) {
		/* Ethernet interfaces have a channel in the lower 4 bits
		 * that does not discriminate traffic, and is ignored.
		 */
		case GMII:
			port = ipd_port - port_map[i].first_ipd_port;

			/* CN68XX adds 0x40 to IPD_PORT when in XAUI/RXAUI
			 * mode of operation, adjust for that case
			 */
			if (port >= port_map[i].ipd_port_adj)
				port -= port_map[i].ipd_port_adj;

			port >>= 4;
			return port;

		/*
		 * These interfaces do not have physical ports,
		 * but have logical channels instead that separate
		 * traffic into logical streams
		 */
		case ILK:
		case SRIO:
		case NPI:
		case LB:
			port = ipd_port - port_map[i].first_ipd_port;
			return port;

		default:
			printf("ERROR: %s: Illegal IPD port number %#x\n",
			       __func__, ipd_port);
			return -1;
		}
	}
	if (OCTEON_IS_MODEL(OCTEON_CN70XX))
		return ipd_port & 3;
	if (ipd_port < 32)
		return ipd_port & 15;
	else if (ipd_port < 40)
		return ipd_port & 3;
	else if (ipd_port < 48)
		return ipd_port & 1;

	debug("%s: Illegal IPD port number\n", __func__);
	return -1;
}

/**
 * Prints out a buffer with the address, hex bytes, and ASCII
 *
 * @param addr         Start address to print on the left
 * @param[in] buffer   array of bytes to print
 * @param count        Number of bytes to print
 */
void cvmx_print_buffer_u8(unsigned int addr, const uint8_t *buffer,
			  size_t count)
{
	uint i;

	while (count) {
		unsigned int linelen = count < 16 ? count : 16;

		debug("%08x:", addr);

		for (i = 0; i < linelen; i++)
			debug(" %0*x", 2, buffer[i]);

		while (i++ < 17)
			debug("   ");

		for (i = 0; i < linelen; i++) {
			if (buffer[i] >= 0x20 && buffer[i] < 0x7f)
				debug("%c", buffer[i]);
			else
				debug(".");
		}
		debug("\n");
		addr += linelen;
		buffer += linelen;
		count -= linelen;
	}
}
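
/*
 * Usage sketch (hypothetical buffer; the leading address column is just
 * a label, so 0 is a valid start):
 *
 *	u8 pkt[64];
 *
 *	cvmx_print_buffer_u8(0, pkt, sizeof(pkt));
 */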