nix.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Marvell International Ltd.
 */

#include <dm.h>
#include <errno.h>
#include <log.h>
#include <malloc.h>
#include <memalign.h>
#include <misc.h>
#include <net.h>
#include <pci.h>
#include <watchdog.h>
#include <asm/arch/board.h>
#include <asm/arch/csrs/csrs-lmt.h>
#include <asm/io.h>
#include <asm/types.h>
#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/types.h>

#include "nix.h"
#include "lmt.h"
#include "cgx.h"

/**
 * NIX needs a lot of memory areas. Rather than handle all the failure cases,
 * we'll use a wrapper around alloc that prints an error if a memory
 * allocation fails.
 *
 * @param num_elements	Number of elements to allocate
 * @param elem_size	Size of each element
 * @param msg		Text string to show when allocation fails
 *
 * @return A valid memory location or NULL on failure
 */
static void *nix_memalloc(int num_elements, size_t elem_size, const char *msg)
{
	size_t alloc_size = num_elements * elem_size;
	void *base = memalign(CONFIG_SYS_CACHELINE_SIZE, alloc_size);

	if (!base)
		printf("NIX: Mem alloc failed for %s (%d * %zu = %zu bytes)\n",
		       msg ? msg : __func__, num_elements, elem_size,
		       alloc_size);
	else
		memset(base, 0, alloc_size);

	debug("NIX: Memory alloc for %s (%d * %zu = %zu bytes) at %p\n",
	      msg ? msg : __func__, num_elements, elem_size, alloc_size, base);
	return base;
}
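
/*
 * Set up the NPC (parser/classifier) local function. All of the work is
 * done by the admin function in npc_lf_admin_setup(); this wrapper only
 * reports failure.
 */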
int npc_lf_setup(struct nix *nix)
{
	int err;

	err = npc_lf_admin_setup(nix);
	if (err) {
		printf("%s: Error setting up npc lf admin\n", __func__);
		return err;
	}

	return 0;
}
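
/*
 * Fill one NPA pool: allocate queue_length cache-line-aligned buffers of
 * buffer_size bytes each and free every one into aura pool_id via the
 * 128-bit AURA_OP_FREE store, so the hardware can hand them out later.
 * The pointers are also recorded in buffers[] for software bookkeeping.
 */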
static int npa_setup_pool(struct npa *npa, u32 pool_id,
			  size_t buffer_size, u32 queue_length, void *buffers[])
{
	struct {
		union npa_lf_aura_op_free0 f0;
		union npa_lf_aura_op_free1 f1;
	} aura_descr;
	int index;

	for (index = 0; index < queue_length; index++) {
		buffers[index] = memalign(CONFIG_SYS_CACHELINE_SIZE,
					  buffer_size);
		if (!buffers[index]) {
			printf("%s: Out of memory %d, size: %zu\n",
			       __func__, index, buffer_size);
			return -ENOMEM;
		}
		debug("%s: allocating buffer %d, addr %p size: %zu\n",
		      __func__, index, buffers[index], buffer_size);

		/* Add the newly obtained pointer to the pool. 128 bit
		 * writes only.
		 */
		aura_descr.f0.s.addr = (u64)buffers[index];
		aura_descr.f1.u = 0;
		aura_descr.f1.s.aura = pool_id;
		st128(npa->npa_base + NPA_LF_AURA_OP_FREE0(),
		      aura_descr.f0.u, aura_descr.f1.u);
	}

	return 0;
}
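
/*
 * Set up the NPA local function: allocate aura/pool contexts and stack
 * pages for the RX, TX and SQB pools, configure each aura/pool pair via
 * the admin function, then pre-fill every pool with buffers.
 */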
int npa_lf_setup(struct nix *nix)
{
	struct rvu_pf *rvu = dev_get_priv(nix->dev);
	struct nix_af *nix_af = nix->nix_af;
	struct npa *npa;
	union npa_af_const npa_af_const;
	union npa_aura_s *aura;
	union npa_pool_s *pool;
	union rvu_func_addr_s block_addr;
	int idx;
	int stack_page_pointers;
	int stack_page_bytes;
	int err;

	npa = (struct npa *)calloc(1, sizeof(struct npa));
	if (!npa) {
		printf("%s: out of memory for npa instance\n", __func__);
		return -ENOMEM;
	}
	block_addr.u = 0;
	block_addr.s.block = RVU_BLOCK_ADDR_E_NPA;
	npa->npa_base = rvu->pf_base + block_addr.u;
	npa->npa_af = nix_af->npa_af;
	nix->npa = npa;

	npa_af_const.u = npa_af_reg_read(npa->npa_af, NPA_AF_CONST());
	stack_page_pointers = npa_af_const.s.stack_page_ptrs;
	stack_page_bytes = npa_af_const.s.stack_page_bytes;

	npa->stack_pages[NPA_POOL_RX] = (RQ_QLEN + stack_page_pointers - 1) /
					stack_page_pointers;
	npa->stack_pages[NPA_POOL_TX] = (SQ_QLEN + stack_page_pointers - 1) /
					stack_page_pointers;
	npa->stack_pages[NPA_POOL_SQB] = (SQB_QLEN + stack_page_pointers - 1) /
					 stack_page_pointers;
	npa->pool_stack_pointers = stack_page_pointers;

	npa->q_len[NPA_POOL_RX] = RQ_QLEN;
	npa->q_len[NPA_POOL_TX] = SQ_QLEN;
	npa->q_len[NPA_POOL_SQB] = SQB_QLEN;

	npa->buf_size[NPA_POOL_RX] = MAX_MTU + CONFIG_SYS_CACHELINE_SIZE;
	npa->buf_size[NPA_POOL_TX] = MAX_MTU + CONFIG_SYS_CACHELINE_SIZE;
	npa->buf_size[NPA_POOL_SQB] = nix_af->sqb_size;

	npa->aura_ctx = nix_memalloc(NPA_POOL_COUNT,
				     sizeof(union npa_aura_s),
				     "aura context");
	if (!npa->aura_ctx) {
		printf("%s: Out of memory for aura context\n", __func__);
		return -ENOMEM;
	}

	for (idx = 0; idx < NPA_POOL_COUNT; idx++) {
		npa->pool_ctx[idx] = nix_memalloc(1,
						  sizeof(union npa_pool_s),
						  "pool context");
		if (!npa->pool_ctx[idx]) {
			printf("%s: Out of memory for pool context\n",
			       __func__);
			return -ENOMEM;
		}
		npa->pool_stack[idx] = nix_memalloc(npa->stack_pages[idx],
						    stack_page_bytes,
						    "pool stack");
		if (!npa->pool_stack[idx]) {
			printf("%s: Out of memory for pool stack\n", __func__);
			return -ENOMEM;
		}
	}

	err = npa_lf_admin_setup(npa, nix->lf, (dma_addr_t)npa->aura_ctx);
	if (err) {
		printf("%s: Error setting up NPA LF admin for lf %d\n",
		       __func__, nix->lf);
		return err;
	}

	/* Set up the auras */
	for (idx = 0; idx < NPA_POOL_COUNT; idx++) {
		aura = npa->aura_ctx + (idx * sizeof(union npa_aura_s));
		pool = npa->pool_ctx[idx];
		debug("%s aura %p pool %p\n", __func__, aura, pool);
		memset(aura, 0, sizeof(union npa_aura_s));
		aura->s.fc_ena = 0;
		aura->s.pool_addr = (u64)npa->pool_ctx[idx];
		debug("%s aura.s.pool_addr %llx pool_addr %p\n", __func__,
		      aura->s.pool_addr, npa->pool_ctx[idx]);
		aura->s.shift = 64 - __builtin_clzll(npa->q_len[idx]) - 8;
		aura->s.count = npa->q_len[idx];
		aura->s.limit = npa->q_len[idx];
		aura->s.ena = 1;
		err = npa_attach_aura(nix_af, nix->lf, aura, idx);
		if (err)
			return err;

		memset(pool, 0, sizeof(*pool));
		pool->s.fc_ena = 0;
		pool->s.nat_align = 1;
		pool->s.stack_base = (u64)(npa->pool_stack[idx]);
		debug("%s pool.s.stack_base %llx stack_base %p\n", __func__,
		      pool->s.stack_base, npa->pool_stack[idx]);
		pool->s.buf_size =
			npa->buf_size[idx] / CONFIG_SYS_CACHELINE_SIZE;
		pool->s.stack_max_pages = npa->stack_pages[idx];
		pool->s.shift =
			64 - __builtin_clzll(npa->pool_stack_pointers) - 8;
		pool->s.ptr_start = 0;
		pool->s.ptr_end = (1ULL << 40) - 1;
		pool->s.ena = 1;
		err = npa_attach_pool(nix_af, nix->lf, pool, idx);
		if (err)
			return err;
	}

	for (idx = 0; idx < NPA_POOL_COUNT; idx++) {
		npa->buffers[idx] = nix_memalloc(npa->q_len[idx],
						 sizeof(void *),
						 "buffers");
		if (!npa->buffers[idx]) {
			printf("%s: Out of memory\n", __func__);
			return -ENOMEM;
		}
	}

	for (idx = 0; idx < NPA_POOL_COUNT; idx++) {
		err = npa_setup_pool(npa, idx, npa->buf_size[idx],
				     npa->q_len[idx], npa->buffers[idx]);
		if (err) {
			printf("%s: Error setting up pool %d\n",
			       __func__, idx);
			return err;
		}
	}

	return 0;
}
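
/* Tear down the NPA LF admin state and release all pool memory. */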
int npa_lf_shutdown(struct nix *nix)
{
	struct npa *npa = nix->npa;
	int err;
	int pool;

	err = npa_lf_admin_shutdown(nix->nix_af, nix->lf, NPA_POOL_COUNT);
	if (err) {
		printf("%s: Error %d shutting down NPA LF admin\n",
		       __func__, err);
		return err;
	}
	free(npa->aura_ctx);
	npa->aura_ctx = NULL;

	for (pool = 0; pool < NPA_POOL_COUNT; pool++) {
		free(npa->pool_ctx[pool]);
		npa->pool_ctx[pool] = NULL;
		free(npa->pool_stack[pool]);
		npa->pool_stack[pool] = NULL;
		free(npa->buffers[pool]);
		npa->buffers[pool] = NULL;
	}

	return 0;
}
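
/*
 * Set up the NIX local function: allocate the RQ/SQ/CQ hardware context
 * areas, the CQ rings and the Qint/Cint/RSS contexts, then hand them to
 * the admin function.
 */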
int nix_lf_setup(struct nix *nix)
{
	struct nix_af *nix_af = nix->nix_af;
	int idx;
	int err = -1;

	/* Alloc NIX RQ HW context memory */
	nix->rq_ctx_base = nix_memalloc(nix->rq_cnt, nix_af->rq_ctx_sz,
					"RQ CTX");
	if (!nix->rq_ctx_base)
		goto error;
	memset(nix->rq_ctx_base, 0, nix_af->rq_ctx_sz);

	/* Alloc NIX SQ HW context memory */
	nix->sq_ctx_base = nix_memalloc(nix->sq_cnt, nix_af->sq_ctx_sz,
					"SQ CTX");
	if (!nix->sq_ctx_base)
		goto error;
	memset(nix->sq_ctx_base, 0, nix_af->sq_ctx_sz);

	/* Alloc NIX CQ HW context memory */
	nix->cq_ctx_base = nix_memalloc(nix->cq_cnt, nix_af->cq_ctx_sz,
					"CQ CTX");
	if (!nix->cq_ctx_base)
		goto error;
	memset(nix->cq_ctx_base, 0, nix_af->cq_ctx_sz * NIX_CQ_COUNT);

	/* Alloc NIX CQ Ring memory */
	for (idx = 0; idx < NIX_CQ_COUNT; idx++) {
		err = qmem_alloc(&nix->cq[idx], CQ_ENTRIES, CQ_ENTRY_SIZE);
		if (err)
			goto error;
	}

	/* Alloc memory for Qints HW contexts */
	nix->qint_base = nix_memalloc(nix_af->qints, nix_af->qint_ctx_sz,
				      "Qint CTX");
	if (!nix->qint_base)
		goto error;
	/* Alloc memory for CQints HW contexts */
	nix->cint_base = nix_memalloc(nix_af->cints, nix_af->cint_ctx_sz,
				      "Cint CTX");
	if (!nix->cint_base)
		goto error;
	/* Alloc NIX RSS HW context memory and config the base */
	nix->rss_base = nix_memalloc(nix->rss_grps, nix_af->rsse_ctx_sz,
				     "RSS CTX");
	if (!nix->rss_base)
		goto error;

	err = nix_lf_admin_setup(nix);
	if (err) {
		printf("%s: Error setting up LF\n", __func__);
		goto error;
	}

	return 0;

error:
	if (nix->rq_ctx_base)
		free(nix->rq_ctx_base);
	nix->rq_ctx_base = NULL;

	if (nix->sq_ctx_base)
		free(nix->sq_ctx_base);
	nix->sq_ctx_base = NULL;

	if (nix->cq_ctx_base)
		free(nix->cq_ctx_base);
	nix->cq_ctx_base = NULL;

	for (idx = 0; idx < NIX_CQ_COUNT; idx++)
		qmem_free(&nix->cq[idx]);

	return err;
}
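
/* Shut down the NIX LF via the admin function and free its context memory. */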
int nix_lf_shutdown(struct nix *nix)
{
	struct nix_af *nix_af = nix->nix_af;
	int index;
	int err;

	err = nix_lf_admin_shutdown(nix_af, nix->lf, nix->cq_cnt,
				    nix->rq_cnt, nix->sq_cnt);
	if (err) {
		printf("%s: Error shutting down LF admin\n", __func__);
		return err;
	}

	if (nix->rq_ctx_base)
		free(nix->rq_ctx_base);
	nix->rq_ctx_base = NULL;

	if (nix->sq_ctx_base)
		free(nix->sq_ctx_base);
	nix->sq_ctx_base = NULL;

	if (nix->cq_ctx_base)
		free(nix->cq_ctx_base);
	nix->cq_ctx_base = NULL;

	for (index = 0; index < NIX_CQ_COUNT; index++)
		qmem_free(&nix->cq[index]);

	debug("%s: nix lf %d reset --\n", __func__, nix->lf);
	return 0;
}
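
/*
 * Allocate and initialize a NIX LF instance for this PF: map the NIX,
 * NPC and LMT block addresses, bind the CGX LMAC, and set up the NPA,
 * NPC and NIX local functions.
 */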
struct nix *nix_lf_alloc(struct udevice *dev)
{
	union rvu_func_addr_s block_addr;
	struct nix *nix;
	struct rvu_pf *rvu = dev_get_priv(dev);
	struct rvu_af *rvu_af = dev_get_priv(rvu->afdev);
	union rvu_pf_func_s pf_func;
	int err;

	debug("%s(%s )\n", __func__, dev->name);

	nix = (struct nix *)calloc(1, sizeof(*nix));
	if (!nix) {
		printf("%s: Out of memory for nix instance\n", __func__);
		return NULL;
	}
	nix->nix_af = rvu_af->nix_af;

	block_addr.u = 0;
	block_addr.s.block = RVU_BLOCK_ADDR_E_NIXX(0);
	nix->nix_base = rvu->pf_base + block_addr.u;
	block_addr.u = 0;
	block_addr.s.block = RVU_BLOCK_ADDR_E_NPC;
	nix->npc_base = rvu->pf_base + block_addr.u;
	block_addr.u = 0;
	block_addr.s.block = RVU_BLOCK_ADDR_E_LMT;
	nix->lmt_base = rvu->pf_base + block_addr.u;

	pf_func.u = 0;
	pf_func.s.pf = rvu->pfid;
	nix->pf_func = pf_func.u;
	nix->lf = rvu->nix_lfid;
	nix->pf = rvu->pfid;
	nix->dev = dev;
	nix->sq_cnt = 1;
	nix->rq_cnt = 1;
	nix->rss_grps = 1;
	nix->cq_cnt = 2;
	nix->xqe_sz = NIX_CQE_SIZE_W16;

	nix->lmac = nix_get_cgx_lmac(nix->pf);
	if (!nix->lmac) {
		printf("%s: Error: could not find lmac for pf %d\n",
		       __func__, nix->pf);
		free(nix);
		return NULL;
	}
	nix->lmac->link_num =
		NIX_LINK_E_CGXX_LMACX(nix->lmac->cgx->cgx_id,
				      nix->lmac->lmac_id);
	nix->lmac->chan_num =
		NIX_CHAN_E_CGXX_LMACX_CHX(nix->lmac->cgx->cgx_id,
					  nix->lmac->lmac_id, 0);
	/* This is rx pkind in 1:1 mapping to NIX_LINK_E */
	nix->lmac->pknd = nix->lmac->link_num;

	cgx_lmac_set_pkind(nix->lmac, nix->lmac->lmac_id, nix->lmac->pknd);
	debug("%s(%s CGX%x LMAC%x)\n", __func__, dev->name,
	      nix->lmac->cgx->cgx_id, nix->lmac->lmac_id);
	debug("%s(%s Link %x Chan %x Pknd %x)\n", __func__, dev->name,
	      nix->lmac->link_num, nix->lmac->chan_num, nix->lmac->pknd);

	err = npa_lf_setup(nix);
	if (err)
		return NULL;

	err = npc_lf_setup(nix);
	if (err)
		return NULL;

	err = nix_lf_setup(nix);
	if (err)
		return NULL;

	return nix;
}
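
/*
 * Pop one buffer pointer from aura @aura_id: the atomic add to the
 * AURA_OP_ALLOC register performs the allocation and returns the buffer
 * address. Callers (see nix_lf_xmit()) treat a zero result as "no
 * buffers available".
 */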
u64 npa_aura_op_alloc(struct npa *npa, u64 aura_id)
{
	union npa_lf_aura_op_allocx op_allocx;

	op_allocx.u = atomic_fetch_and_add64_nosync(npa->npa_base +
						    NPA_LF_AURA_OP_ALLOCX(0),
						    aura_id);
	return op_allocx.s.addr;
}
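
/*
 * Read the CQ status (head/tail pointers) for @cq_id with an atomic add
 * to the CQ_OP_STATUS register; the queue index is passed in the upper
 * 32 bits of the operand.
 */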
u64 nix_cq_op_status(struct nix *nix, u64 cq_id)
{
	union nixx_lf_cq_op_status op_status;
	s64 *reg = nix->nix_base + NIXX_LF_CQ_OP_STATUS();

	op_status.u = atomic_fetch_and_add64_nosync(reg, cq_id << 32);
	return op_status.u;
}

/* TX */
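
/*
 * Copy a send descriptor into the LMT (large atomic store) region,
 * 64 bits at a time; lmt_submit() later flushes it to the hardware.
 */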
static inline void nix_write_lmt(struct nix *nix, void *buffer,
				 int num_words)
{
	int i;
	u64 *lmt_ptr = lmt_store_ptr(nix);
	u64 *ptr = buffer;

	debug("%s lmt_ptr %p %p\n", __func__, nix->lmt_base, lmt_ptr);
	for (i = 0; i < num_words; i++) {
		debug("%s data %llx lmt_ptr %p\n", __func__, ptr[i],
		      lmt_ptr + i);
		lmt_ptr[i] = ptr[i];
	}
}
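
/*
 * Handle one TX completion entry: validate the CQE type and ring the CQ
 * doorbell to acknowledge it.
 */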
void nix_cqe_tx_pkt_handler(struct nix *nix, void *cqe)
{
	union nix_cqe_hdr_s *txcqe = (union nix_cqe_hdr_s *)cqe;

	debug("%s: txcqe: %p\n", __func__, txcqe);

	if (txcqe->s.cqe_type != NIX_XQE_TYPE_E_SEND) {
		printf("%s: Error: Unsupported CQ header type %d\n",
		       __func__, txcqe->s.cqe_type);
		return;
	}
	nix_pf_reg_write(nix, NIXX_LF_CQ_OP_DOOR(),
			 ((u64)NIX_CQ_TX << 32) | 1);
}
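
/* Drain the TX completion queue, acknowledging every pending entry. */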
void nix_lf_flush_tx(struct udevice *dev)
{
	struct rvu_pf *rvu = dev_get_priv(dev);
	struct nix *nix = rvu->nix;
	union nixx_lf_cq_op_status op_status;
	u32 head, tail;
	void *cq_tx_base = nix->cq[NIX_CQ_TX].base;
	union nix_cqe_hdr_s *cqe;

	/* ack tx cqe entries */
	op_status.u = nix_cq_op_status(nix, NIX_CQ_TX);
	head = op_status.s.head;
	tail = op_status.s.tail;
	head &= (nix->cq[NIX_CQ_TX].qsize - 1);
	tail &= (nix->cq[NIX_CQ_TX].qsize - 1);

	debug("%s cq tx head %d tail %d\n", __func__, head, tail);
	while (head != tail) {
		cqe = cq_tx_base + head * nix->cq[NIX_CQ_TX].entry_sz;
		nix_cqe_tx_pkt_handler(nix, cqe);
		op_status.u = nix_cq_op_status(nix, NIX_CQ_TX);
		head = op_status.s.head;
		tail = op_status.s.tail;
		head &= (nix->cq[NIX_CQ_TX].qsize - 1);
		tail &= (nix->cq[NIX_CQ_TX].qsize - 1);
		debug("%s cq tx head %d tail %d\n", __func__, head, tail);
	}
}
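
/*
 * Transmit one packet: copy it into an NPA TX buffer, build a send
 * descriptor (header + scatter/gather), write it to the LMT region and
 * submit it to the SQ, retrying until the LMT store succeeds.
 */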
int nix_lf_xmit(struct udevice *dev, void *pkt, int pkt_len)
{
	struct rvu_pf *rvu = dev_get_priv(dev);
	struct nix *nix = rvu->nix;
	struct nix_tx_dr tx_dr;
	int dr_sz = (sizeof(struct nix_tx_dr) + 15) / 16 - 1;
	s64 result;
	void *packet;

	nix_lf_flush_tx(dev);
	memset((void *)&tx_dr, 0, sizeof(struct nix_tx_dr));
	/* Dump TX packet in to NPA buffer */
	packet = (void *)npa_aura_op_alloc(nix->npa, NPA_POOL_TX);
	if (!packet) {
		printf("%s TX buffers unavailable\n", __func__);
		return -1;
	}
	memcpy(packet, pkt, pkt_len);
	debug("%s TX buffer %p\n", __func__, packet);

	tx_dr.hdr.s.aura = NPA_POOL_TX;
	tx_dr.hdr.s.df = 0;
	tx_dr.hdr.s.pnc = 1;
	tx_dr.hdr.s.sq = 0;
	tx_dr.hdr.s.total = pkt_len;
	tx_dr.hdr.s.sizem1 = dr_sz - 2; /* FIXME - for now hdr+sg+sg1addr */
	debug("%s dr_sz %d\n", __func__, dr_sz);

	tx_dr.tx_sg.s.segs = 1;
	tx_dr.tx_sg.s.subdc = NIX_SUBDC_E_SG;
	tx_dr.tx_sg.s.seg1_size = pkt_len;
	tx_dr.tx_sg.s.ld_type = NIX_SENDLDTYPE_E_LDT;
	tx_dr.sg1_addr = (dma_addr_t)packet;

#define DEBUG_PKT
#ifdef DEBUG_PKT
	debug("TX PKT Data\n");
	for (int i = 0; i < pkt_len; i++) {
		if (i && (i % 8 == 0))
			debug("\n");
		debug("%02x ", *((u8 *)pkt + i));
	}
	debug("\n");
#endif

	do {
		nix_write_lmt(nix, &tx_dr, (dr_sz - 1) * 2);
		__iowmb();
		result = lmt_submit((u64)(nix->nix_base +
					  NIXX_LF_OP_SENDX(0)));
		WATCHDOG_RESET();
	} while (result == 0);

	return 0;
}

/* RX */
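
/*
 * Drain the RX completion queue: return each buffer to the RX pool and
 * ring the CQ doorbell for every entry.
 */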
void nix_lf_flush_rx(struct udevice *dev)
{
	struct rvu_pf *rvu = dev_get_priv(dev);
	struct nix *nix = rvu->nix;
	union nixx_lf_cq_op_status op_status;
	void *cq_rx_base = nix->cq[NIX_CQ_RX].base;
	struct nix_rx_dr *rx_dr;
	union nix_rx_parse_s *rxparse;
	u32 head, tail;
	u32 rx_cqe_sz = nix->cq[NIX_CQ_RX].entry_sz;
	u64 *seg;

	/* flush rx cqe entries */
	op_status.u = nix_cq_op_status(nix, NIX_CQ_RX);
	head = op_status.s.head;
	tail = op_status.s.tail;
	head &= (nix->cq[NIX_CQ_RX].qsize - 1);
	tail &= (nix->cq[NIX_CQ_RX].qsize - 1);

	debug("%s cq rx head %d tail %d\n", __func__, head, tail);
	while (head != tail) {
		rx_dr = (struct nix_rx_dr *)(cq_rx_base + head * rx_cqe_sz);
		rxparse = &rx_dr->rx_parse;

		debug("%s: rx parse: %p\n", __func__, rxparse);
		debug("%s: rx parse: desc_sizem1 %x pkt_lenm1 %x\n",
		      __func__, rxparse->s.desc_sizem1, rxparse->s.pkt_lenm1);

		seg = (dma_addr_t *)(&rx_dr->rx_sg + 1);

		st128(nix->npa->npa_base + NPA_LF_AURA_OP_FREE0(),
		      seg[0], (1ULL << 63) | NPA_POOL_RX);
		debug("%s return %llx to NPA\n", __func__, seg[0]);
		nix_pf_reg_write(nix, NIXX_LF_CQ_OP_DOOR(),
				 ((u64)NIX_CQ_RX << 32) | 1);

		op_status.u = nix_cq_op_status(nix, NIX_CQ_RX);
		head = op_status.s.head;
		tail = op_status.s.tail;
		head &= (nix->cq[NIX_CQ_RX].qsize - 1);
		tail &= (nix->cq[NIX_CQ_RX].qsize - 1);
		debug("%s cq rx head %d tail %d\n", __func__, head, tail);
	}
}
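
/* Return a received packet buffer to the NPA RX pool and ack its CQE. */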
int nix_lf_free_pkt(struct udevice *dev, uchar *pkt, int pkt_len)
{
	struct rvu_pf *rvu = dev_get_priv(dev);
	struct nix *nix = rvu->nix;

	/* Return rx packet to NPA */
	debug("%s return %p to NPA\n", __func__, pkt);
	st128(nix->npa->npa_base + NPA_LF_AURA_OP_FREE0(), (u64)pkt,
	      (1ULL << 63) | NPA_POOL_RX);
	nix_pf_reg_write(nix, NIXX_LF_CQ_OP_DOOR(),
			 ((u64)NIX_CQ_RX << 32) | 1);
	nix_lf_flush_tx(dev);
	return 0;
}
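
/*
 * Receive one packet: if the RX CQ is non-empty, parse the completion
 * entry, validate it, and hand the packet buffer back to the caller.
 * Returns the packet length, or -EAGAIN when no packet is pending.
 */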
int nix_lf_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct rvu_pf *rvu = dev_get_priv(dev);
	struct nix *nix = rvu->nix;
	union nixx_lf_cq_op_status op_status;
	void *cq_rx_base = nix->cq[NIX_CQ_RX].base;
	struct nix_rx_dr *rx_dr;
	union nix_rx_parse_s *rxparse;
	void *pkt, *cqe;
	int pkt_len = 0;
	u64 *addr;
	u32 head, tail;

	/* fetch rx cqe entries */
	op_status.u = nix_cq_op_status(nix, NIX_CQ_RX);
	head = op_status.s.head;
	tail = op_status.s.tail;
	head &= (nix->cq[NIX_CQ_RX].qsize - 1);
	tail &= (nix->cq[NIX_CQ_RX].qsize - 1);
	debug("%s cq rx head %d tail %d\n", __func__, head, tail);
	if (head == tail)
		return -EAGAIN;

	debug("%s: rx_base %p head %d sz %d\n", __func__, cq_rx_base, head,
	      nix->cq[NIX_CQ_RX].entry_sz);
	cqe = cq_rx_base + head * nix->cq[NIX_CQ_RX].entry_sz;
	rx_dr = (struct nix_rx_dr *)cqe;
	rxparse = &rx_dr->rx_parse;

	debug("%s: rx completion: %p\n", __func__, cqe);
	debug("%s: rx dr: %p\n", __func__, rx_dr);
	debug("%s: rx parse: %p\n", __func__, rxparse);
	debug("%s: rx parse: desc_sizem1 %x pkt_lenm1 %x\n",
	      __func__, rxparse->s.desc_sizem1, rxparse->s.pkt_lenm1);
	debug("%s: rx parse: pkind %x chan %x\n",
	      __func__, rxparse->s.pkind, rxparse->s.chan);

	if (rx_dr->hdr.s.cqe_type != NIX_XQE_TYPE_E_RX) {
		printf("%s: Error: Unsupported CQ header type in Rx %d\n",
		       __func__, rx_dr->hdr.s.cqe_type);
		return -1;
	}

	pkt_len = rxparse->s.pkt_lenm1 + 1;
	addr = (dma_addr_t *)(&rx_dr->rx_sg + 1);
	pkt = (void *)addr[0];

	debug("%s: segs: %d (%d@0x%llx, %d@0x%llx, %d@0x%llx)\n", __func__,
	      rx_dr->rx_sg.s.segs, rx_dr->rx_sg.s.seg1_size, addr[0],
	      rx_dr->rx_sg.s.seg2_size, addr[1],
	      rx_dr->rx_sg.s.seg3_size, addr[2]);
	if (pkt_len < rx_dr->rx_sg.s.seg1_size + rx_dr->rx_sg.s.seg2_size +
	    rx_dr->rx_sg.s.seg3_size) {
		debug("%s: Error: rx buffer size too small\n", __func__);
		return -1;
	}

	__iowmb();
#define DEBUG_PKT
#ifdef DEBUG_PKT
	debug("RX PKT Data\n");
	for (int i = 0; i < pkt_len; i++) {
		if (i && (i % 8 == 0))
			debug("\n");
		debug("%02x ", *((u8 *)pkt + i));
	}
	debug("\n");
#endif

	*packetp = (uchar *)pkt;

	return pkt_len;
}
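
/* Propagate the environment MAC address to the CGX LMAC and firmware. */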
int nix_lf_setup_mac(struct udevice *dev)
{
	struct rvu_pf *rvu = dev_get_priv(dev);
	struct nix *nix = rvu->nix;
	struct eth_pdata *pdata = dev_get_plat(dev);

	/* If the lower-level firmware fails to set a proper MAC, the
	 * U-Boot framework updates the MAC to a random address. Use
	 * this hook to update the MAC address in the CGX LMAC and call
	 * the MAC filter setup to use the new address.
	 */
	if (memcmp(nix->lmac->mac_addr, pdata->enetaddr, ARP_HLEN)) {
		memcpy(nix->lmac->mac_addr, pdata->enetaddr, ARP_HLEN);
		eth_env_set_enetaddr_by_index("eth", dev_seq(rvu->dev),
					      pdata->enetaddr);
		cgx_lmac_mac_filter_setup(nix->lmac);
		/* Update user given MAC address to ATF for update
		 * in sh_fwdata to use in Linux.
		 */
		cgx_intf_set_macaddr(dev);
		debug("%s: lMAC %pM\n", __func__, nix->lmac->mac_addr);
		debug("%s: pMAC %pM\n", __func__, pdata->enetaddr);
	}
	debug("%s: setupMAC %pM\n", __func__, pdata->enetaddr);
	return 0;
}
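
/* Stop the LMAC and flush any descriptors still pending in the queues. */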
void nix_lf_halt(struct udevice *dev)
{
	struct rvu_pf *rvu = dev_get_priv(dev);
	struct nix *nix = rvu->nix;

	cgx_lmac_rx_tx_enable(nix->lmac, nix->lmac->lmac_id, false);

	mdelay(1);

	/* Flush tx and rx descriptors */
	nix_lf_flush_rx(dev);
	nix_lf_flush_tx(dev);
}
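
/*
 * Bring up the link for this LF's LMAC (or poll its status if it is
 * already initialized) and enable RX/TX once the link is up.
 */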
int nix_lf_init(struct udevice *dev)
{
	struct rvu_pf *rvu = dev_get_priv(dev);
	struct nix *nix = rvu->nix;
	struct lmac *lmac = nix->lmac;
	int ret;
	u64 link_sts;
	u8 link, speed;
	u16 errcode;

	printf("Waiting for CGX%d LMAC%d [%s] link status...",
	       lmac->cgx->cgx_id, lmac->lmac_id,
	       lmac_type_to_str[lmac->lmac_type]);

	if (lmac->init_pend) {
		/* Bring up LMAC */
		ret = cgx_lmac_link_enable(lmac, lmac->lmac_id,
					   true, &link_sts);
		lmac->init_pend = 0;
	} else {
		ret = cgx_lmac_link_status(lmac, lmac->lmac_id, &link_sts);
	}

	if (ret) {
		printf(" [Down]\n");
		return -1;
	}

	link = link_sts & 0x1;
	speed = (link_sts >> 2) & 0xf;
	errcode = (link_sts >> 6) & 0x2ff;
	debug("%s: link %x speed %x errcode %x\n",
	      __func__, link, speed, errcode);

	/* Print link status */
	printf(" [%s]\n", link ? lmac_speed_to_str[speed] : "Down");
	if (!link)
		return -1;

	if (!lmac->init_pend)
		cgx_lmac_rx_tx_enable(lmac, lmac->lmac_id, true);

	return 0;
}
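
/* Report which CGX and LMAC this device is bound to. */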
void nix_get_cgx_lmac_id(struct udevice *dev, int *cgxid, int *lmacid)
{
	struct rvu_pf *rvu = dev_get_priv(dev);
	struct nix *nix = rvu->nix;
	struct lmac *lmac = nix->lmac;

	*cgxid = lmac->cgx->cgx_id;
	*lmacid = lmac->lmac_id;
}
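
/* Print the CGX/LMAC binding and LMAC type for this device. */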
void nix_print_mac_info(struct udevice *dev)
{
	struct rvu_pf *rvu = dev_get_priv(dev);
	struct nix *nix = rvu->nix;
	struct lmac *lmac = nix->lmac;

	printf(" CGX%d LMAC%d [%s]", lmac->cgx->cgx_id, lmac->lmac_id,
	       lmac_type_to_str[lmac->lmac_type]);
}