pfe_driver.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2015-2016 Freescale Semiconductor, Inc.
 * Copyright 2017 NXP
 */

#include <log.h>
#include <malloc.h>
#include <linux/delay.h>
#include <net/pfe_eth/pfe_eth.h>
#include <net/pfe_eth/pfe_firmware.h>

static struct tx_desc_s *g_tx_desc;
static struct rx_desc_s *g_rx_desc;
/*
 * HIF Rx interface function
 * Reads the Rx descriptor at the current location (rx_to_read).
 * - If the descriptor holds a valid packet, get the data pointer
 * - read the receiving phy port number from the HIF header
 * - advance the data pointer past the HIF header
 * - reduce the reported length by the HIF header size
 * - hand the packet over to the caller.
 *
 * @param[out] pkt_ptr - Pointer to store the rx packet
 * @param[out] phy_port - Pointer to store the receiving phy port
 *
 * @return 0 if no packet is pending, else the packet length
 *	   (excluding the HIF header).
 */
int pfe_recv(uchar **pkt_ptr, int *phy_port)
{
	struct rx_desc_s *rx_desc = g_rx_desc;
	struct buf_desc *bd;
	int len = 0;
	struct hif_header_s *hif_header;

	bd = rx_desc->rx_base + rx_desc->rx_to_read;

	if (readl(&bd->ctrl) & BD_CTRL_DESC_EN)
		return len; /* No pending Rx packet */

	/* This length includes the HIF header (8 bytes) */
	len = readl(&bd->ctrl) & 0xFFFF;

	hif_header = (struct hif_header_s *)DDR_PFE_TO_VIRT(readl(&bd->data));

	/* Get the receive port info from the packet */
	debug("Pkt received:");
	debug(" Pkt ptr(%p), len(%d), gemac_port(%d) status(%08x)\n",
	      hif_header, len, hif_header->port_no, readl(&bd->status));

#ifdef DEBUG
	{
		int i;
		unsigned char *p = (unsigned char *)hif_header;

		for (i = 0; i < len; i++) {
			if (!(i % 16))
				printf("\n");
			printf(" %02x", p[i]);
		}
		printf("\n");
	}
#endif

	*pkt_ptr = (uchar *)(hif_header + 1);
	*phy_port = hif_header->port_no;
	len -= sizeof(struct hif_header_s);

	return len;
}
/*
 * HIF Rx done function
 * Releases the buffer descriptor at the current rx_to_read location back
 * to the hardware and advances rx_to_read to the next location.
 */
int pfe_eth_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct rx_desc_s *rx_desc = g_rx_desc;
	struct buf_desc *bd;

	debug("%s:rx_base: %p, rx_to_read: %d\n", __func__, rx_desc->rx_base,
	      rx_desc->rx_to_read);

	bd = rx_desc->rx_base + rx_desc->rx_to_read;

	/* reset the control field */
	writel((MAX_FRAME_SIZE | BD_CTRL_LIFM | BD_CTRL_DESC_EN
		| BD_CTRL_DIR), &bd->ctrl);
	writel(0, &bd->status);

	debug("Rx Done : status: %08x, ctrl: %08x\n", readl(&bd->status),
	      readl(&bd->ctrl));

	/*
	 * Give START_STROBE to the BDP so it fetches the descriptor now
	 * instead of waiting for the next rx_poll_cycle. In the idle state
	 * (i.e. no pending rx packet) the BDP will not fetch the descriptor
	 * even if the strobe is given.
	 */
	writel((readl(HIF_RX_CTRL) | HIF_CTRL_BDP_CH_START_WSTB), HIF_RX_CTRL);

	/* advance rx_to_read to the next location */
	rx_desc->rx_to_read = (rx_desc->rx_to_read + 1)
			       & (rx_desc->rx_ring_size - 1);

	debug("Rx next pkt location: %d\n", rx_desc->rx_to_read);

	return 0;
}
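
/*
 * Illustrative sketch (not part of the driver): a typical caller-side
 * receive sequence built on pfe_recv()/pfe_eth_free_pkt(). The
 * handle_packet() helper and the udevice pointer 'dev' are placeholders
 * assumed for the example, not names defined by this driver.
 *
 *	uchar *pkt;
 *	int port, len;
 *
 *	len = pfe_recv(&pkt, &port);
 *	if (len > 0) {
 *		handle_packet(pkt, len, port);		// consume the payload
 *		pfe_eth_free_pkt(dev, pkt, len);	// recycle the Rx BD
 *	}
 */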
/*
 * HIF Tx interface function
 * This function sends a single packet to the PFE through the HIF interface.
 * - No interrupt indication on tx completion.
 * - Data is copied to the tx buffer before the tx descriptor is updated
 *   and TX DMA is enabled.
 *
 * @param[in] phy_port Phy port number on which to send out this packet
 * @param[in] data Pointer to the data
 * @param[in] length Length of the Ethernet packet to be transferred.
 *
 * @return -1 if the tx queue is full, else the tx location where the packet
 *	   was placed.
 */
int pfe_send(int phy_port, void *data, int length)
{
	struct tx_desc_s *tx_desc = g_tx_desc;
	struct buf_desc *bd;
	struct hif_header_s hif_header;
	u8 *tx_buf_va;

	debug("%s:pkt: %p, len: %d, tx_base: %p, tx_to_send: %d\n", __func__,
	      data, length, tx_desc->tx_base, tx_desc->tx_to_send);

	bd = tx_desc->tx_base + tx_desc->tx_to_send;

	/* check queue-full condition */
	if (readl(&bd->ctrl) & BD_CTRL_DESC_EN)
		return -1;

	/* PFE checks for min pkt size */
	if (length < MIN_PKT_SIZE)
		length = MIN_PKT_SIZE;

	tx_buf_va = (void *)DDR_PFE_TO_VIRT(readl(&bd->data));
	debug("%s: tx_buf_va: %p, tx_buf_pa: %08x\n", __func__, tx_buf_va,
	      readl(&bd->data));

	/* Fill the gemac/phy port number to send this packet out */
	memset(&hif_header, 0, sizeof(struct hif_header_s));
	hif_header.port_no = phy_port;

	memcpy(tx_buf_va, (u8 *)&hif_header, sizeof(struct hif_header_s));
	memcpy(tx_buf_va + sizeof(struct hif_header_s), data, length);
	length += sizeof(struct hif_header_s);

#ifdef DEBUG
	{
		int i;
		unsigned char *p = (unsigned char *)tx_buf_va;

		for (i = 0; i < length; i++) {
			if (!(i % 16))
				printf("\n");
			printf("%02x ", p[i]);
		}
	}
#endif

	debug("Tx Done: status: %08x, ctrl: %08x\n", readl(&bd->status),
	      readl(&bd->ctrl));

	/* fill the tx desc */
	writel((u32)(BD_CTRL_DESC_EN | BD_CTRL_LIFM | (length & 0xFFFF)),
	       &bd->ctrl);
	writel(0, &bd->status);
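
	/*
	 * Kick the HIF TX BDP/DMA so it fetches this descriptor; the short
	 * delay below gives the hardware time to make progress before the
	 * caller polls pfe_tx_done().
	 */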
	writel((HIF_CTRL_DMA_EN | HIF_CTRL_BDP_CH_START_WSTB), HIF_TX_CTRL);

	udelay(100);

	return tx_desc->tx_to_send;
}
/*
 * HIF Tx done function
 * Checks the tx done indication of the descriptor at the current tx_to_send
 * location and, on success, advances tx_to_send to the next location.
 *
 * @return -1 if the TX ownership bit has not been cleared by the hw,
 *	   else zero on success (tx done completion).
 */
int pfe_tx_done(void)
{
	struct tx_desc_s *tx_desc = g_tx_desc;
	struct buf_desc *bd;

	debug("%s:tx_base: %p, tx_to_send: %d\n", __func__, tx_desc->tx_base,
	      tx_desc->tx_to_send);

	bd = tx_desc->tx_base + tx_desc->tx_to_send;

	/* descriptor still owned by the hardware: tx not done yet */
	if (readl(&bd->ctrl) & BD_CTRL_DESC_EN)
		return -1;

	/* reset the control field */
	writel(0, &bd->ctrl);
	writel(0, &bd->status);

	debug("Tx Done : status: %08x, ctrl: %08x\n", readl(&bd->status),
	      readl(&bd->ctrl));

	/* advance tx_to_send to the next location */
	tx_desc->tx_to_send = (tx_desc->tx_to_send + 1)
			       & (tx_desc->tx_ring_size - 1);

	debug("Tx next pkt location: %d\n", tx_desc->tx_to_send);

	return 0;
}
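
/*
 * Illustrative sketch (not part of the driver): a typical caller-side
 * transmit sequence built on pfe_send()/pfe_tx_done(). The buffer, length,
 * port number and retry count are placeholders assumed for the example.
 *
 *	int i, rc;
 *
 *	rc = pfe_send(port, buf, len);
 *	if (rc < 0)
 *		return -EAGAIN;		// tx ring full, retry later
 *
 *	for (i = 0; i < 10 && pfe_tx_done() != 0; i++)
 *		udelay(100);		// poll until hw releases the BD
 */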
/*
 * Helper function to dump Rx descriptors.
 */
static inline void hif_rx_desc_dump(void)
{
	struct buf_desc *bd_va;
	int i;
	struct rx_desc_s *rx_desc;

	if (!g_rx_desc) {
		printf("%s: HIF Rx desc not initialized\n", __func__);
		return;
	}

	rx_desc = g_rx_desc;
	bd_va = rx_desc->rx_base;

	debug("HIF rx desc: base_va: %p, base_pa: %08x\n", rx_desc->rx_base,
	      rx_desc->rx_base_pa);
	for (i = 0; i < rx_desc->rx_ring_size; i++) {
		debug("status: %08x, ctrl: %08x, data: %08x, next: 0x%08x\n",
		      readl(&bd_va->status),
		      readl(&bd_va->ctrl),
		      readl(&bd_va->data),
		      readl(&bd_va->next));
		bd_va++;
	}
}
/*
 * This function marks all Rx descriptors as LAST_BD.
 */
void hif_rx_desc_disable(void)
{
	int i;
	struct rx_desc_s *rx_desc;
	struct buf_desc *bd_va;

	if (!g_rx_desc) {
		printf("%s: HIF Rx desc not initialized\n", __func__);
		return;
	}

	rx_desc = g_rx_desc;
	bd_va = rx_desc->rx_base;

	for (i = 0; i < rx_desc->rx_ring_size; i++) {
		writel(readl(&bd_va->ctrl) | BD_CTRL_LAST_BD, &bd_va->ctrl);
		bd_va++;
	}
}
/*
 * HIF Rx Desc initialization function.
 */
static int hif_rx_desc_init(struct pfe_ddr_address *pfe_addr)
{
	u32 ctrl;
	struct buf_desc *bd_va;
	struct buf_desc *bd_pa;
	struct rx_desc_s *rx_desc;
	u32 rx_buf_pa;
	int i;

	/* sanity check */
	if (g_rx_desc) {
		printf("%s: HIF Rx desc re-init request\n", __func__);
		return 0;
	}

	rx_desc = (struct rx_desc_s *)malloc(sizeof(struct rx_desc_s));
	if (!rx_desc) {
		printf("%s: Memory allocation failure\n", __func__);
		return -ENOMEM;
	}
	memset(rx_desc, 0, sizeof(struct rx_desc_s));

	/* init: Rx ring buffer */
	rx_desc->rx_ring_size = HIF_RX_DESC_NT;

	/* NOTE: must be 64bit aligned */
	bd_va = (struct buf_desc *)(pfe_addr->ddr_pfe_baseaddr
		 + RX_BD_BASEADDR);
	bd_pa = (struct buf_desc *)(pfe_addr->ddr_pfe_phys_baseaddr
		 + RX_BD_BASEADDR);

	rx_desc->rx_base = bd_va;
	rx_desc->rx_base_pa = (unsigned long)bd_pa;

	rx_buf_pa = pfe_addr->ddr_pfe_phys_baseaddr + HIF_RX_PKT_DDR_BASEADDR;

	debug("%s: Rx desc base: %p, base_pa: %08x, desc_count: %d\n",
	      __func__, rx_desc->rx_base, rx_desc->rx_base_pa,
	      rx_desc->rx_ring_size);

	memset(bd_va, 0, sizeof(struct buf_desc) * rx_desc->rx_ring_size);

	ctrl = (MAX_FRAME_SIZE | BD_CTRL_DESC_EN | BD_CTRL_DIR | BD_CTRL_LIFM);

	for (i = 0; i < rx_desc->rx_ring_size; i++) {
		writel((unsigned long)(bd_pa + 1), &bd_va->next);
		writel(ctrl, &bd_va->ctrl);
		writel(rx_buf_pa + (i * MAX_FRAME_SIZE), &bd_va->data);
		bd_va++;
		bd_pa++;
	}
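
	/* Step back to the last BD; wrap its next pointer to the ring base */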
	--bd_va;
	writel((u32)rx_desc->rx_base_pa, &bd_va->next);

	writel(rx_desc->rx_base_pa, HIF_RX_BDP_ADDR);
	writel((readl(HIF_RX_CTRL) | HIF_CTRL_BDP_CH_START_WSTB), HIF_RX_CTRL);

	g_rx_desc = rx_desc;

	return 0;
}
/*
 * Helper function to dump Tx descriptors.
 */
static inline void hif_tx_desc_dump(void)
{
	struct tx_desc_s *tx_desc;
	int i;
	struct buf_desc *bd_va;

	if (!g_tx_desc) {
		printf("%s: HIF Tx desc not initialized\n", __func__);
		return;
	}

	tx_desc = g_tx_desc;
	bd_va = tx_desc->tx_base;

	debug("HIF tx desc: base_va: %p, base_pa: %08x\n", tx_desc->tx_base,
	      tx_desc->tx_base_pa);
	for (i = 0; i < tx_desc->tx_ring_size; i++) {
		debug("status: %08x, ctrl: %08x, data: %08x, next: 0x%08x\n",
		      readl(&bd_va->status),
		      readl(&bd_va->ctrl),
		      readl(&bd_va->data),
		      readl(&bd_va->next));
		bd_va++;
	}
}
/*
 * HIF Tx descriptor initialization function.
 */
static int hif_tx_desc_init(struct pfe_ddr_address *pfe_addr)
{
	struct buf_desc *bd_va;
	struct buf_desc *bd_pa;
	int i;
	struct tx_desc_s *tx_desc;
	u32 tx_buf_pa;

	/* sanity check */
	if (g_tx_desc) {
		printf("%s: HIF Tx desc re-init request\n", __func__);
		return 0;
	}

	tx_desc = (struct tx_desc_s *)malloc(sizeof(struct tx_desc_s));
	if (!tx_desc) {
		printf("%s:%d:Memory allocation failure\n", __func__,
		       __LINE__);
		return -ENOMEM;
	}
	memset(tx_desc, 0, sizeof(struct tx_desc_s));

	/* init: Tx ring buffer */
	tx_desc->tx_ring_size = HIF_TX_DESC_NT;

	/* NOTE: must be 64bit aligned */
	bd_va = (struct buf_desc *)(pfe_addr->ddr_pfe_baseaddr
		 + TX_BD_BASEADDR);
	bd_pa = (struct buf_desc *)(pfe_addr->ddr_pfe_phys_baseaddr
		 + TX_BD_BASEADDR);

	tx_desc->tx_base_pa = (unsigned long)bd_pa;
	tx_desc->tx_base = bd_va;

	debug("%s: Tx desc_base: %p, base_pa: %08x, desc_count: %d\n",
	      __func__, tx_desc->tx_base, tx_desc->tx_base_pa,
	      tx_desc->tx_ring_size);
	memset(bd_va, 0, sizeof(struct buf_desc) * tx_desc->tx_ring_size);

	tx_buf_pa = pfe_addr->ddr_pfe_phys_baseaddr + HIF_TX_PKT_DDR_BASEADDR;

	for (i = 0; i < tx_desc->tx_ring_size; i++) {
		writel((unsigned long)(bd_pa + 1), &bd_va->next);
		writel(tx_buf_pa + (i * MAX_FRAME_SIZE), &bd_va->data);
		bd_va++;
		bd_pa++;
	}
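
	/* Step back to the last BD; wrap its next pointer to the ring base */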
	--bd_va;
	writel((u32)tx_desc->tx_base_pa, &bd_va->next);

	writel(tx_desc->tx_base_pa, HIF_TX_BDP_ADDR);

	g_tx_desc = tx_desc;

	return 0;
}
/*
 * PFE/Class initialization.
 */
static void pfe_class_init(struct pfe_ddr_address *pfe_addr)
{
	struct class_cfg class_cfg = {
		.route_table_baseaddr = pfe_addr->ddr_pfe_phys_baseaddr +
					ROUTE_TABLE_BASEADDR,
		.route_table_hash_bits = ROUTE_TABLE_HASH_BITS,
	};

	class_init(&class_cfg);

	debug("class init complete\n");
}
/*
 * PFE/TMU initialization.
 */
static void pfe_tmu_init(struct pfe_ddr_address *pfe_addr)
{
	struct tmu_cfg tmu_cfg = {
		.llm_base_addr = pfe_addr->ddr_pfe_phys_baseaddr
				  + TMU_LLM_BASEADDR,
		.llm_queue_len = TMU_LLM_QUEUE_LEN,
	};

	tmu_init(&tmu_cfg);

	debug("tmu init complete\n");
}
/*
 * PFE/BMU (both BMU1 & BMU2) initialization.
 */
static void pfe_bmu_init(struct pfe_ddr_address *pfe_addr)
{
	struct bmu_cfg bmu1_cfg = {
		.baseaddr = CBUS_VIRT_TO_PFE(LMEM_BASE_ADDR +
					     BMU1_LMEM_BASEADDR),
		.count = BMU1_BUF_COUNT,
		.size = BMU1_BUF_SIZE,
	};
	struct bmu_cfg bmu2_cfg = {
		.baseaddr = pfe_addr->ddr_pfe_phys_baseaddr + BMU2_DDR_BASEADDR,
		.count = BMU2_BUF_COUNT,
		.size = BMU2_BUF_SIZE,
	};

	bmu_init(BMU1_BASE_ADDR, &bmu1_cfg);
	debug("bmu1 init: done\n");

	bmu_init(BMU2_BASE_ADDR, &bmu2_cfg);
	debug("bmu2 init: done\n");
}
/*
 * PFE/GPI initialization function.
 * - egpi1, egpi2, hgpi
 */
static void pfe_gpi_init(struct pfe_ddr_address *pfe_addr)
{
	struct gpi_cfg egpi1_cfg = {
		.lmem_rtry_cnt = EGPI1_LMEM_RTRY_CNT,
		.tmlf_txthres = EGPI1_TMLF_TXTHRES,
		.aseq_len = EGPI1_ASEQ_LEN,
	};
	struct gpi_cfg egpi2_cfg = {
		.lmem_rtry_cnt = EGPI2_LMEM_RTRY_CNT,
		.tmlf_txthres = EGPI2_TMLF_TXTHRES,
		.aseq_len = EGPI2_ASEQ_LEN,
	};
	struct gpi_cfg hgpi_cfg = {
		.lmem_rtry_cnt = HGPI_LMEM_RTRY_CNT,
		.tmlf_txthres = HGPI_TMLF_TXTHRES,
		.aseq_len = HGPI_ASEQ_LEN,
	};

	gpi_init(EGPI1_BASE_ADDR, &egpi1_cfg);
	debug("GPI1 init complete\n");

	gpi_init(EGPI2_BASE_ADDR, &egpi2_cfg);
	debug("GPI2 init complete\n");

	gpi_init(HGPI_BASE_ADDR, &hgpi_cfg);
	debug("HGPI init complete\n");
}
/*
 * PFE/HIF initialization function.
 */
static int pfe_hif_init(struct pfe_ddr_address *pfe_addr)
{
	int ret = 0;

	hif_tx_disable();
	hif_rx_disable();

	ret = hif_tx_desc_init(pfe_addr);
	if (ret)
		return ret;
	ret = hif_rx_desc_init(pfe_addr);
	if (ret)
		return ret;

	hif_init();

	hif_tx_enable();
	hif_rx_enable();

	hif_rx_desc_dump();
	hif_tx_desc_dump();

	debug("HIF init complete\n");

	return ret;
}
/*
 * PFE hardware initialization
 * - PE system clock ratio setup
 * - CLASS-PE init
 * - TMU-PE init
 * - BMU1 and BMU2 init
 * - GPI init
 * - HIF tx and rx descriptor init
 * - BMU1 and BMU2 enable
 *
 * @param[in] pfe_addr Pointer to the PFE DDR address map.
 *
 * @return 0 on success.
 */
static int pfe_hw_init(struct pfe_ddr_address *pfe_addr)
{
	int ret = 0;

	debug("%s: start\n", __func__);
	writel(0x3, CLASS_PE_SYS_CLK_RATIO);
	writel(0x3, TMU_PE_SYS_CLK_RATIO);
	writel(0x3, UTIL_PE_SYS_CLK_RATIO);
	udelay(10);

	pfe_class_init(pfe_addr);

	pfe_tmu_init(pfe_addr);

	pfe_bmu_init(pfe_addr);

	pfe_gpi_init(pfe_addr);

	ret = pfe_hif_init(pfe_addr);
	if (ret)
		return ret;

	bmu_enable(BMU1_BASE_ADDR);
	debug("bmu1 enabled\n");

	bmu_enable(BMU2_BASE_ADDR);
	debug("bmu2 enabled\n");

	debug("%s: done\n", __func__);

	return ret;
}
/*
 * PFE driver init function.
 * - Initializes pfe_lib
 * - pfe hw init
 * - fw loading and enabling of the PEs
 * - should be executed once.
 *
 * @param[in] pfe_addr Pointer to the PFE DDR address map.
 */
int pfe_drv_init(struct pfe_ddr_address *pfe_addr)
{
	int ret = 0;

	pfe_lib_init();

	ret = pfe_hw_init(pfe_addr);
	if (ret)
		return ret;

	/*
	 * Load the CLASS, TMU and UTIL firmware.
	 * By now the PFE is out of reset, disabled and configured.
	 * Firmware loading must be done after pfe_hw_init().
	 */
	/* Loads the default built-in firmware */
	pfe_firmware_init();

	return ret;
}
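
/*
 * Note on the expected call order (as implied by the init/remove comments
 * above): pfe_drv_init() runs once at probe time, the HIF helpers
 * pfe_send()/pfe_recv()/pfe_tx_done()/pfe_eth_free_pkt() are used by the
 * ethernet ops, and pfe_eth_remove() releases the descriptor resources at
 * teardown.
 */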
/*
 * PFE remove function
 * - stops the PEs
 * - frees the tx/rx descriptor resources
 * - should be called once.
 *
 * @param[in] dev Pointer to the udevice being removed.
 */
int pfe_eth_remove(struct udevice *dev)
{
	if (g_tx_desc) {
		free(g_tx_desc);
		g_tx_desc = NULL;
	}

	if (g_rx_desc) {
		free(g_rx_desc);
		g_rx_desc = NULL;
	}

	pfe_firmware_exit();

	return 0;
}