/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020 Marvell International Ltd.
 *
 * Interface to the CN78XX Free Pool Allocator, a.k.a. FPA3
 */

#include "cvmx-address.h"
#include "cvmx-fpa-defs.h"
#include "cvmx-scratch.h"

#ifndef __CVMX_FPA3_H__
#define __CVMX_FPA3_H__

typedef struct {
	unsigned res0 : 6;
	unsigned node : 2;
	unsigned res1 : 2;
	unsigned lpool : 6;
	unsigned valid_magic : 16;
} cvmx_fpa3_pool_t;

typedef struct {
	unsigned res0 : 6;
	unsigned node : 2;
	unsigned res1 : 6;
	unsigned laura : 10;
	unsigned valid_magic : 16;
} cvmx_fpa3_gaura_t;

#define CVMX_FPA3_VALID_MAGIC	0xf9a3
#define CVMX_FPA3_INVALID_GAURA	((cvmx_fpa3_gaura_t){ 0, 0, 0, 0, 0 })
#define CVMX_FPA3_INVALID_POOL	((cvmx_fpa3_pool_t){ 0, 0, 0, 0, 0 })

static inline bool __cvmx_fpa3_aura_valid(cvmx_fpa3_gaura_t aura)
{
	return aura.valid_magic == CVMX_FPA3_VALID_MAGIC;
}

static inline bool __cvmx_fpa3_pool_valid(cvmx_fpa3_pool_t pool)
{
	return pool.valid_magic == CVMX_FPA3_VALID_MAGIC;
}

static inline cvmx_fpa3_gaura_t __cvmx_fpa3_gaura(int node, int laura)
{
	cvmx_fpa3_gaura_t aura;

	if (node < 0)
		node = cvmx_get_node_num();
	if (laura < 0)
		return CVMX_FPA3_INVALID_GAURA;

	aura.node = node;
	aura.laura = laura;
	aura.valid_magic = CVMX_FPA3_VALID_MAGIC;
	return aura;
}

static inline cvmx_fpa3_pool_t __cvmx_fpa3_pool(int node, int lpool)
{
	cvmx_fpa3_pool_t pool;

	if (node < 0)
		node = cvmx_get_node_num();
	if (lpool < 0)
		return CVMX_FPA3_INVALID_POOL;

	pool.node = node;
	pool.lpool = lpool;
	pool.valid_magic = CVMX_FPA3_VALID_MAGIC;
	return pool;
}

#undef CVMX_FPA3_VALID_MAGIC

/**
 * Structure describing the data format used for stores to the FPA.
 */
typedef union {
	u64 u64;
	struct {
		u64 scraddr : 8;
		u64 len : 8;
		u64 did : 8;
		u64 addr : 40;
	} s;
	struct {
		u64 scraddr : 8;
		u64 len : 8;
		u64 did : 8;
		u64 node : 4;
		u64 red : 1;
		u64 reserved2 : 9;
		u64 aura : 10;
		u64 reserved3 : 16;
	} cn78xx;
} cvmx_fpa3_iobdma_data_t;

/**
 * Struct describing load allocate operation addresses for FPA pool.
 */
union cvmx_fpa3_load_data {
	u64 u64;
	struct {
		u64 seg : 2;
		u64 reserved1 : 13;
		u64 io : 1;
		u64 did : 8;
		u64 node : 4;
		u64 red : 1;
		u64 reserved2 : 9;
		u64 aura : 10;
		u64 reserved3 : 16;
	};
};

typedef union cvmx_fpa3_load_data cvmx_fpa3_load_data_t;

/**
 * Struct describing store free operation addresses from FPA pool.
 */
union cvmx_fpa3_store_addr {
	u64 u64;
	struct {
		u64 seg : 2;
		u64 reserved1 : 13;
		u64 io : 1;
		u64 did : 8;
		u64 node : 4;
		u64 reserved2 : 10;
		u64 aura : 10;
		u64 fabs : 1;
		u64 reserved3 : 3;
		u64 dwb_count : 9;
		u64 reserved4 : 3;
	};
};

typedef union cvmx_fpa3_store_addr cvmx_fpa3_store_addr_t;

enum cvmx_fpa3_pool_alignment_e {
	FPA_NATURAL_ALIGNMENT,
	FPA_OFFSET_ALIGNMENT,
	FPA_OPAQUE_ALIGNMENT
};

#define CVMX_FPA3_AURAX_LIMIT_MAX ((1ull << 40) - 1)

/**
 * @INTERNAL
 * Accessor function to return the number of POOLs in an FPA3,
 * depending on the SoC model.
 * The number is per-node for models supporting multi-node configurations.
 */
static inline int cvmx_fpa3_num_pools(void)
{
	if (OCTEON_IS_MODEL(OCTEON_CN78XX))
		return 64;
	if (OCTEON_IS_MODEL(OCTEON_CNF75XX))
		return 32;
	if (OCTEON_IS_MODEL(OCTEON_CN73XX))
		return 32;
	printf("ERROR: %s: Unknown model\n", __func__);
	return -1;
}

/**
 * @INTERNAL
 * Accessor function to return the number of AURAs in an FPA3,
 * depending on the SoC model.
 * The number is per-node for models supporting multi-node configurations.
 */
static inline int cvmx_fpa3_num_auras(void)
{
	if (OCTEON_IS_MODEL(OCTEON_CN78XX))
		return 1024;
	if (OCTEON_IS_MODEL(OCTEON_CNF75XX))
		return 512;
	if (OCTEON_IS_MODEL(OCTEON_CN73XX))
		return 512;
	printf("ERROR: %s: Unknown model\n", __func__);
	return -1;
}

/**
 * Get the FPA3 POOL underneath an FPA3 AURA, which contains all of the
 * AURA's buffers.
 */
static inline cvmx_fpa3_pool_t cvmx_fpa3_aura_to_pool(cvmx_fpa3_gaura_t aura)
{
	cvmx_fpa3_pool_t pool;
	cvmx_fpa_aurax_pool_t aurax_pool;

	aurax_pool.u64 = cvmx_read_csr_node(aura.node, CVMX_FPA_AURAX_POOL(aura.laura));

	pool = __cvmx_fpa3_pool(aura.node, aurax_pool.s.pool);
	return pool;
}

/**
 * Get a new block from the FPA pool
 *
 * @param aura - aura number
 * Return: pointer to the block or NULL on failure
 */
static inline void *cvmx_fpa3_alloc(cvmx_fpa3_gaura_t aura)
{
	u64 address;
	cvmx_fpa3_load_data_t load_addr;

	load_addr.u64 = 0;
	load_addr.seg = CVMX_MIPS_SPACE_XKPHYS;
	load_addr.io = 1;
	load_addr.did = 0x29; /* Device ID. Indicates FPA. */
	load_addr.node = aura.node;
	load_addr.red = 0; /* Perform RED on allocation.
			    * FIXME to use config option
			    */
	load_addr.aura = aura.laura;

	address = cvmx_read64_uint64(load_addr.u64);
	if (!address)
		return NULL;
	return cvmx_phys_to_ptr(address);
}
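
/*
 * Usage sketch (the "aura" handle is assumed to reference an already
 * configured AURA): a synchronous allocation is a single 64-bit load from
 * the magic I/O address built above, and a zero result means the AURA had
 * no buffer to give out.
 *
 *	void *buf = cvmx_fpa3_alloc(aura);
 *	if (!buf)
 *		return -ENOMEM;
 */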

/**
 * Asynchronously get a new block from the FPA
 *
 * The result of cvmx_fpa3_async_alloc() may be retrieved using
 * cvmx_fpa3_async_alloc_finish().
 *
 * @param scr_addr Local scratch address to put response in. This is a byte
 *		   address but must be 8 byte aligned.
 * @param aura     Global aura to get the block from
 */
static inline void cvmx_fpa3_async_alloc(u64 scr_addr, cvmx_fpa3_gaura_t aura)
{
	cvmx_fpa3_iobdma_data_t data;

	/* Hardware only uses 64 bit aligned locations, so convert from byte
	 * address to 64-bit index
	 */
	data.u64 = 0ull;
	data.cn78xx.scraddr = scr_addr >> 3;
	data.cn78xx.len = 1;
	data.cn78xx.did = 0x29;
	data.cn78xx.node = aura.node;
	data.cn78xx.aura = aura.laura;
	cvmx_scratch_write64(scr_addr, 0ull);

	CVMX_SYNCW;
	cvmx_send_single(data.u64);
}

/**
 * Retrieve the result of cvmx_fpa3_async_alloc()
 *
 * @param scr_addr The local scratch address. Must be the same value
 *		   passed to cvmx_fpa3_async_alloc().
 *
 * @param aura     Global aura the block came from. Must be the same value
 *		   passed to cvmx_fpa3_async_alloc().
 *
 * Return: Pointer to the block or NULL on failure
 */
static inline void *cvmx_fpa3_async_alloc_finish(u64 scr_addr, cvmx_fpa3_gaura_t aura)
{
	u64 address;

	CVMX_SYNCIOBDMA;

	address = cvmx_scratch_read64(scr_addr);
	if (cvmx_likely(address))
		return cvmx_phys_to_ptr(address);
	else
		/* Try regular alloc if async failed */
		return cvmx_fpa3_alloc(aura);
}
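
/*
 * Usage sketch (scratch offset 0 and the "aura" handle are assumptions of
 * this example): kick off the IOBDMA request early, do unrelated work while
 * it completes, then collect the result. Note the finish step falls back to
 * a synchronous allocation if the async request returned no buffer.
 *
 *	cvmx_fpa3_async_alloc(0, aura);
 *	... other work ...
 *	void *buf = cvmx_fpa3_async_alloc_finish(0, aura);
 */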

/**
 * Free a pointer back to the pool.
 *
 * @param ptr  pointer to the block to free
 * @param aura global aura number
 * @param num_cache_lines Cache lines to invalidate
 */
static inline void cvmx_fpa3_free(void *ptr, cvmx_fpa3_gaura_t aura, unsigned int num_cache_lines)
{
	cvmx_fpa3_store_addr_t newptr;
	cvmx_addr_t newdata;

	newdata.u64 = cvmx_ptr_to_phys(ptr);

	/* Make sure that any previous writes to memory go out before we free
	 * this buffer. This also serves as a barrier to prevent GCC from
	 * reordering operations to after the free.
	 */
	CVMX_SYNCWS;

	newptr.u64 = 0;
	newptr.seg = CVMX_MIPS_SPACE_XKPHYS;
	newptr.io = 1;
	newptr.did = 0x29; /* Device id, indicates FPA */
	newptr.node = aura.node;
	newptr.aura = aura.laura;
	newptr.fabs = 0; /* Free absolute. FIXME to use config option */
	newptr.dwb_count = num_cache_lines;

	cvmx_write_io(newptr.u64, newdata.u64);
}
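
/*
 * Example (the buffer size is an assumption of this sketch): dwb_count asks
 * the hardware to schedule "don't write back" for that many leading cache
 * lines of the freed block, so dirty but now-meaningless data is not flushed
 * to DRAM. For a 2048-byte buffer with 128-byte cache lines:
 *
 *	cvmx_fpa3_free(buf, aura, 2048 / 128);
 */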

/**
 * Free a pointer back to the pool without flushing the write buffer.
 *
 * @param ptr  pointer to the block to free
 * @param aura global aura number
 * @param num_cache_lines Cache lines to invalidate
 */
static inline void cvmx_fpa3_free_nosync(void *ptr, cvmx_fpa3_gaura_t aura,
					 unsigned int num_cache_lines)
{
	cvmx_fpa3_store_addr_t newptr;
	cvmx_addr_t newdata;

	newdata.u64 = cvmx_ptr_to_phys(ptr);

	/* Prevent GCC from reordering writes to (*ptr) */
	asm volatile("" : : : "memory");

	newptr.u64 = 0;
	newptr.seg = CVMX_MIPS_SPACE_XKPHYS;
	newptr.io = 1;
	newptr.did = 0x29; /* Device id, indicates FPA */
	newptr.node = aura.node;
	newptr.aura = aura.laura;
	newptr.fabs = 0; /* Free absolute. FIXME to use config option */
	newptr.dwb_count = num_cache_lines;

	cvmx_write_io(newptr.u64, newdata.u64);
}

static inline int cvmx_fpa3_pool_is_enabled(cvmx_fpa3_pool_t pool)
{
	cvmx_fpa_poolx_cfg_t pool_cfg;

	if (!__cvmx_fpa3_pool_valid(pool))
		return -1;

	pool_cfg.u64 = cvmx_read_csr_node(pool.node, CVMX_FPA_POOLX_CFG(pool.lpool));
	return pool_cfg.cn78xx.ena;
}

static inline int cvmx_fpa3_config_red_params(unsigned int node, int qos_avg_en, int red_lvl_dly,
					      int avg_dly)
{
	cvmx_fpa_gen_cfg_t fpa_cfg;
	cvmx_fpa_red_delay_t red_delay;

	fpa_cfg.u64 = cvmx_read_csr_node(node, CVMX_FPA_GEN_CFG);
	fpa_cfg.s.avg_en = qos_avg_en;
	fpa_cfg.s.lvl_dly = red_lvl_dly;
	cvmx_write_csr_node(node, CVMX_FPA_GEN_CFG, fpa_cfg.u64);

	red_delay.u64 = cvmx_read_csr_node(node, CVMX_FPA_RED_DELAY);
	red_delay.s.avg_dly = avg_dly;
	cvmx_write_csr_node(node, CVMX_FPA_RED_DELAY, red_delay.u64);
	return 0;
}

/**
 * Gets the buffer size of the pool backing the specified AURA.
 *
 * @param aura Global aura number
 * Return: size of the buffers in the backing pool, in bytes
 */
static inline int cvmx_fpa3_get_aura_buf_size(cvmx_fpa3_gaura_t aura)
{
	cvmx_fpa3_pool_t pool;
	cvmx_fpa_poolx_cfg_t pool_cfg;
	int block_size;

	pool = cvmx_fpa3_aura_to_pool(aura);

	pool_cfg.u64 = cvmx_read_csr_node(pool.node, CVMX_FPA_POOLX_CFG(pool.lpool));
	block_size = pool_cfg.cn78xx.buf_size << 7;
	return block_size;
}
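
/*
 * Example: BUF_SIZE is stored in units of 128 bytes (hence the << 7), so a
 * pool configured with buf_size = 16 provides 16 << 7 = 2048-byte buffers.
 */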

/**
 * Return the number of available buffers in an AURA
 *
 * @param aura AURA to receive the count for
 * Return: available buffer count
 */
static inline long long cvmx_fpa3_get_available(cvmx_fpa3_gaura_t aura)
{
	cvmx_fpa3_pool_t pool;
	cvmx_fpa_poolx_available_t avail_reg;
	cvmx_fpa_aurax_cnt_t cnt_reg;
	cvmx_fpa_aurax_cnt_limit_t limit_reg;
	long long ret;

	pool = cvmx_fpa3_aura_to_pool(aura);

	/* Get POOL available buffer count */
	avail_reg.u64 = cvmx_read_csr_node(pool.node, CVMX_FPA_POOLX_AVAILABLE(pool.lpool));

	/* Get AURA current available count */
	cnt_reg.u64 = cvmx_read_csr_node(aura.node, CVMX_FPA_AURAX_CNT(aura.laura));
	limit_reg.u64 = cvmx_read_csr_node(aura.node, CVMX_FPA_AURAX_CNT_LIMIT(aura.laura));

	if (limit_reg.cn78xx.limit < cnt_reg.cn78xx.cnt)
		return 0;

	/* Calculate AURA-based buffer allowance */
	ret = limit_reg.cn78xx.limit - cnt_reg.cn78xx.cnt;

	/* Use POOL real buffer availability when less than the allowance */
	if (ret > (long long)avail_reg.cn78xx.count)
		ret = avail_reg.cn78xx.count;

	return ret;
}
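
/*
 * Example: with an AURA limit of 1000, a current count of 200, and 500
 * buffers actually available in the backing POOL, the AURA allowance is
 * 1000 - 200 = 800, but the function returns the smaller POOL availability
 * of 500.
 */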

/**
 * Configure the QoS parameters of an FPA3 AURA
 *
 * @param aura is the FPA3 AURA handle
 * @param ena_red enables random early discard when the outstanding count exceeds 'pass_thresh'
 * @param pass_thresh is the count above which RED begins to drop a fraction of requests
 * @param drop_thresh is the count at or above which all requests are dropped
 * @param ena_bp enables backpressure when the outstanding count exceeds 'bp_thresh'
 * @param bp_thresh is the back-pressure threshold
 */
static inline void cvmx_fpa3_setup_aura_qos(cvmx_fpa3_gaura_t aura, bool ena_red, u64 pass_thresh,
					    u64 drop_thresh, bool ena_bp, u64 bp_thresh)
{
	unsigned int shift = 0;
	u64 shift_thresh;
	cvmx_fpa_aurax_cnt_limit_t limit_reg;
	cvmx_fpa_aurax_cnt_levels_t aura_level;

	if (!__cvmx_fpa3_aura_valid(aura))
		return;

	/* Get AURAX count limit for validation */
	limit_reg.u64 = cvmx_read_csr_node(aura.node, CVMX_FPA_AURAX_CNT_LIMIT(aura.laura));

	if (pass_thresh < 256)
		pass_thresh = 255;

	if (drop_thresh <= pass_thresh || drop_thresh > limit_reg.cn78xx.limit)
		drop_thresh = limit_reg.cn78xx.limit;

	if (bp_thresh < 256 || bp_thresh > limit_reg.cn78xx.limit)
		bp_thresh = limit_reg.cn78xx.limit >> 1;

	shift_thresh = (bp_thresh > drop_thresh) ? bp_thresh : drop_thresh;

	/* Calculate shift so that the largest threshold fits in 8 bits */
	for (shift = 0; shift < (1 << 6); shift++) {
		if (0 == ((shift_thresh >> shift) & ~0xffull))
			break;
	}

	aura_level.u64 = cvmx_read_csr_node(aura.node, CVMX_FPA_AURAX_CNT_LEVELS(aura.laura));
	aura_level.s.pass = pass_thresh >> shift;
	aura_level.s.drop = drop_thresh >> shift;
	aura_level.s.bp = bp_thresh >> shift;
	aura_level.s.shift = shift;
	aura_level.s.red_ena = ena_red;
	aura_level.s.bp_ena = ena_bp;
	cvmx_write_csr_node(aura.node, CVMX_FPA_AURAX_CNT_LEVELS(aura.laura), aura_level.u64);
}
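
/*
 * Example of the threshold encoding above: with drop_thresh = 20000 and
 * bp_thresh = 10000, the largest threshold is 20000, so the loop settles on
 * shift = 7, the smallest shift for which 20000 >> 7 = 156 fits in 8 bits.
 * All three levels are then programmed right-shifted by 7, giving the
 * thresholds a granularity of 2^7 = 128 buffers.
 */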

cvmx_fpa3_gaura_t cvmx_fpa3_reserve_aura(int node, int desired_aura_num);
int cvmx_fpa3_release_aura(cvmx_fpa3_gaura_t aura);
cvmx_fpa3_pool_t cvmx_fpa3_reserve_pool(int node, int desired_pool_num);
int cvmx_fpa3_release_pool(cvmx_fpa3_pool_t pool);
int cvmx_fpa3_is_aura_available(int node, int aura_num);
int cvmx_fpa3_is_pool_available(int node, int pool_num);

cvmx_fpa3_pool_t cvmx_fpa3_setup_fill_pool(int node, int desired_pool, const char *name,
					   unsigned int block_size, unsigned int num_blocks,
					   void *buffer);

/**
 * Function to attach an aura to an existing pool
 *
 * @param pool - configured pool to attach the aura to; the pool handle
 *		 also selects the node to configure
 * @param desired_aura - aura number to use, or -1 to allocate one dynamically
 * @param name - name to register
 * @param block_size - size of buffers to use
 * @param num_blocks - number of blocks to allocate
 *
 * Return: configured gaura on success, CVMX_FPA3_INVALID_GAURA on failure
 */
cvmx_fpa3_gaura_t cvmx_fpa3_set_aura_for_pool(cvmx_fpa3_pool_t pool, int desired_aura,
					      const char *name, unsigned int block_size,
					      unsigned int num_blocks);

/**
 * Function to set up and initialize a pool.
 *
 * @param node - configure fpa on this node
 * @param desired_aura - aura number to use, or -1 for dynamic allocation
 * @param name - name to register
 * @param buffer - memory to use for the pool's buffers
 * @param block_size - size of buffers in pool
 * @param num_blocks - max number of buffers allowed
 *
 * Return: configured gaura on success, CVMX_FPA3_INVALID_GAURA on failure
 */
cvmx_fpa3_gaura_t cvmx_fpa3_setup_aura_and_pool(int node, int desired_aura, const char *name,
						void *buffer, unsigned int block_size,
						unsigned int num_blocks);
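
/*
 * Usage sketch (the node number, pool name, sizes and the "backing" buffer
 * are assumptions of this example): set up an AURA plus backing POOL,
 * allocate and free one buffer, then tear everything down.
 *
 *	void *backing = <memory sized for 1024 blocks of 2048 bytes>;
 *	cvmx_fpa3_gaura_t aura;
 *
 *	aura = cvmx_fpa3_setup_aura_and_pool(0, -1, "pkt-pool", backing,
 *					     2048, 1024);
 *	if (__cvmx_fpa3_aura_valid(aura)) {
 *		void *buf = cvmx_fpa3_alloc(aura);
 *		if (buf)
 *			cvmx_fpa3_free(buf, aura, 0);
 *		cvmx_fpa3_shutdown_aura_and_pool(aura);
 *	}
 */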

int cvmx_fpa3_shutdown_aura_and_pool(cvmx_fpa3_gaura_t aura);
int cvmx_fpa3_shutdown_aura(cvmx_fpa3_gaura_t aura);
int cvmx_fpa3_shutdown_pool(cvmx_fpa3_pool_t pool);
const char *cvmx_fpa3_get_pool_name(cvmx_fpa3_pool_t pool);
int cvmx_fpa3_get_pool_buf_size(cvmx_fpa3_pool_t pool);
const char *cvmx_fpa3_get_aura_name(cvmx_fpa3_gaura_t aura);

/* FIXME: Need a different macro for stage2 of u-boot */

static inline void cvmx_fpa3_stage2_init(int aura, int pool, u64 stack_paddr, int stacklen,
					 int buffer_sz, int buf_cnt)
{
	cvmx_fpa_poolx_cfg_t pool_cfg;

	/* Configure pool stack */
	cvmx_write_csr_node(0, CVMX_FPA_POOLX_STACK_BASE(pool), stack_paddr);
	cvmx_write_csr_node(0, CVMX_FPA_POOLX_STACK_ADDR(pool), stack_paddr);
	cvmx_write_csr_node(0, CVMX_FPA_POOLX_STACK_END(pool), stack_paddr + stacklen);

	/* Configure pool with buffer size; write once with ENA clear to
	 * reset the pool before starting it
	 */
	pool_cfg.u64 = 0;
	pool_cfg.cn78xx.nat_align = 1;
	pool_cfg.cn78xx.buf_size = buffer_sz >> 7;
	pool_cfg.cn78xx.l_type = 0x2;
	pool_cfg.cn78xx.ena = 0;
	cvmx_write_csr_node(0, CVMX_FPA_POOLX_CFG(pool), pool_cfg.u64);
	/* Now enable the pool */
	pool_cfg.cn78xx.ena = 1;
	cvmx_write_csr_node(0, CVMX_FPA_POOLX_CFG(pool), pool_cfg.u64);

	cvmx_write_csr_node(0, CVMX_FPA_AURAX_CFG(aura), 0);
	cvmx_write_csr_node(0, CVMX_FPA_AURAX_CNT_ADD(aura), buf_cnt);
	cvmx_write_csr_node(0, CVMX_FPA_AURAX_POOL(aura), (u64)pool);
}
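
/*
 * Usage sketch (the stack region and sizes are hypothetical early-boot
 * values): bring up pool 0 / aura 0 with a pre-reserved stack region and
 * 256 buffers of 2048 bytes, then shut it down again after stage 2.
 *
 *	cvmx_fpa3_stage2_init(0, 0, stack_paddr, stack_len, 2048, 256);
 *	... stage-2 allocations via aura 0 ...
 *	cvmx_fpa3_stage2_disable(0, 0);
 */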

static inline void cvmx_fpa3_stage2_disable(int aura, int pool)
{
	cvmx_write_csr_node(0, CVMX_FPA_AURAX_POOL(aura), 0);
	cvmx_write_csr_node(0, CVMX_FPA_POOLX_CFG(pool), 0);
	cvmx_write_csr_node(0, CVMX_FPA_POOLX_STACK_BASE(pool), 0);
	cvmx_write_csr_node(0, CVMX_FPA_POOLX_STACK_ADDR(pool), 0);
	cvmx_write_csr_node(0, CVMX_FPA_POOLX_STACK_END(pool), 0);
}

#endif /* __CVMX_FPA3_H__ */