
From d8a62ac2d3f365054d7788fea734152c3c250e79 Mon Sep 17 00:00:00 2001
From: Frank Chang <frank.chang@sifive.com>
Date: Wed, 24 Feb 2021 16:15:52 +0800
Subject: [PATCH 082/107] target/riscv: rvv-1.0: add evl parameter to
 vext_ldst_us()

rvv v0.10 adds vector unit-stride mask load/store instructions
(vle1.v, vse1.v), which have:

  evl (effective vector length) = ceil(env->vl / 8)

The new instructions operate the same as unmasked byte loads and stores.
Add an evl parameter to vext_ldst_us() so it can be reused for them.

Signed-off-by: Frank Chang <frank.chang@sifive.com>
---
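The evl formula is a ceiling divide; a minimal sketch of the arithmetic in
C, assuming env->vl holds the current vector length in elements:

    /* one mask bit per element, packed 8 per byte -> round up */
    uint32_t evl = (env->vl + 7) >> 3;   /* e.g. vl = 17 -> evl = 3 */
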
 target/riscv/vector_helper.c | 38 +++++++++++++++++++-------------------
 1 file changed, 19 insertions(+), 19 deletions(-)

diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
index 78787af258..0e0b392a27 100644
--- a/target/riscv/vector_helper.c
+++ b/target/riscv/vector_helper.c
@@ -292,17 +292,17 @@ GEN_VEXT_ST_STRIDE(vsse64_v, int64_t, ste_d)
 /* unmasked unit-stride load and store operation*/
 static void
 vext_ldst_us(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc,
-             vext_ldst_elem_fn *ldst_elem,
-             uint32_t esz, uintptr_t ra, MMUAccessType access_type)
+             vext_ldst_elem_fn *ldst_elem, uint32_t esz, uint32_t evl,
+             uintptr_t ra, MMUAccessType access_type)
 {
     uint32_t i, k;
     uint32_t nf = vext_nf(desc);
     uint32_t max_elems = vext_max_elems(desc, esz);
 
     /* probe every access */
-    probe_pages(env, base, env->vl * (nf << esz), ra, access_type);
+    probe_pages(env, base, evl * (nf << esz), ra, access_type);
     /* load bytes from guest memory */
-    for (i = env->vstart; i < env->vl; i++) {
+    for (i = env->vstart; i < evl; i++) {
         k = 0;
         while (k < nf) {
             target_ulong addr = base + ((i * nf + k) << esz);
@@ -332,7 +332,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong base,                \
                   CPURISCVState *env, uint32_t desc)                    \
 {                                                                       \
     vext_ldst_us(vd, base, env, desc, LOAD_FN,                          \
-                 ctzl(sizeof(ETYPE)), GETPC(), MMU_DATA_LOAD);          \
+                 ctzl(sizeof(ETYPE)), env->vl, GETPC(), MMU_DATA_LOAD); \
 }
 
 GEN_VEXT_LD_US(vle8_v, int8_t, lde_b)
@@ -340,20 +340,20 @@ GEN_VEXT_LD_US(vle16_v, int16_t, lde_h)
 GEN_VEXT_LD_US(vle32_v, int32_t, lde_w)
 GEN_VEXT_LD_US(vle64_v, int64_t, lde_d)
 
-#define GEN_VEXT_ST_US(NAME, ETYPE, STORE_FN)                           \
-void HELPER(NAME##_mask)(void *vd, void *v0, target_ulong base,         \
-                         CPURISCVState *env, uint32_t desc)             \
-{                                                                       \
-    uint32_t stride = vext_nf(desc) << ctzl(sizeof(ETYPE));             \
-    vext_ldst_stride(vd, v0, base, stride, env, desc, false, STORE_FN,  \
-                     ctzl(sizeof(ETYPE)), GETPC(), MMU_DATA_STORE);     \
-}                                                                       \
-                                                                        \
-void HELPER(NAME)(void *vd, void *v0, target_ulong base,                \
-                  CPURISCVState *env, uint32_t desc)                    \
-{                                                                       \
-    vext_ldst_us(vd, base, env, desc, STORE_FN,                         \
-                 ctzl(sizeof(ETYPE)), GETPC(), MMU_DATA_STORE);         \
+#define GEN_VEXT_ST_US(NAME, ETYPE, STORE_FN)                            \
+void HELPER(NAME##_mask)(void *vd, void *v0, target_ulong base,          \
+                         CPURISCVState *env, uint32_t desc)              \
+{                                                                        \
+    uint32_t stride = vext_nf(desc) << ctzl(sizeof(ETYPE));              \
+    vext_ldst_stride(vd, v0, base, stride, env, desc, false, STORE_FN,   \
+                     ctzl(sizeof(ETYPE)), GETPC(), MMU_DATA_STORE);      \
+}                                                                        \
+                                                                         \
+void HELPER(NAME)(void *vd, void *v0, target_ulong base,                 \
+                  CPURISCVState *env, uint32_t desc)                     \
+{                                                                        \
+    vext_ldst_us(vd, base, env, desc, STORE_FN,                          \
+                 ctzl(sizeof(ETYPE)), env->vl, GETPC(), MMU_DATA_STORE); \
 }
 
 GEN_VEXT_ST_US(vse8_v, int8_t, ste_b)
-- 
2.33.1
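
As a usage sketch (not part of this diff; the actual mask load/store
helpers land in a later patch of the series), a vle1.v-style helper could
reuse vext_ldst_us() through the new parameter. The helper name and wiring
below are assumptions based on the commit message; vext_ldst_us(), lde_b,
and GETPC() come from the surrounding code:

    void HELPER(vle1_v)(void *vd, void *v0, target_ulong base,
                        CPURISCVState *env, uint32_t desc)
    {
        /* evl = ceil(vl / 8): one byte transferred per 8 mask bits */
        uint32_t evl = (env->vl + 7) >> 3;

        /* esz = 0 (byte-sized elements), i.e. ctzl(sizeof(int8_t)) */
        vext_ldst_us(vd, base, env, desc, lde_b, 0, evl,
                     GETPC(), MMU_DATA_LOAD);
    }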