0058-target-riscv-rvv-1.0-floating-point-slide-instructio.patch

From 03f8694b21e0ea6ae46eedfe9ecaa6bc4b7863e7 Mon Sep 17 00:00:00 2001
From: Frank Chang <frank.chang@sifive.com>
Date: Mon, 3 Aug 2020 19:59:31 +0800
Subject: [PATCH 058/107] target/riscv: rvv-1.0: floating-point slide
 instructions

Add the following instructions:

* vfslide1up.vf
* vfslide1down.vf
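
Illustrative usage (register choices are arbitrary; semantics as in the
helper comments below):

  vfslide1up.vf   v2, v4, fa0  # v2[0] = f[fa0], v2[i+1] = v4[i]
  vfslide1down.vf v2, v4, fa0  # v2[i] = v4[i+1], v2[vl-1] = f[fa0]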

Signed-off-by: Frank Chang <frank.chang@sifive.com>
---
 target/riscv/helper.h                   |   7 ++
 target/riscv/insn32.decode              |   2 +
 target/riscv/insn_trans/trans_rvv.c.inc |  16 +++
 target/riscv/vector_helper.c            | 141 ++++++++++++++++--------
 4 files changed, 121 insertions(+), 45 deletions(-)
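
Note on the helper rework below: the slide1up/slide1down element loop is
factored out of the .vx macros and keyed by element size (ESZ) rather than
element type, so the new floating-point .vf helpers can reuse the same loop.
A minimal standalone sketch of the shared per-element semantics (plain C,
illustrative names, masking omitted; not the QEMU code itself):

  #include <stdint.h>
  #include <stddef.h>

  /* slide1up: vd[0] = s1, vd[i] = vs2[i - 1] for 0 < i < vl */
  static void slide1up_64(uint64_t *vd, const uint64_t *vs2,
                          uint64_t s1, size_t vl)
  {
      for (size_t i = 0; i < vl; i++) {
          vd[i] = (i == 0) ? s1 : vs2[i - 1];
      }
  }

  /* slide1down: vd[vl-1] = s1, vd[i] = vs2[i + 1] for 0 <= i < vl - 1 */
  static void slide1down_64(uint64_t *vd, const uint64_t *vs2,
                            uint64_t s1, size_t vl)
  {
      for (size_t i = 0; i < vl; i++) {
          vd[i] = (i == vl - 1) ? s1 : vs2[i + 1];
      }
  }
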
diff --git a/target/riscv/helper.h b/target/riscv/helper.h
index b582fe8720..444dbca071 100644
--- a/target/riscv/helper.h
+++ b/target/riscv/helper.h
@@ -1146,6 +1146,13 @@ DEF_HELPER_6(vslide1down_vx_h, void, ptr, ptr, tl, ptr, env, i32)
 DEF_HELPER_6(vslide1down_vx_w, void, ptr, ptr, tl, ptr, env, i32)
 DEF_HELPER_6(vslide1down_vx_d, void, ptr, ptr, tl, ptr, env, i32)
 
+DEF_HELPER_6(vfslide1up_vf_h, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfslide1up_vf_w, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfslide1up_vf_d, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfslide1down_vf_h, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfslide1down_vf_w, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfslide1down_vf_d, void, ptr, ptr, i64, ptr, env, i32)
+
 DEF_HELPER_6(vrgather_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
 DEF_HELPER_6(vrgather_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
 DEF_HELPER_6(vrgather_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
index 0217ec19f8..169b73fdc4 100644
--- a/target/riscv/insn32.decode
+++ b/target/riscv/insn32.decode
@@ -552,6 +552,8 @@ vfsgnjn_vv      001001 . ..... ..... 001 ..... 1010111 @r_vm
 vfsgnjn_vf      001001 . ..... ..... 101 ..... 1010111 @r_vm
 vfsgnjx_vv      001010 . ..... ..... 001 ..... 1010111 @r_vm
 vfsgnjx_vf      001010 . ..... ..... 101 ..... 1010111 @r_vm
+vfslide1up_vf   001110 . ..... ..... 101 ..... 1010111 @r_vm
+vfslide1down_vf 001111 . ..... ..... 101 ..... 1010111 @r_vm
 vmfeq_vv        011000 . ..... ..... 001 ..... 1010111 @r_vm
 vmfeq_vf        011000 . ..... ..... 101 ..... 1010111 @r_vm
 vmfne_vv        011100 . ..... ..... 001 ..... 1010111 @r_vm
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
index 73537bc3ad..c1aaf01bdc 100644
--- a/target/riscv/insn_trans/trans_rvv.c.inc
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
@@ -3357,6 +3357,22 @@ GEN_OPIVX_TRANS(vslidedown_vx, slidedown_check)
 GEN_OPIVX_TRANS(vslide1down_vx, slidedown_check)
 GEN_OPIVI_TRANS(vslidedown_vi, IMM_ZX, vslidedown_vx, slidedown_check)
 
+/* Vector Floating-Point Slide Instructions */
+static bool fslideup_check(DisasContext *s, arg_rmrr *a)
+{
+    return slideup_check(s, a) &&
+           require_rvf(s);
+}
+
+static bool fslidedown_check(DisasContext *s, arg_rmrr *a)
+{
+    return slidedown_check(s, a) &&
+           require_rvf(s);
+}
+
+GEN_OPFVF_TRANS(vfslide1up_vf, fslideup_check)
+GEN_OPFVF_TRANS(vfslide1down_vf, fslidedown_check)
+
 /* Vector Register Gather Instruction */
 static bool vrgather_vv_check(DisasContext *s, arg_rmrr *a)
 {
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
index 745fb01aa1..3f9556f4d4 100644
--- a/target/riscv/vector_helper.c
+++ b/target/riscv/vector_helper.c
@@ -4664,57 +4664,108 @@ GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_h, uint16_t, H2)
 GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_w, uint32_t, H4)
 GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_d, uint64_t, H8)
 
-#define GEN_VEXT_VSLIDE1UP_VX(NAME, ETYPE, H)                                 \
-void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2,             \
-                  CPURISCVState *env, uint32_t desc)                          \
-{                                                                             \
-    uint32_t vm = vext_vm(desc);                                              \
-    uint32_t vl = env->vl;                                                    \
-    uint32_t i;                                                               \
-                                                                              \
-    for (i = 0; i < vl; i++) {                                                \
-        if (!vm && !vext_elem_mask(v0, i)) {                                  \
-            continue;                                                         \
-        }                                                                     \
-        if (i == 0) {                                                         \
-            *((ETYPE *)vd + H(i)) = s1;                                       \
-        } else {                                                              \
-            *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i - 1));               \
-        }                                                                     \
-    }                                                                         \
+#define GEN_VEXT_VSLIE1UP(ESZ, H)                                             \
+static void vslide1up_##ESZ(void *vd, void *v0, target_ulong s1, void *vs2,   \
+                            CPURISCVState *env, uint32_t desc)                \
+{                                                                             \
+    typedef uint##ESZ##_t ETYPE;                                              \
+    uint32_t vm = vext_vm(desc);                                              \
+    uint32_t vl = env->vl;                                                    \
+    uint32_t i;                                                               \
+                                                                              \
+    for (i = 0; i < vl; i++) {                                                \
+        if (!vm && !vext_elem_mask(v0, i)) {                                  \
+            continue;                                                         \
+        }                                                                     \
+        if (i == 0) {                                                         \
+            *((ETYPE *)vd + H(i)) = s1;                                       \
+        } else {                                                              \
+            *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i - 1));               \
+        }                                                                     \
+    }                                                                         \
+}
+
+GEN_VEXT_VSLIE1UP(8, H1)
+GEN_VEXT_VSLIE1UP(16, H2)
+GEN_VEXT_VSLIE1UP(32, H4)
+GEN_VEXT_VSLIE1UP(64, H8)
+
+#define GEN_VEXT_VSLIDE1UP_VX(NAME, ESZ)                                      \
+void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2,             \
+                  CPURISCVState *env, uint32_t desc)                          \
+{                                                                             \
+    vslide1up_##ESZ(vd, v0, s1, vs2, env, desc);                              \
 }
 
 /* vslide1up.vx vd, vs2, rs1, vm # vd[0]=x[rs1], vd[i+1] = vs2[i] */
-GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_b, uint8_t, H1)
-GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_h, uint16_t, H2)
-GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_w, uint32_t, H4)
-GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_d, uint64_t, H8)
-
-#define GEN_VEXT_VSLIDE1DOWN_VX(NAME, ETYPE, H)                               \
-void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2,             \
-                  CPURISCVState *env, uint32_t desc)                          \
-{                                                                             \
-    uint32_t vm = vext_vm(desc);                                              \
-    uint32_t vl = env->vl;                                                    \
-    uint32_t i;                                                               \
-                                                                              \
-    for (i = 0; i < vl; i++) {                                                \
-        if (!vm && !vext_elem_mask(v0, i)) {                                  \
-            continue;                                                         \
-        }                                                                     \
-        if (i == vl - 1) {                                                    \
-            *((ETYPE *)vd + H(i)) = s1;                                       \
-        } else {                                                              \
-            *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i + 1));               \
-        }                                                                     \
-    }                                                                         \
+GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_b, 8)
+GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_h, 16)
+GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_w, 32)
+GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_d, 64)
+
+#define GEN_VEXT_VSLIDE1DOWN(ESZ, H)                                          \
+static void vslide1down_##ESZ(void *vd, void *v0, target_ulong s1, void *vs2, \
+                              CPURISCVState *env, uint32_t desc)              \
+{                                                                             \
+    typedef uint##ESZ##_t ETYPE;                                              \
+    uint32_t vm = vext_vm(desc);                                              \
+    uint32_t vl = env->vl;                                                    \
+    uint32_t i;                                                               \
+                                                                              \
+    for (i = 0; i < vl; i++) {                                                \
+        if (!vm && !vext_elem_mask(v0, i)) {                                  \
+            continue;                                                         \
+        }                                                                     \
+        if (i == vl - 1) {                                                    \
+            *((ETYPE *)vd + H(i)) = s1;                                       \
+        } else {                                                              \
+            *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i + 1));               \
+        }                                                                     \
+    }                                                                         \
+}
+
+GEN_VEXT_VSLIDE1DOWN(8, H1)
+GEN_VEXT_VSLIDE1DOWN(16, H2)
+GEN_VEXT_VSLIDE1DOWN(32, H4)
+GEN_VEXT_VSLIDE1DOWN(64, H8)
+
+#define GEN_VEXT_VSLIDE1DOWN_VX(NAME, ESZ)                                    \
+void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2,             \
+                  CPURISCVState *env, uint32_t desc)                          \
+{                                                                             \
+    vslide1down_##ESZ(vd, v0, s1, vs2, env, desc);                            \
 }
 
 /* vslide1down.vx vd, vs2, rs1, vm # vd[i] = vs2[i+1], vd[vl-1]=x[rs1] */
-GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_b, uint8_t, H1)
-GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_h, uint16_t, H2)
-GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_w, uint32_t, H4)
-GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_d, uint64_t, H8)
+GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_b, 8)
+GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_h, 16)
+GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_w, 32)
+GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_d, 64)
+
+/* Vector Floating-Point Slide Instructions */
+#define GEN_VEXT_VFSLIDE1UP_VF(NAME, ESZ)                                     \
+void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2,                 \
+                  CPURISCVState *env, uint32_t desc)                          \
+{                                                                             \
+    vslide1up_##ESZ(vd, v0, s1, vs2, env, desc);                              \
+}
+
+/* vfslide1up.vf vd, vs2, rs1, vm # vd[0]=f[rs1], vd[i+1] = vs2[i] */
+GEN_VEXT_VFSLIDE1UP_VF(vfslide1up_vf_h, 16)
+GEN_VEXT_VFSLIDE1UP_VF(vfslide1up_vf_w, 32)
+GEN_VEXT_VFSLIDE1UP_VF(vfslide1up_vf_d, 64)
+
+#define GEN_VEXT_VFSLIDE1DOWN_VF(NAME, ESZ)                                   \
+void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2,                 \
+                  CPURISCVState *env, uint32_t desc)                          \
+{                                                                             \
+    vslide1down_##ESZ(vd, v0, s1, vs2, env, desc);                            \
+}
+
+/* vfslide1down.vf vd, vs2, rs1, vm # vd[i] = vs2[i+1], vd[vl-1]=f[rs1] */
+GEN_VEXT_VFSLIDE1DOWN_VF(vfslide1down_vf_h, 16)
+GEN_VEXT_VFSLIDE1DOWN_VF(vfslide1down_vf_w, 32)
+GEN_VEXT_VFSLIDE1DOWN_VF(vfslide1down_vf_d, 64)
 
 /* Vector Register Gather Instruction */
 #define GEN_VEXT_VRGATHER_VV(NAME, TS1, TS2, HS1, HS2)                        \
-- 
2.33.1