From 305f1792e72938914801e9d20b1b2f2d16d57cc7 Mon Sep 17 00:00:00 2001
From: Frank Chang <frank.chang@sifive.com>
Date: Fri, 7 Aug 2020 15:28:58 +0800
Subject: [PATCH 023/107] target/riscv: introduce more imm value modes in
 translator functions

The immediate value in the translator functions is no longer only
zero-extended or sign-extended; more modes are introduced so that the
immediate is applicable to the various vector instruction formats.

* IMM_ZX:         Zero-extended
* IMM_SX:         Sign-extended
* IMM_TRUNC_SEW:  Truncate to log(SEW) bits
* IMM_TRUNC_2SEW: Truncate to log(2*SEW) bits

Signed-off-by: Frank Chang <frank.chang@sifive.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
---
 target/riscv/insn_trans/trans_rvv.c.inc | 115 ++++++++++++++----------
 1 file changed, 66 insertions(+), 49 deletions(-)
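
A minimal, self-contained sketch of how the four modes interpret the 5-bit
immediate field is shown below. It is not part of the patch; it assumes the
usual RVV encoding in QEMU where s->sew == 0 means 8-bit elements, i.e. the
element width is (8 << sew) bits and log(SEW) == sew + 3. The helpers zext()
and sext() are local stand-ins for QEMU's extract64()/sextract64().

/* Standalone illustration of the imm_mode_t semantics (not QEMU code). */
#include <stdint.h>
#include <stdio.h>

typedef enum { IMM_ZX, IMM_SX, IMM_TRUNC_SEW, IMM_TRUNC_2SEW } imm_mode_t;

/* Keep the low 'len' bits of 'imm', zero-extended. */
static int64_t zext(uint32_t imm, unsigned len)
{
    return imm & ((1ull << len) - 1);
}

/* Keep the low 'len' bits of 'imm', sign-extended from bit len - 1. */
static int64_t sext(uint32_t imm, unsigned len)
{
    int64_t v = zext(imm, len);
    return (v ^ (1ll << (len - 1))) - (1ll << (len - 1));
}

static int64_t extract_imm_demo(unsigned sew, uint32_t imm, imm_mode_t mode)
{
    switch (mode) {
    case IMM_ZX:
        return zext(imm, 5);        /* e.g. shift amounts, unsigned compares */
    case IMM_SX:
        return sext(imm, 5);        /* e.g. vadd.vi, signed compares */
    case IMM_TRUNC_SEW:
        return zext(imm, sew + 3);  /* keep only log(SEW) bits */
    case IMM_TRUNC_2SEW:
        return zext(imm, sew + 4);  /* keep only log(2*SEW) bits */
    }
    return 0;
}

int main(void)
{
    /* imm = 0b11110: 30 zero-extended, -2 sign-extended, and 6 when
     * truncated to log(SEW) = 3 bits for 8-bit elements (sew == 0). */
    uint32_t imm = 0x1e;
    printf("ZX=%lld SX=%lld TRUNC_SEW=%lld\n",
           (long long)extract_imm_demo(0, imm, IMM_ZX),
           (long long)extract_imm_demo(0, imm, IMM_SX),
           (long long)extract_imm_demo(0, imm, IMM_TRUNC_SEW));
    return 0;
}

The two truncation modes are presumably intended for immediates that are only
meaningful modulo the element width (for example, narrowing shift amounts);
this patch itself only switches the existing call sites to IMM_ZX or IMM_SX.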

diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
index f666c64bbe..99d14caa14 100644
--- a/target/riscv/insn_trans/trans_rvv.c.inc
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
@@ -1307,8 +1307,32 @@ static void tcg_gen_gvec_rsubs(unsigned vece, uint32_t dofs, uint32_t aofs,
 
 GEN_OPIVX_GVEC_TRANS(vrsub_vx, rsubs)
 
+typedef enum {
+    IMM_ZX,         /* Zero-extended */
+    IMM_SX,         /* Sign-extended */
+    IMM_TRUNC_SEW,  /* Truncate to log(SEW) bits */
+    IMM_TRUNC_2SEW, /* Truncate to log(2*SEW) bits */
+} imm_mode_t;
+
+static int64_t extract_imm(DisasContext *s, uint32_t imm, imm_mode_t imm_mode)
+{
+    switch (imm_mode) {
+    case IMM_ZX:
+        return extract64(imm, 0, 5);
+    case IMM_SX:
+        return sextract64(imm, 0, 5);
+    case IMM_TRUNC_SEW:
+        return extract64(imm, 0, s->sew + 3);
+    case IMM_TRUNC_2SEW:
+        return extract64(imm, 0, s->sew + 4);
+    default:
+        g_assert_not_reached();
+    }
+}
+
 static bool opivi_trans(uint32_t vd, uint32_t imm, uint32_t vs2, uint32_t vm,
-                        gen_helper_opivx *fn, DisasContext *s, int zx)
+                        gen_helper_opivx *fn, DisasContext *s,
+                        imm_mode_t imm_mode)
 {
     TCGv_ptr dest, src2, mask;
     TCGv src1;
@@ -1321,11 +1345,8 @@ static bool opivi_trans(uint32_t vd, uint32_t imm, uint32_t vs2, uint32_t vm,
     dest = tcg_temp_new_ptr();
     mask = tcg_temp_new_ptr();
     src2 = tcg_temp_new_ptr();
-    if (zx) {
-        src1 = tcg_const_tl(imm);
-    } else {
-        src1 = tcg_const_tl(sextract64(imm, 0, 5));
-    }
+    src1 = tcg_const_tl(extract_imm(s, imm, imm_mode));
+
     data = FIELD_DP32(data, VDATA, VM, vm);
     data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
     desc = tcg_const_i32(simd_desc(0, s->vlen / 8, data));
@@ -1351,28 +1372,23 @@ typedef void GVecGen2iFn(unsigned, uint32_t, uint32_t, int64_t,
 
 static inline bool
 do_opivi_gvec(DisasContext *s, arg_rmrr *a, GVecGen2iFn *gvec_fn,
-              gen_helper_opivx *fn, int zx)
+              gen_helper_opivx *fn, imm_mode_t imm_mode)
 {
     if (!opivx_check(s, a)) {
         return false;
     }
 
     if (a->vm && s->vl_eq_vlmax) {
-        if (zx) {
-            gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2),
-                    extract64(a->rs1, 0, 5), MAXSZ(s), MAXSZ(s));
-        } else {
-            gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2),
-                    sextract64(a->rs1, 0, 5), MAXSZ(s), MAXSZ(s));
-        }
+        gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2),
+                extract_imm(s, a->rs1, imm_mode), MAXSZ(s), MAXSZ(s));
         mark_vs_dirty(s);
         return true;
     }
-    return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s, zx);
+    return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s, imm_mode);
 }
 
 /* OPIVI with GVEC IR */
-#define GEN_OPIVI_GVEC_TRANS(NAME, ZX, OPIVX, SUF) \
+#define GEN_OPIVI_GVEC_TRANS(NAME, IMM_MODE, OPIVX, SUF) \
 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
 { \
     static gen_helper_opivx * const fns[4] = { \
@@ -1380,10 +1396,10 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
         gen_helper_##OPIVX##_w, gen_helper_##OPIVX##_d, \
     }; \
     return do_opivi_gvec(s, a, tcg_gen_gvec_##SUF, \
-                         fns[s->sew], ZX); \
+                         fns[s->sew], IMM_MODE); \
 }
 
-GEN_OPIVI_GVEC_TRANS(vadd_vi, 0, vadd_vx, addi)
+GEN_OPIVI_GVEC_TRANS(vadd_vi, IMM_SX, vadd_vx, addi)
 
 static void tcg_gen_gvec_rsubi(unsigned vece, uint32_t dofs, uint32_t aofs,
                                int64_t c, uint32_t oprsz, uint32_t maxsz)
@@ -1393,7 +1409,7 @@ static void tcg_gen_gvec_rsubi(unsigned vece, uint32_t dofs, uint32_t aofs,
     tcg_temp_free_i64(tmp);
 }
 
-GEN_OPIVI_GVEC_TRANS(vrsub_vi, 0, vrsub_vx, rsubi)
+GEN_OPIVI_GVEC_TRANS(vrsub_vi, IMM_SX, vrsub_vx, rsubi)
 
 /* Vector Widening Integer Add/Subtract */
 
@@ -1648,7 +1664,7 @@ GEN_OPIVX_TRANS(vmadc_vxm, opivx_vmadc_check)
 GEN_OPIVX_TRANS(vmsbc_vxm, opivx_vmadc_check)
 
 /* OPIVI without GVEC IR */
-#define GEN_OPIVI_TRANS(NAME, ZX, OPIVX, CHECK) \
+#define GEN_OPIVI_TRANS(NAME, IMM_MODE, OPIVX, CHECK) \
 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
 { \
     if (CHECK(s, a)) { \
@@ -1657,13 +1673,13 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
             gen_helper_##OPIVX##_w, gen_helper_##OPIVX##_d, \
         }; \
         return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, \
-                           fns[s->sew], s, ZX); \
+                           fns[s->sew], s, IMM_MODE); \
     } \
     return false; \
 }
 
-GEN_OPIVI_TRANS(vadc_vim, 0, vadc_vxm, opivx_vadc_check)
-GEN_OPIVI_TRANS(vmadc_vim, 0, vmadc_vxm, opivx_vmadc_check)
+GEN_OPIVI_TRANS(vadc_vim, IMM_SX, vadc_vxm, opivx_vadc_check)
+GEN_OPIVI_TRANS(vmadc_vim, IMM_SX, vmadc_vxm, opivx_vmadc_check)
 
 /* Vector Bitwise Logical Instructions */
 GEN_OPIVV_GVEC_TRANS(vand_vv, and)
@@ -1672,9 +1688,9 @@ GEN_OPIVV_GVEC_TRANS(vxor_vv, xor)
 GEN_OPIVX_GVEC_TRANS(vand_vx, ands)
 GEN_OPIVX_GVEC_TRANS(vor_vx, ors)
 GEN_OPIVX_GVEC_TRANS(vxor_vx, xors)
-GEN_OPIVI_GVEC_TRANS(vand_vi, 0, vand_vx, andi)
-GEN_OPIVI_GVEC_TRANS(vor_vi, 0, vor_vx, ori)
-GEN_OPIVI_GVEC_TRANS(vxor_vi, 0, vxor_vx, xori)
+GEN_OPIVI_GVEC_TRANS(vand_vi, IMM_SX, vand_vx, andi)
+GEN_OPIVI_GVEC_TRANS(vor_vi, IMM_SX, vor_vx, ori)
+GEN_OPIVI_GVEC_TRANS(vxor_vi, IMM_SX, vxor_vx, xori)
 
 /* Vector Single-Width Bit Shift Instructions */
 GEN_OPIVV_GVEC_TRANS(vsll_vv, shlv)
@@ -1725,9 +1741,9 @@ GEN_OPIVX_GVEC_SHIFT_TRANS(vsll_vx, shls)
 GEN_OPIVX_GVEC_SHIFT_TRANS(vsrl_vx, shrs)
 GEN_OPIVX_GVEC_SHIFT_TRANS(vsra_vx, sars)
 
-GEN_OPIVI_GVEC_TRANS(vsll_vi, 1, vsll_vx, shli)
-GEN_OPIVI_GVEC_TRANS(vsrl_vi, 1, vsrl_vx, shri)
-GEN_OPIVI_GVEC_TRANS(vsra_vi, 1, vsra_vx, sari)
+GEN_OPIVI_GVEC_TRANS(vsll_vi, IMM_ZX, vsll_vx, shli)
+GEN_OPIVI_GVEC_TRANS(vsrl_vi, IMM_ZX, vsrl_vx, shri)
+GEN_OPIVI_GVEC_TRANS(vsra_vi, IMM_ZX, vsra_vx, sari)
 
 /* Vector Narrowing Integer Right Shift Instructions */
 static bool opivv_narrow_check(DisasContext *s, arg_rmrr *a)
@@ -1792,7 +1808,7 @@ GEN_OPIVX_NARROW_TRANS(vnsra_vx)
 GEN_OPIVX_NARROW_TRANS(vnsrl_vx)
 
 /* OPIVI with NARROW */
-#define GEN_OPIVI_NARROW_TRANS(NAME, ZX, OPIVX) \
+#define GEN_OPIVI_NARROW_TRANS(NAME, IMM_MODE, OPIVX) \
 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
 { \
     if (opivx_narrow_check(s, a)) { \
@@ -1802,13 +1818,13 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
             gen_helper_##OPIVX##_w, \
         }; \
         return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, \
-                           fns[s->sew], s, ZX); \
+                           fns[s->sew], s, IMM_MODE); \
     } \
     return false; \
 }
 
-GEN_OPIVI_NARROW_TRANS(vnsra_vi, 1, vnsra_vx)
-GEN_OPIVI_NARROW_TRANS(vnsrl_vi, 1, vnsrl_vx)
+GEN_OPIVI_NARROW_TRANS(vnsra_vi, IMM_ZX, vnsra_vx)
+GEN_OPIVI_NARROW_TRANS(vnsrl_vi, IMM_ZX, vnsrl_vx)
 
 /* Vector Integer Comparison Instructions */
 /*
@@ -1846,12 +1862,12 @@ GEN_OPIVX_TRANS(vmsle_vx, opivx_cmp_check)
 GEN_OPIVX_TRANS(vmsgtu_vx, opivx_cmp_check)
 GEN_OPIVX_TRANS(vmsgt_vx, opivx_cmp_check)
 
-GEN_OPIVI_TRANS(vmseq_vi, 0, vmseq_vx, opivx_cmp_check)
-GEN_OPIVI_TRANS(vmsne_vi, 0, vmsne_vx, opivx_cmp_check)
-GEN_OPIVI_TRANS(vmsleu_vi, 1, vmsleu_vx, opivx_cmp_check)
-GEN_OPIVI_TRANS(vmsle_vi, 0, vmsle_vx, opivx_cmp_check)
-GEN_OPIVI_TRANS(vmsgtu_vi, 1, vmsgtu_vx, opivx_cmp_check)
-GEN_OPIVI_TRANS(vmsgt_vi, 0, vmsgt_vx, opivx_cmp_check)
+GEN_OPIVI_TRANS(vmseq_vi, IMM_SX, vmseq_vx, opivx_cmp_check)
+GEN_OPIVI_TRANS(vmsne_vi, IMM_SX, vmsne_vx, opivx_cmp_check)
+GEN_OPIVI_TRANS(vmsleu_vi, IMM_ZX, vmsleu_vx, opivx_cmp_check)
+GEN_OPIVI_TRANS(vmsle_vi, IMM_SX, vmsle_vx, opivx_cmp_check)
+GEN_OPIVI_TRANS(vmsgtu_vi, IMM_ZX, vmsgtu_vx, opivx_cmp_check)
+GEN_OPIVI_TRANS(vmsgt_vi, IMM_SX, vmsgt_vx, opivx_cmp_check)
 
 /* Vector Integer Min/Max Instructions */
 GEN_OPIVV_GVEC_TRANS(vminu_vv, umin)
@@ -2027,7 +2043,7 @@ static bool trans_vmv_v_i(DisasContext *s, arg_vmv_v_i *a)
 
 GEN_OPIVV_TRANS(vmerge_vvm, opivv_vadc_check)
 GEN_OPIVX_TRANS(vmerge_vxm, opivx_vadc_check)
-GEN_OPIVI_TRANS(vmerge_vim, 0, vmerge_vxm, opivx_vadc_check)
+GEN_OPIVI_TRANS(vmerge_vim, IMM_SX, vmerge_vxm, opivx_vadc_check)
 
 /*
  *** Vector Fixed-Point Arithmetic Instructions
@@ -2042,8 +2058,8 @@ GEN_OPIVX_TRANS(vsaddu_vx, opivx_check)
 GEN_OPIVX_TRANS(vsadd_vx, opivx_check)
 GEN_OPIVX_TRANS(vssubu_vx, opivx_check)
 GEN_OPIVX_TRANS(vssub_vx, opivx_check)
-GEN_OPIVI_TRANS(vsaddu_vi, 1, vsaddu_vx, opivx_check)
-GEN_OPIVI_TRANS(vsadd_vi, 0, vsadd_vx, opivx_check)
+GEN_OPIVI_TRANS(vsaddu_vi, IMM_ZX, vsaddu_vx, opivx_check)
+GEN_OPIVI_TRANS(vsadd_vi, IMM_SX, vsadd_vx, opivx_check)
 
 /* Vector Single-Width Averaging Add and Subtract */
 GEN_OPIVV_TRANS(vaadd_vv, opivv_check)
@@ -2070,16 +2086,16 @@ GEN_OPIVV_TRANS(vssrl_vv, opivv_check)
 GEN_OPIVV_TRANS(vssra_vv, opivv_check)
 GEN_OPIVX_TRANS(vssrl_vx, opivx_check)
 GEN_OPIVX_TRANS(vssra_vx, opivx_check)
-GEN_OPIVI_TRANS(vssrl_vi, 1, vssrl_vx, opivx_check)
-GEN_OPIVI_TRANS(vssra_vi, 0, vssra_vx, opivx_check)
+GEN_OPIVI_TRANS(vssrl_vi, IMM_ZX, vssrl_vx, opivx_check)
+GEN_OPIVI_TRANS(vssra_vi, IMM_SX, vssra_vx, opivx_check)
 
 /* Vector Narrowing Fixed-Point Clip Instructions */
 GEN_OPIVV_NARROW_TRANS(vnclipu_vv)
 GEN_OPIVV_NARROW_TRANS(vnclip_vv)
 GEN_OPIVX_NARROW_TRANS(vnclipu_vx)
 GEN_OPIVX_NARROW_TRANS(vnclip_vx)
-GEN_OPIVI_NARROW_TRANS(vnclipu_vi, 1, vnclipu_vx)
-GEN_OPIVI_NARROW_TRANS(vnclip_vi, 1, vnclip_vx)
+GEN_OPIVI_NARROW_TRANS(vnclipu_vi, IMM_ZX, vnclipu_vx)
+GEN_OPIVI_NARROW_TRANS(vnclip_vi, IMM_ZX, vnclip_vx)
 
 /*
  *** Vector Float Point Arithmetic Instructions
@@ -3053,7 +3069,7 @@ static bool slideup_check(DisasContext *s, arg_rmrr *a)
 
 GEN_OPIVX_TRANS(vslideup_vx, slideup_check)
 GEN_OPIVX_TRANS(vslide1up_vx, slideup_check)
-GEN_OPIVI_TRANS(vslideup_vi, 1, vslideup_vx, slideup_check)
+GEN_OPIVI_TRANS(vslideup_vi, IMM_ZX, vslideup_vx, slideup_check)
 
 static bool slidedown_check(DisasContext *s, arg_rmrr *a)
 {
@@ -3064,7 +3080,7 @@ static bool slidedown_check(DisasContext *s, arg_rmrr *a)
 
 GEN_OPIVX_TRANS(vslidedown_vx, slidedown_check)
 GEN_OPIVX_TRANS(vslide1down_vx, slidedown_check)
-GEN_OPIVI_TRANS(vslidedown_vi, 1, vslidedown_vx, slidedown_check)
+GEN_OPIVI_TRANS(vslidedown_vi, IMM_ZX, vslidedown_vx, slidedown_check)
 
 /* Vector Register Gather Instruction */
 static bool vrgather_vv_check(DisasContext *s, arg_rmrr *a)
@@ -3143,7 +3159,8 @@ static bool trans_vrgather_vi(DisasContext *s, arg_rmrr *a)
             gen_helper_vrgather_vx_b, gen_helper_vrgather_vx_h,
             gen_helper_vrgather_vx_w, gen_helper_vrgather_vx_d
         };
-        return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s, 1);
+        return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew],
+                           s, IMM_ZX);
     }
     return true;
 }
-- 
2.33.1