0019-target-riscv-rvv-1.0-remove-MLEN-calculations.patch

  1. From 9ea07a7c23db4fb206a05a955f2c68985d3308fd Mon Sep 17 00:00:00 2001
  2. From: Frank Chang <frank.chang@sifive.com>
  3. Date: Thu, 4 Jun 2020 00:00:11 +0800
  4. Subject: [PATCH 019/107] target/riscv: rvv-1.0: remove MLEN calculations
  5. In the RVV 1.0 design, MLEN is hardcoded to 1 (Section 4.5).
  6. Thus, remove all MLEN-related calculations.
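
With MLEN fixed at 1, mask element i is simply bit i of the v0 mask register,
so the old (index * mlen) arithmetic collapses to plain bit indexing. A minimal
sketch of the simplified accessors, mirroring the new vext_elem_mask() /
vext_set_elem_mask() helpers in the vector_helper.c hunk below (assumes
<stdint.h> types and QEMU's deposit64()):

    /* Mask element i is bit i of the v0 mask register. */
    static inline int vext_elem_mask(void *v0, int index)
    {
        int idx = index / 64;              /* 64-bit word holding the bit */
        int pos = index % 64;              /* bit position within that word */
        return (((uint64_t *)v0)[idx] >> pos) & 1;
    }

    static inline void vext_set_elem_mask(void *v0, int index, uint8_t value)
    {
        int idx = index / 64;
        int pos = index % 64;
        uint64_t old = ((uint64_t *)v0)[idx];
        ((uint64_t *)v0)[idx] = deposit64(old, pos, 1, value);  /* write one bit */
    }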
  7. Signed-off-by: Frank Chang <frank.chang@sifive.com>
  8. Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
  9. Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
  10. ---
  11. target/riscv/insn_trans/trans_rvv.c.inc | 35 +---
  12. target/riscv/internals.h | 9 +-
  13. target/riscv/translate.c | 2 -
  14. target/riscv/vector_helper.c | 250 ++++++++++--------------
  15. 4 files changed, 110 insertions(+), 186 deletions(-)
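
With MLEN (previously bits 0-7) dropped, the VDATA descriptor fields are
repacked as shown in the internals.h hunk below. A rough, self-contained
sketch of the new layout, using plain shift/mask arithmetic in place of
QEMU's FIELD()/FIELD_DP32() macros (the vdata_* names here are hypothetical,
for illustration only):

    /*
     * New VDATA layout once MLEN is removed:
     *   bit  0      VM
     *   bits 1..3   LMUL
     *   bits 4..7   NF   (segment loads/stores)
     *   bit  4      WD   (AMOs; overlaps NF since no instruction uses both)
     */
    static inline uint32_t vdata_pack(uint32_t vm, uint32_t lmul, uint32_t nf)
    {
        return (vm & 0x1) | ((lmul & 0x7) << 1) | ((nf & 0xf) << 4);
    }

    static inline uint32_t vdata_vm(uint32_t data)   { return data & 0x1; }
    static inline uint32_t vdata_lmul(uint32_t data) { return (data >> 1) & 0x7; }
    static inline uint32_t vdata_nf(uint32_t data)   { return (data >> 4) & 0xf; }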
  16. diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
  17. index 56ce39e769..46e18a62b5 100644
  18. --- a/target/riscv/insn_trans/trans_rvv.c.inc
  19. +++ b/target/riscv/insn_trans/trans_rvv.c.inc
  20. @@ -247,7 +247,6 @@ static bool ld_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t seq)
  21. return false;
  22. }
  23. - data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
  24. data = FIELD_DP32(data, VDATA, VM, a->vm);
  25. data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
  26. data = FIELD_DP32(data, VDATA, NF, a->nf);
  27. @@ -300,7 +299,6 @@ static bool st_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t seq)
  28. return false;
  29. }
  30. - data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
  31. data = FIELD_DP32(data, VDATA, VM, a->vm);
  32. data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
  33. data = FIELD_DP32(data, VDATA, NF, a->nf);
  34. @@ -387,7 +385,6 @@ static bool ld_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t seq)
  35. return false;
  36. }
  37. - data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
  38. data = FIELD_DP32(data, VDATA, VM, a->vm);
  39. data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
  40. data = FIELD_DP32(data, VDATA, NF, a->nf);
  41. @@ -426,7 +423,6 @@ static bool st_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t seq)
  42. gen_helper_vsse_v_w, gen_helper_vsse_v_d }
  43. };
  44. - data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
  45. data = FIELD_DP32(data, VDATA, VM, a->vm);
  46. data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
  47. data = FIELD_DP32(data, VDATA, NF, a->nf);
  48. @@ -518,7 +514,6 @@ static bool ld_index_op(DisasContext *s, arg_rnfvm *a, uint8_t seq)
  49. return false;
  50. }
  51. - data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
  52. data = FIELD_DP32(data, VDATA, VM, a->vm);
  53. data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
  54. data = FIELD_DP32(data, VDATA, NF, a->nf);
  55. @@ -570,7 +565,6 @@ static bool st_index_op(DisasContext *s, arg_rnfvm *a, uint8_t seq)
  56. return false;
  57. }
  58. - data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
  59. data = FIELD_DP32(data, VDATA, VM, a->vm);
  60. data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
  61. data = FIELD_DP32(data, VDATA, NF, a->nf);
  62. @@ -649,7 +643,6 @@ static bool ldff_op(DisasContext *s, arg_r2nfvm *a, uint8_t seq)
  63. return false;
  64. }
  65. - data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
  66. data = FIELD_DP32(data, VDATA, VM, a->vm);
  67. data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
  68. data = FIELD_DP32(data, VDATA, NF, a->nf);
  69. @@ -760,7 +753,6 @@ static bool amo_op(DisasContext *s, arg_rwdvm *a, uint8_t seq)
  70. }
  71. }
  72. - data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
  73. data = FIELD_DP32(data, VDATA, VM, a->vm);
  74. data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
  75. data = FIELD_DP32(data, VDATA, WD, a->wd);
  76. @@ -839,7 +831,6 @@ do_opivv_gvec(DisasContext *s, arg_rmrr *a, GVecGen3Fn *gvec_fn,
  77. } else {
  78. uint32_t data = 0;
  79. - data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
  80. data = FIELD_DP32(data, VDATA, VM, a->vm);
  81. data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
  82. tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
  83. @@ -885,7 +876,6 @@ static bool opivx_trans(uint32_t vd, uint32_t rs1, uint32_t vs2, uint32_t vm,
  84. src1 = tcg_temp_new();
  85. gen_get_gpr(src1, rs1);
  86. - data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
  87. data = FIELD_DP32(data, VDATA, VM, vm);
  88. data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
  89. desc = tcg_const_i32(simd_desc(0, s->vlen / 8, data));
  90. @@ -1034,7 +1024,6 @@ static bool opivi_trans(uint32_t vd, uint32_t imm, uint32_t vs2, uint32_t vm,
  91. } else {
  92. src1 = tcg_const_tl(sextract64(imm, 0, 5));
  93. }
  94. - data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
  95. data = FIELD_DP32(data, VDATA, VM, vm);
  96. data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
  97. desc = tcg_const_i32(simd_desc(0, s->vlen / 8, data));
  98. @@ -1130,7 +1119,6 @@ static bool do_opivv_widen(DisasContext *s, arg_rmrr *a,
  99. TCGLabel *over = gen_new_label();
  100. tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
  101. - data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
  102. data = FIELD_DP32(data, VDATA, VM, a->vm);
  103. data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
  104. tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
  105. @@ -1219,7 +1207,6 @@ static bool do_opiwv_widen(DisasContext *s, arg_rmrr *a,
  106. TCGLabel *over = gen_new_label();
  107. tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
  108. - data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
  109. data = FIELD_DP32(data, VDATA, VM, a->vm);
  110. data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
  111. tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
  112. @@ -1298,7 +1285,6 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
  113. TCGLabel *over = gen_new_label(); \
  114. tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
  115. \
  116. - data = FIELD_DP32(data, VDATA, MLEN, s->mlen); \
  117. data = FIELD_DP32(data, VDATA, VM, a->vm); \
  118. data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
  119. tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
  120. @@ -1489,7 +1475,6 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
  121. TCGLabel *over = gen_new_label(); \
  122. tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
  123. \
  124. - data = FIELD_DP32(data, VDATA, MLEN, s->mlen); \
  125. data = FIELD_DP32(data, VDATA, VM, a->vm); \
  126. data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
  127. tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
  128. @@ -1859,7 +1844,6 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
  129. gen_set_rm(s, 7); \
  130. tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
  131. \
  132. - data = FIELD_DP32(data, VDATA, MLEN, s->mlen); \
  133. data = FIELD_DP32(data, VDATA, VM, a->vm); \
  134. data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
  135. tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
  136. @@ -1932,7 +1916,6 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
  137. gen_helper_##NAME##_d, \
  138. }; \
  139. gen_set_rm(s, 7); \
  140. - data = FIELD_DP32(data, VDATA, MLEN, s->mlen); \
  141. data = FIELD_DP32(data, VDATA, VM, a->vm); \
  142. data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
  143. return opfvf_trans(a->rd, a->rs1, a->rs2, data, \
  144. @@ -1973,7 +1956,6 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
  145. gen_set_rm(s, 7); \
  146. tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
  147. \
  148. - data = FIELD_DP32(data, VDATA, MLEN, s->mlen); \
  149. data = FIELD_DP32(data, VDATA, VM, a->vm); \
  150. data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
  151. tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
  152. @@ -2011,7 +1993,6 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
  153. gen_helper_##NAME##_h, gen_helper_##NAME##_w, \
  154. }; \
  155. gen_set_rm(s, 7); \
  156. - data = FIELD_DP32(data, VDATA, MLEN, s->mlen); \
  157. data = FIELD_DP32(data, VDATA, VM, a->vm); \
  158. data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
  159. return opfvf_trans(a->rd, a->rs1, a->rs2, data, \
  160. @@ -2048,7 +2029,6 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
  161. gen_set_rm(s, 7); \
  162. tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
  163. \
  164. - data = FIELD_DP32(data, VDATA, MLEN, s->mlen); \
  165. data = FIELD_DP32(data, VDATA, VM, a->vm); \
  166. data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
  167. tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
  168. @@ -2084,7 +2064,6 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
  169. gen_helper_##NAME##_h, gen_helper_##NAME##_w, \
  170. }; \
  171. gen_set_rm(s, 7); \
  172. - data = FIELD_DP32(data, VDATA, MLEN, s->mlen); \
  173. data = FIELD_DP32(data, VDATA, VM, a->vm); \
  174. data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
  175. return opfvf_trans(a->rd, a->rs1, a->rs2, data, \
  176. @@ -2164,7 +2143,6 @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
  177. gen_set_rm(s, 7); \
  178. tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
  179. \
  180. - data = FIELD_DP32(data, VDATA, MLEN, s->mlen); \
  181. data = FIELD_DP32(data, VDATA, VM, a->vm); \
  182. data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
  183. tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
  184. @@ -2307,7 +2285,6 @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
  185. gen_set_rm(s, 7); \
  186. tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
  187. \
  188. - data = FIELD_DP32(data, VDATA, MLEN, s->mlen); \
  189. data = FIELD_DP32(data, VDATA, VM, a->vm); \
  190. data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
  191. tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
  192. @@ -2356,7 +2333,6 @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
  193. gen_set_rm(s, 7); \
  194. tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
  195. \
  196. - data = FIELD_DP32(data, VDATA, MLEN, s->mlen); \
  197. data = FIELD_DP32(data, VDATA, VM, a->vm); \
  198. data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
  199. tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
  200. @@ -2419,7 +2395,6 @@ static bool trans_##NAME(DisasContext *s, arg_r *a) \
  201. TCGLabel *over = gen_new_label(); \
  202. tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
  203. \
  204. - data = FIELD_DP32(data, VDATA, MLEN, s->mlen); \
  205. data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
  206. tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
  207. vreg_ofs(s, a->rs1), \
  208. @@ -2449,7 +2424,6 @@ static bool trans_vmpopc_m(DisasContext *s, arg_rmr *a)
  209. TCGv dst;
  210. TCGv_i32 desc;
  211. uint32_t data = 0;
  212. - data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
  213. data = FIELD_DP32(data, VDATA, VM, a->vm);
  214. data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
  215. @@ -2481,7 +2455,6 @@ static bool trans_vmfirst_m(DisasContext *s, arg_rmr *a)
  216. TCGv dst;
  217. TCGv_i32 desc;
  218. uint32_t data = 0;
  219. - data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
  220. data = FIELD_DP32(data, VDATA, VM, a->vm);
  221. data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
  222. @@ -2517,7 +2490,6 @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
  223. TCGLabel *over = gen_new_label(); \
  224. tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
  225. \
  226. - data = FIELD_DP32(data, VDATA, MLEN, s->mlen); \
  227. data = FIELD_DP32(data, VDATA, VM, a->vm); \
  228. data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
  229. tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), \
  230. @@ -2545,7 +2517,6 @@ static bool trans_viota_m(DisasContext *s, arg_viota_m *a)
  231. TCGLabel *over = gen_new_label();
  232. tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
  233. - data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
  234. data = FIELD_DP32(data, VDATA, VM, a->vm);
  235. data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
  236. static gen_helper_gvec_3_ptr * const fns[4] = {
  237. @@ -2572,7 +2543,6 @@ static bool trans_vid_v(DisasContext *s, arg_vid_v *a)
  238. TCGLabel *over = gen_new_label();
  239. tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
  240. - data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
  241. data = FIELD_DP32(data, VDATA, VM, a->vm);
  242. data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
  243. static gen_helper_gvec_2_ptr * const fns[4] = {
  244. @@ -2863,7 +2833,7 @@ static bool trans_vrgather_vx(DisasContext *s, arg_rmrr *a)
  245. }
  246. if (a->vm && s->vl_eq_vlmax) {
  247. - int vlmax = s->vlen / s->mlen;
  248. + int vlmax = s->vlen;
  249. TCGv_i64 dest = tcg_temp_new_i64();
  250. if (a->rs1 == 0) {
  251. @@ -2894,7 +2864,7 @@ static bool trans_vrgather_vi(DisasContext *s, arg_rmrr *a)
  252. }
  253. if (a->vm && s->vl_eq_vlmax) {
  254. - if (a->rs1 >= s->vlen / s->mlen) {
  255. + if (a->rs1 >= s->vlen) {
  256. tcg_gen_gvec_dup_imm(SEW64, vreg_ofs(s, a->rd),
  257. MAXSZ(s), MAXSZ(s), 0);
  258. } else {
  259. @@ -2934,7 +2904,6 @@ static bool trans_vcompress_vm(DisasContext *s, arg_r *a)
  260. TCGLabel *over = gen_new_label();
  261. tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
  262. - data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
  263. data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
  264. tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
  265. vreg_ofs(s, a->rs1), vreg_ofs(s, a->rs2),
  266. diff --git a/target/riscv/internals.h b/target/riscv/internals.h
  267. index bce91da11a..81f5dfa477 100644
  268. --- a/target/riscv/internals.h
  269. +++ b/target/riscv/internals.h
  270. @@ -22,11 +22,10 @@
  271. #include "hw/registerfields.h"
  272. /* share data between vector helpers and decode code */
  273. -FIELD(VDATA, MLEN, 0, 8)
  274. -FIELD(VDATA, VM, 8, 1)
  275. -FIELD(VDATA, LMUL, 9, 2)
  276. -FIELD(VDATA, NF, 11, 4)
  277. -FIELD(VDATA, WD, 11, 1)
  278. +FIELD(VDATA, VM, 0, 1)
  279. +FIELD(VDATA, LMUL, 1, 3)
  280. +FIELD(VDATA, NF, 4, 4)
  281. +FIELD(VDATA, WD, 4, 1)
  282. /* float point classify helpers */
  283. target_ulong fclass_h(uint64_t frs1);
  284. diff --git a/target/riscv/translate.c b/target/riscv/translate.c
  285. index c42c52c90c..b18f76c344 100644
  286. --- a/target/riscv/translate.c
  287. +++ b/target/riscv/translate.c
  288. @@ -64,7 +64,6 @@ typedef struct DisasContext {
  289. uint8_t lmul;
  290. uint8_t sew;
  291. uint16_t vlen;
  292. - uint16_t mlen;
  293. bool vl_eq_vlmax;
  294. CPUState *cs;
  295. } DisasContext;
  296. @@ -696,7 +695,6 @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
  297. ctx->vill = FIELD_EX32(tb_flags, TB_FLAGS, VILL);
  298. ctx->sew = FIELD_EX32(tb_flags, TB_FLAGS, SEW);
  299. ctx->lmul = FIELD_EX32(tb_flags, TB_FLAGS, LMUL);
  300. - ctx->mlen = 1 << (ctx->sew + 3 - ctx->lmul);
  301. ctx->vl_eq_vlmax = FIELD_EX32(tb_flags, TB_FLAGS, VL_EQ_VLMAX);
  302. ctx->cs = cs;
  303. }
  304. diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
  305. index 3f1ee31146..dea1d190ed 100644
  306. --- a/target/riscv/vector_helper.c
  307. +++ b/target/riscv/vector_helper.c
  308. @@ -81,11 +81,6 @@ static inline uint32_t vext_nf(uint32_t desc)
  309. return FIELD_EX32(simd_data(desc), VDATA, NF);
  310. }
  311. -static inline uint32_t vext_mlen(uint32_t desc)
  312. -{
  313. - return FIELD_EX32(simd_data(desc), VDATA, MLEN);
  314. -}
  315. -
  316. static inline uint32_t vext_vm(uint32_t desc)
  317. {
  318. return FIELD_EX32(simd_data(desc), VDATA, VM);
  319. @@ -188,19 +183,24 @@ static void clearq(void *vd, uint32_t idx, uint32_t cnt, uint32_t tot)
  320. vext_clear(cur, cnt, tot);
  321. }
  322. -static inline void vext_set_elem_mask(void *v0, int mlen, int index,
  323. +static inline void vext_set_elem_mask(void *v0, int index,
  324. uint8_t value)
  325. {
  326. - int idx = (index * mlen) / 64;
  327. - int pos = (index * mlen) % 64;
  328. + int idx = index / 64;
  329. + int pos = index % 64;
  330. uint64_t old = ((uint64_t *)v0)[idx];
  331. - ((uint64_t *)v0)[idx] = deposit64(old, pos, mlen, value);
  332. + ((uint64_t *)v0)[idx] = deposit64(old, pos, 1, value);
  333. }
  334. -static inline int vext_elem_mask(void *v0, int mlen, int index)
  335. +/*
  336. + * Earlier designs (pre-0.9) had a varying number of bits
  337. + * per mask value (MLEN). In the 0.9 design, MLEN=1.
  338. + * (Section 4.5)
  339. + */
  340. +static inline int vext_elem_mask(void *v0, int index)
  341. {
  342. - int idx = (index * mlen) / 64;
  343. - int pos = (index * mlen) % 64;
  344. + int idx = index / 64;
  345. + int pos = index % 64;
  346. return (((uint64_t *)v0)[idx] >> pos) & 1;
  347. }
  348. @@ -277,12 +277,11 @@ vext_ldst_stride(void *vd, void *v0, target_ulong base,
  349. {
  350. uint32_t i, k;
  351. uint32_t nf = vext_nf(desc);
  352. - uint32_t mlen = vext_mlen(desc);
  353. uint32_t vlmax = vext_maxsz(desc) / esz;
  354. /* probe every access*/
  355. for (i = 0; i < env->vl; i++) {
  356. - if (!vm && !vext_elem_mask(v0, mlen, i)) {
  357. + if (!vm && !vext_elem_mask(v0, i)) {
  358. continue;
  359. }
  360. probe_pages(env, base + stride * i, nf * msz, ra, access_type);
  361. @@ -290,7 +289,7 @@ vext_ldst_stride(void *vd, void *v0, target_ulong base,
  362. /* do real access */
  363. for (i = 0; i < env->vl; i++) {
  364. k = 0;
  365. - if (!vm && !vext_elem_mask(v0, mlen, i)) {
  366. + if (!vm && !vext_elem_mask(v0, i)) {
  367. continue;
  368. }
  369. while (k < nf) {
  370. @@ -506,12 +505,11 @@ vext_ldst_index(void *vd, void *v0, target_ulong base,
  371. uint32_t i, k;
  372. uint32_t nf = vext_nf(desc);
  373. uint32_t vm = vext_vm(desc);
  374. - uint32_t mlen = vext_mlen(desc);
  375. uint32_t vlmax = vext_maxsz(desc) / esz;
  376. /* probe every access*/
  377. for (i = 0; i < env->vl; i++) {
  378. - if (!vm && !vext_elem_mask(v0, mlen, i)) {
  379. + if (!vm && !vext_elem_mask(v0, i)) {
  380. continue;
  381. }
  382. probe_pages(env, get_index_addr(base, i, vs2), nf * msz, ra,
  383. @@ -520,7 +518,7 @@ vext_ldst_index(void *vd, void *v0, target_ulong base,
  384. /* load bytes from guest memory */
  385. for (i = 0; i < env->vl; i++) {
  386. k = 0;
  387. - if (!vm && !vext_elem_mask(v0, mlen, i)) {
  388. + if (!vm && !vext_elem_mask(v0, i)) {
  389. continue;
  390. }
  391. while (k < nf) {
  392. @@ -604,7 +602,6 @@ vext_ldff(void *vd, void *v0, target_ulong base,
  393. {
  394. void *host;
  395. uint32_t i, k, vl = 0;
  396. - uint32_t mlen = vext_mlen(desc);
  397. uint32_t nf = vext_nf(desc);
  398. uint32_t vm = vext_vm(desc);
  399. uint32_t vlmax = vext_maxsz(desc) / esz;
  400. @@ -612,7 +609,7 @@ vext_ldff(void *vd, void *v0, target_ulong base,
  401. /* probe every access*/
  402. for (i = 0; i < env->vl; i++) {
  403. - if (!vm && !vext_elem_mask(v0, mlen, i)) {
  404. + if (!vm && !vext_elem_mask(v0, i)) {
  405. continue;
  406. }
  407. addr = base + nf * i * msz;
  408. @@ -653,7 +650,7 @@ ProbeSuccess:
  409. }
  410. for (i = 0; i < env->vl; i++) {
  411. k = 0;
  412. - if (!vm && !vext_elem_mask(v0, mlen, i)) {
  413. + if (!vm && !vext_elem_mask(v0, i)) {
  414. continue;
  415. }
  416. while (k < nf) {
  417. @@ -784,18 +781,17 @@ vext_amo_noatomic(void *vs3, void *v0, target_ulong base,
  418. target_long addr;
  419. uint32_t wd = vext_wd(desc);
  420. uint32_t vm = vext_vm(desc);
  421. - uint32_t mlen = vext_mlen(desc);
  422. uint32_t vlmax = vext_maxsz(desc) / esz;
  423. for (i = 0; i < env->vl; i++) {
  424. - if (!vm && !vext_elem_mask(v0, mlen, i)) {
  425. + if (!vm && !vext_elem_mask(v0, i)) {
  426. continue;
  427. }
  428. probe_pages(env, get_index_addr(base, i, vs2), msz, ra, MMU_DATA_LOAD);
  429. probe_pages(env, get_index_addr(base, i, vs2), msz, ra, MMU_DATA_STORE);
  430. }
  431. for (i = 0; i < env->vl; i++) {
  432. - if (!vm && !vext_elem_mask(v0, mlen, i)) {
  433. + if (!vm && !vext_elem_mask(v0, i)) {
  434. continue;
  435. }
  436. addr = get_index_addr(base, i, vs2);
  437. @@ -911,13 +907,12 @@ static void do_vext_vv(void *vd, void *v0, void *vs1, void *vs2,
  438. opivv2_fn *fn, clear_fn *clearfn)
  439. {
  440. uint32_t vlmax = vext_maxsz(desc) / esz;
  441. - uint32_t mlen = vext_mlen(desc);
  442. uint32_t vm = vext_vm(desc);
  443. uint32_t vl = env->vl;
  444. uint32_t i;
  445. for (i = 0; i < vl; i++) {
  446. - if (!vm && !vext_elem_mask(v0, mlen, i)) {
  447. + if (!vm && !vext_elem_mask(v0, i)) {
  448. continue;
  449. }
  450. fn(vd, vs1, vs2, i);
  451. @@ -976,13 +971,12 @@ static void do_vext_vx(void *vd, void *v0, target_long s1, void *vs2,
  452. opivx2_fn fn, clear_fn *clearfn)
  453. {
  454. uint32_t vlmax = vext_maxsz(desc) / esz;
  455. - uint32_t mlen = vext_mlen(desc);
  456. uint32_t vm = vext_vm(desc);
  457. uint32_t vl = env->vl;
  458. uint32_t i;
  459. for (i = 0; i < vl; i++) {
  460. - if (!vm && !vext_elem_mask(v0, mlen, i)) {
  461. + if (!vm && !vext_elem_mask(v0, i)) {
  462. continue;
  463. }
  464. fn(vd, s1, vs2, i);
  465. @@ -1172,7 +1166,6 @@ GEN_VEXT_VX(vwsub_wx_w, 4, 8, clearq)
  466. void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
  467. CPURISCVState *env, uint32_t desc) \
  468. { \
  469. - uint32_t mlen = vext_mlen(desc); \
  470. uint32_t vl = env->vl; \
  471. uint32_t esz = sizeof(ETYPE); \
  472. uint32_t vlmax = vext_maxsz(desc) / esz; \
  473. @@ -1181,7 +1174,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
  474. for (i = 0; i < vl; i++) { \
  475. ETYPE s1 = *((ETYPE *)vs1 + H(i)); \
  476. ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
  477. - uint8_t carry = vext_elem_mask(v0, mlen, i); \
  478. + uint8_t carry = vext_elem_mask(v0, i); \
  479. \
  480. *((ETYPE *)vd + H(i)) = DO_OP(s2, s1, carry); \
  481. } \
  482. @@ -1202,7 +1195,6 @@ GEN_VEXT_VADC_VVM(vsbc_vvm_d, uint64_t, H8, DO_VSBC, clearq)
  483. void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
  484. CPURISCVState *env, uint32_t desc) \
  485. { \
  486. - uint32_t mlen = vext_mlen(desc); \
  487. uint32_t vl = env->vl; \
  488. uint32_t esz = sizeof(ETYPE); \
  489. uint32_t vlmax = vext_maxsz(desc) / esz; \
  490. @@ -1210,7 +1202,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
  491. \
  492. for (i = 0; i < vl; i++) { \
  493. ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
  494. - uint8_t carry = vext_elem_mask(v0, mlen, i); \
  495. + uint8_t carry = vext_elem_mask(v0, i); \
  496. \
  497. *((ETYPE *)vd + H(i)) = DO_OP(s2, (ETYPE)(target_long)s1, carry);\
  498. } \
  499. @@ -1235,7 +1227,6 @@ GEN_VEXT_VADC_VXM(vsbc_vxm_d, uint64_t, H8, DO_VSBC, clearq)
  500. void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
  501. CPURISCVState *env, uint32_t desc) \
  502. { \
  503. - uint32_t mlen = vext_mlen(desc); \
  504. uint32_t vl = env->vl; \
  505. uint32_t vlmax = vext_maxsz(desc) / sizeof(ETYPE); \
  506. uint32_t i; \
  507. @@ -1243,12 +1234,12 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
  508. for (i = 0; i < vl; i++) { \
  509. ETYPE s1 = *((ETYPE *)vs1 + H(i)); \
  510. ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
  511. - uint8_t carry = vext_elem_mask(v0, mlen, i); \
  512. + uint8_t carry = vext_elem_mask(v0, i); \
  513. \
  514. - vext_set_elem_mask(vd, mlen, i, DO_OP(s2, s1, carry));\
  515. + vext_set_elem_mask(vd, i, DO_OP(s2, s1, carry)); \
  516. } \
  517. for (; i < vlmax; i++) { \
  518. - vext_set_elem_mask(vd, mlen, i, 0); \
  519. + vext_set_elem_mask(vd, i, 0); \
  520. } \
  521. }
  522. @@ -1266,20 +1257,19 @@ GEN_VEXT_VMADC_VVM(vmsbc_vvm_d, uint64_t, H8, DO_MSBC)
  523. void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
  524. void *vs2, CPURISCVState *env, uint32_t desc) \
  525. { \
  526. - uint32_t mlen = vext_mlen(desc); \
  527. uint32_t vl = env->vl; \
  528. uint32_t vlmax = vext_maxsz(desc) / sizeof(ETYPE); \
  529. uint32_t i; \
  530. \
  531. for (i = 0; i < vl; i++) { \
  532. ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
  533. - uint8_t carry = vext_elem_mask(v0, mlen, i); \
  534. + uint8_t carry = vext_elem_mask(v0, i); \
  535. \
  536. - vext_set_elem_mask(vd, mlen, i, \
  537. + vext_set_elem_mask(vd, i, \
  538. DO_OP(s2, (ETYPE)(target_long)s1, carry)); \
  539. } \
  540. for (; i < vlmax; i++) { \
  541. - vext_set_elem_mask(vd, mlen, i, 0); \
  542. + vext_set_elem_mask(vd, i, 0); \
  543. } \
  544. }
  545. @@ -1353,7 +1343,6 @@ GEN_VEXT_VX(vxor_vx_d, 8, 8, clearq)
  546. void HELPER(NAME)(void *vd, void *v0, void *vs1, \
  547. void *vs2, CPURISCVState *env, uint32_t desc) \
  548. { \
  549. - uint32_t mlen = vext_mlen(desc); \
  550. uint32_t vm = vext_vm(desc); \
  551. uint32_t vl = env->vl; \
  552. uint32_t esz = sizeof(TS1); \
  553. @@ -1361,7 +1350,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
  554. uint32_t i; \
  555. \
  556. for (i = 0; i < vl; i++) { \
  557. - if (!vm && !vext_elem_mask(v0, mlen, i)) { \
  558. + if (!vm && !vext_elem_mask(v0, i)) { \
  559. continue; \
  560. } \
  561. TS1 s1 = *((TS1 *)vs1 + HS1(i)); \
  562. @@ -1391,7 +1380,6 @@ GEN_VEXT_SHIFT_VV(vsra_vv_d, uint64_t, int64_t, H8, H8, DO_SRL, 0x3f, clearq)
  563. void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
  564. void *vs2, CPURISCVState *env, uint32_t desc) \
  565. { \
  566. - uint32_t mlen = vext_mlen(desc); \
  567. uint32_t vm = vext_vm(desc); \
  568. uint32_t vl = env->vl; \
  569. uint32_t esz = sizeof(TD); \
  570. @@ -1399,7 +1387,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
  571. uint32_t i; \
  572. \
  573. for (i = 0; i < vl; i++) { \
  574. - if (!vm && !vext_elem_mask(v0, mlen, i)) { \
  575. + if (!vm && !vext_elem_mask(v0, i)) { \
  576. continue; \
  577. } \
  578. TS2 s2 = *((TS2 *)vs2 + HS2(i)); \
  579. @@ -1448,7 +1436,6 @@ GEN_VEXT_SHIFT_VX(vnsra_vx_w, int32_t, int64_t, H4, H8, DO_SRL, 0x3f, clearl)
  580. void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
  581. CPURISCVState *env, uint32_t desc) \
  582. { \
  583. - uint32_t mlen = vext_mlen(desc); \
  584. uint32_t vm = vext_vm(desc); \
  585. uint32_t vl = env->vl; \
  586. uint32_t vlmax = vext_maxsz(desc) / sizeof(ETYPE); \
  587. @@ -1457,13 +1444,13 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
  588. for (i = 0; i < vl; i++) { \
  589. ETYPE s1 = *((ETYPE *)vs1 + H(i)); \
  590. ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
  591. - if (!vm && !vext_elem_mask(v0, mlen, i)) { \
  592. + if (!vm && !vext_elem_mask(v0, i)) { \
  593. continue; \
  594. } \
  595. - vext_set_elem_mask(vd, mlen, i, DO_OP(s2, s1)); \
  596. + vext_set_elem_mask(vd, i, DO_OP(s2, s1)); \
  597. } \
  598. for (; i < vlmax; i++) { \
  599. - vext_set_elem_mask(vd, mlen, i, 0); \
  600. + vext_set_elem_mask(vd, i, 0); \
  601. } \
  602. }
  603. @@ -1501,7 +1488,6 @@ GEN_VEXT_CMP_VV(vmsle_vv_d, int64_t, H8, DO_MSLE)
  604. void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
  605. CPURISCVState *env, uint32_t desc) \
  606. { \
  607. - uint32_t mlen = vext_mlen(desc); \
  608. uint32_t vm = vext_vm(desc); \
  609. uint32_t vl = env->vl; \
  610. uint32_t vlmax = vext_maxsz(desc) / sizeof(ETYPE); \
  611. @@ -1509,14 +1495,14 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
  612. \
  613. for (i = 0; i < vl; i++) { \
  614. ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
  615. - if (!vm && !vext_elem_mask(v0, mlen, i)) { \
  616. + if (!vm && !vext_elem_mask(v0, i)) { \
  617. continue; \
  618. } \
  619. - vext_set_elem_mask(vd, mlen, i, \
  620. + vext_set_elem_mask(vd, i, \
  621. DO_OP(s2, (ETYPE)(target_long)s1)); \
  622. } \
  623. for (; i < vlmax; i++) { \
  624. - vext_set_elem_mask(vd, mlen, i, 0); \
  625. + vext_set_elem_mask(vd, i, 0); \
  626. } \
  627. }
  628. @@ -2078,14 +2064,13 @@ GEN_VEXT_VMV_VX(vmv_v_x_d, int64_t, H8, clearq)
  629. void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
  630. CPURISCVState *env, uint32_t desc) \
  631. { \
  632. - uint32_t mlen = vext_mlen(desc); \
  633. uint32_t vl = env->vl; \
  634. uint32_t esz = sizeof(ETYPE); \
  635. uint32_t vlmax = vext_maxsz(desc) / esz; \
  636. uint32_t i; \
  637. \
  638. for (i = 0; i < vl; i++) { \
  639. - ETYPE *vt = (!vext_elem_mask(v0, mlen, i) ? vs2 : vs1); \
  640. + ETYPE *vt = (!vext_elem_mask(v0, i) ? vs2 : vs1); \
  641. *((ETYPE *)vd + H(i)) = *(vt + H(i)); \
  642. } \
  643. CLEAR_FN(vd, vl, vl * esz, vlmax * esz); \
  644. @@ -2100,7 +2085,6 @@ GEN_VEXT_VMERGE_VV(vmerge_vvm_d, int64_t, H8, clearq)
  645. void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
  646. void *vs2, CPURISCVState *env, uint32_t desc) \
  647. { \
  648. - uint32_t mlen = vext_mlen(desc); \
  649. uint32_t vl = env->vl; \
  650. uint32_t esz = sizeof(ETYPE); \
  651. uint32_t vlmax = vext_maxsz(desc) / esz; \
  652. @@ -2108,7 +2092,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
  653. \
  654. for (i = 0; i < vl; i++) { \
  655. ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
  656. - ETYPE d = (!vext_elem_mask(v0, mlen, i) ? s2 : \
  657. + ETYPE d = (!vext_elem_mask(v0, i) ? s2 : \
  658. (ETYPE)(target_long)s1); \
  659. *((ETYPE *)vd + H(i)) = d; \
  660. } \
  661. @@ -2146,11 +2130,11 @@ do_##NAME(void *vd, void *vs1, void *vs2, int i, \
  662. static inline void
  663. vext_vv_rm_1(void *vd, void *v0, void *vs1, void *vs2,
  664. CPURISCVState *env,
  665. - uint32_t vl, uint32_t vm, uint32_t mlen, int vxrm,
  666. + uint32_t vl, uint32_t vm, int vxrm,
  667. opivv2_rm_fn *fn)
  668. {
  669. for (uint32_t i = 0; i < vl; i++) {
  670. - if (!vm && !vext_elem_mask(v0, mlen, i)) {
  671. + if (!vm && !vext_elem_mask(v0, i)) {
  672. continue;
  673. }
  674. fn(vd, vs1, vs2, i, env, vxrm);
  675. @@ -2164,26 +2148,25 @@ vext_vv_rm_2(void *vd, void *v0, void *vs1, void *vs2,
  676. opivv2_rm_fn *fn, clear_fn *clearfn)
  677. {
  678. uint32_t vlmax = vext_maxsz(desc) / esz;
  679. - uint32_t mlen = vext_mlen(desc);
  680. uint32_t vm = vext_vm(desc);
  681. uint32_t vl = env->vl;
  682. switch (env->vxrm) {
  683. case 0: /* rnu */
  684. vext_vv_rm_1(vd, v0, vs1, vs2,
  685. - env, vl, vm, mlen, 0, fn);
  686. + env, vl, vm, 0, fn);
  687. break;
  688. case 1: /* rne */
  689. vext_vv_rm_1(vd, v0, vs1, vs2,
  690. - env, vl, vm, mlen, 1, fn);
  691. + env, vl, vm, 1, fn);
  692. break;
  693. case 2: /* rdn */
  694. vext_vv_rm_1(vd, v0, vs1, vs2,
  695. - env, vl, vm, mlen, 2, fn);
  696. + env, vl, vm, 2, fn);
  697. break;
  698. default: /* rod */
  699. vext_vv_rm_1(vd, v0, vs1, vs2,
  700. - env, vl, vm, mlen, 3, fn);
  701. + env, vl, vm, 3, fn);
  702. break;
  703. }
  704. @@ -2266,11 +2249,11 @@ do_##NAME(void *vd, target_long s1, void *vs2, int i, \
  705. static inline void
  706. vext_vx_rm_1(void *vd, void *v0, target_long s1, void *vs2,
  707. CPURISCVState *env,
  708. - uint32_t vl, uint32_t vm, uint32_t mlen, int vxrm,
  709. + uint32_t vl, uint32_t vm, int vxrm,
  710. opivx2_rm_fn *fn)
  711. {
  712. for (uint32_t i = 0; i < vl; i++) {
  713. - if (!vm && !vext_elem_mask(v0, mlen, i)) {
  714. + if (!vm && !vext_elem_mask(v0, i)) {
  715. continue;
  716. }
  717. fn(vd, s1, vs2, i, env, vxrm);
  718. @@ -2284,26 +2267,25 @@ vext_vx_rm_2(void *vd, void *v0, target_long s1, void *vs2,
  719. opivx2_rm_fn *fn, clear_fn *clearfn)
  720. {
  721. uint32_t vlmax = vext_maxsz(desc) / esz;
  722. - uint32_t mlen = vext_mlen(desc);
  723. uint32_t vm = vext_vm(desc);
  724. uint32_t vl = env->vl;
  725. switch (env->vxrm) {
  726. case 0: /* rnu */
  727. vext_vx_rm_1(vd, v0, s1, vs2,
  728. - env, vl, vm, mlen, 0, fn);
  729. + env, vl, vm, 0, fn);
  730. break;
  731. case 1: /* rne */
  732. vext_vx_rm_1(vd, v0, s1, vs2,
  733. - env, vl, vm, mlen, 1, fn);
  734. + env, vl, vm, 1, fn);
  735. break;
  736. case 2: /* rdn */
  737. vext_vx_rm_1(vd, v0, s1, vs2,
  738. - env, vl, vm, mlen, 2, fn);
  739. + env, vl, vm, 2, fn);
  740. break;
  741. default: /* rod */
  742. vext_vx_rm_1(vd, v0, s1, vs2,
  743. - env, vl, vm, mlen, 3, fn);
  744. + env, vl, vm, 3, fn);
  745. break;
  746. }
  747. @@ -3188,13 +3170,12 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
  748. uint32_t desc) \
  749. { \
  750. uint32_t vlmax = vext_maxsz(desc) / ESZ; \
  751. - uint32_t mlen = vext_mlen(desc); \
  752. uint32_t vm = vext_vm(desc); \
  753. uint32_t vl = env->vl; \
  754. uint32_t i; \
  755. \
  756. for (i = 0; i < vl; i++) { \
  757. - if (!vm && !vext_elem_mask(v0, mlen, i)) { \
  758. + if (!vm && !vext_elem_mask(v0, i)) { \
  759. continue; \
  760. } \
  761. do_##NAME(vd, vs1, vs2, i, env); \
  762. @@ -3223,13 +3204,12 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, \
  763. uint32_t desc) \
  764. { \
  765. uint32_t vlmax = vext_maxsz(desc) / ESZ; \
  766. - uint32_t mlen = vext_mlen(desc); \
  767. uint32_t vm = vext_vm(desc); \
  768. uint32_t vl = env->vl; \
  769. uint32_t i; \
  770. \
  771. for (i = 0; i < vl; i++) { \
  772. - if (!vm && !vext_elem_mask(v0, mlen, i)) { \
  773. + if (!vm && !vext_elem_mask(v0, i)) { \
  774. continue; \
  775. } \
  776. do_##NAME(vd, s1, vs2, i, env); \
  777. @@ -3794,7 +3774,6 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, \
  778. CPURISCVState *env, uint32_t desc) \
  779. { \
  780. uint32_t vlmax = vext_maxsz(desc) / ESZ; \
  781. - uint32_t mlen = vext_mlen(desc); \
  782. uint32_t vm = vext_vm(desc); \
  783. uint32_t vl = env->vl; \
  784. uint32_t i; \
  785. @@ -3803,7 +3782,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, \
  786. return; \
  787. } \
  788. for (i = 0; i < vl; i++) { \
  789. - if (!vm && !vext_elem_mask(v0, mlen, i)) { \
  790. + if (!vm && !vext_elem_mask(v0, i)) { \
  791. continue; \
  792. } \
  793. do_##NAME(vd, vs2, i, env); \
  794. @@ -3935,7 +3914,6 @@ GEN_VEXT_VF(vfsgnjx_vf_d, 8, 8, clearq)
  795. void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
  796. CPURISCVState *env, uint32_t desc) \
  797. { \
  798. - uint32_t mlen = vext_mlen(desc); \
  799. uint32_t vm = vext_vm(desc); \
  800. uint32_t vl = env->vl; \
  801. uint32_t vlmax = vext_maxsz(desc) / sizeof(ETYPE); \
  802. @@ -3944,14 +3922,14 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
  803. for (i = 0; i < vl; i++) { \
  804. ETYPE s1 = *((ETYPE *)vs1 + H(i)); \
  805. ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
  806. - if (!vm && !vext_elem_mask(v0, mlen, i)) { \
  807. + if (!vm && !vext_elem_mask(v0, i)) { \
  808. continue; \
  809. } \
  810. - vext_set_elem_mask(vd, mlen, i, \
  811. + vext_set_elem_mask(vd, i, \
  812. DO_OP(s2, s1, &env->fp_status)); \
  813. } \
  814. for (; i < vlmax; i++) { \
  815. - vext_set_elem_mask(vd, mlen, i, 0); \
  816. + vext_set_elem_mask(vd, i, 0); \
  817. } \
  818. }
  819. @@ -3963,7 +3941,6 @@ GEN_VEXT_CMP_VV_ENV(vmfeq_vv_d, uint64_t, H8, float64_eq_quiet)
  820. void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \
  821. CPURISCVState *env, uint32_t desc) \
  822. { \
  823. - uint32_t mlen = vext_mlen(desc); \
  824. uint32_t vm = vext_vm(desc); \
  825. uint32_t vl = env->vl; \
  826. uint32_t vlmax = vext_maxsz(desc) / sizeof(ETYPE); \
  827. @@ -3971,14 +3948,14 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \
  828. \
  829. for (i = 0; i < vl; i++) { \
  830. ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
  831. - if (!vm && !vext_elem_mask(v0, mlen, i)) { \
  832. + if (!vm && !vext_elem_mask(v0, i)) { \
  833. continue; \
  834. } \
  835. - vext_set_elem_mask(vd, mlen, i, \
  836. + vext_set_elem_mask(vd, i, \
  837. DO_OP(s2, (ETYPE)s1, &env->fp_status)); \
  838. } \
  839. for (; i < vlmax; i++) { \
  840. - vext_set_elem_mask(vd, mlen, i, 0); \
  841. + vext_set_elem_mask(vd, i, 0); \
  842. } \
  843. }
  844. @@ -4092,13 +4069,12 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, \
  845. CPURISCVState *env, uint32_t desc) \
  846. { \
  847. uint32_t vlmax = vext_maxsz(desc) / ESZ; \
  848. - uint32_t mlen = vext_mlen(desc); \
  849. uint32_t vm = vext_vm(desc); \
  850. uint32_t vl = env->vl; \
  851. uint32_t i; \
  852. \
  853. for (i = 0; i < vl; i++) { \
  854. - if (!vm && !vext_elem_mask(v0, mlen, i)) { \
  855. + if (!vm && !vext_elem_mask(v0, i)) { \
  856. continue; \
  857. } \
  858. do_##NAME(vd, vs2, i); \
  859. @@ -4175,7 +4151,6 @@ GEN_VEXT_V(vfclass_v_d, 8, 8, clearq)
  860. void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \
  861. CPURISCVState *env, uint32_t desc) \
  862. { \
  863. - uint32_t mlen = vext_mlen(desc); \
  864. uint32_t vm = vext_vm(desc); \
  865. uint32_t vl = env->vl; \
  866. uint32_t esz = sizeof(ETYPE); \
  867. @@ -4185,7 +4160,7 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \
  868. for (i = 0; i < vl; i++) { \
  869. ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
  870. *((ETYPE *)vd + H(i)) \
  871. - = (!vm && !vext_elem_mask(v0, mlen, i) ? s2 : s1); \
  872. + = (!vm && !vext_elem_mask(v0, i) ? s2 : s1); \
  873. } \
  874. CLEAR_FN(vd, vl, vl * esz, vlmax * esz); \
  875. }
  876. @@ -4316,7 +4291,6 @@ GEN_VEXT_V_ENV(vfncvt_f_f_v_w, 4, 4, clearl)
  877. void HELPER(NAME)(void *vd, void *v0, void *vs1, \
  878. void *vs2, CPURISCVState *env, uint32_t desc) \
  879. { \
  880. - uint32_t mlen = vext_mlen(desc); \
  881. uint32_t vm = vext_vm(desc); \
  882. uint32_t vl = env->vl; \
  883. uint32_t i; \
  884. @@ -4325,7 +4299,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
  885. \
  886. for (i = 0; i < vl; i++) { \
  887. TS2 s2 = *((TS2 *)vs2 + HS2(i)); \
  888. - if (!vm && !vext_elem_mask(v0, mlen, i)) { \
  889. + if (!vm && !vext_elem_mask(v0, i)) { \
  890. continue; \
  891. } \
  892. s1 = OP(s1, (TD)s2); \
  893. @@ -4399,7 +4373,6 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
  894. void *vs2, CPURISCVState *env, \
  895. uint32_t desc) \
  896. { \
  897. - uint32_t mlen = vext_mlen(desc); \
  898. uint32_t vm = vext_vm(desc); \
  899. uint32_t vl = env->vl; \
  900. uint32_t i; \
  901. @@ -4408,7 +4381,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
  902. \
  903. for (i = 0; i < vl; i++) { \
  904. TS2 s2 = *((TS2 *)vs2 + HS2(i)); \
  905. - if (!vm && !vext_elem_mask(v0, mlen, i)) { \
  906. + if (!vm && !vext_elem_mask(v0, i)) { \
  907. continue; \
  908. } \
  909. s1 = OP(s1, (TD)s2, &env->fp_status); \
  910. @@ -4437,7 +4410,6 @@ GEN_VEXT_FRED(vfredmin_vs_d, uint64_t, uint64_t, H8, H8, float64_minnum, clearq)
  911. void HELPER(vfwredsum_vs_h)(void *vd, void *v0, void *vs1,
  912. void *vs2, CPURISCVState *env, uint32_t desc)
  913. {
  914. - uint32_t mlen = vext_mlen(desc);
  915. uint32_t vm = vext_vm(desc);
  916. uint32_t vl = env->vl;
  917. uint32_t i;
  918. @@ -4446,7 +4418,7 @@ void HELPER(vfwredsum_vs_h)(void *vd, void *v0, void *vs1,
  919. for (i = 0; i < vl; i++) {
  920. uint16_t s2 = *((uint16_t *)vs2 + H2(i));
  921. - if (!vm && !vext_elem_mask(v0, mlen, i)) {
  922. + if (!vm && !vext_elem_mask(v0, i)) {
  923. continue;
  924. }
  925. s1 = float32_add(s1, float16_to_float32(s2, true, &env->fp_status),
  926. @@ -4459,7 +4431,6 @@ void HELPER(vfwredsum_vs_h)(void *vd, void *v0, void *vs1,
  927. void HELPER(vfwredsum_vs_w)(void *vd, void *v0, void *vs1,
  928. void *vs2, CPURISCVState *env, uint32_t desc)
  929. {
  930. - uint32_t mlen = vext_mlen(desc);
  931. uint32_t vm = vext_vm(desc);
  932. uint32_t vl = env->vl;
  933. uint32_t i;
  934. @@ -4468,7 +4439,7 @@ void HELPER(vfwredsum_vs_w)(void *vd, void *v0, void *vs1,
  935. for (i = 0; i < vl; i++) {
  936. uint32_t s2 = *((uint32_t *)vs2 + H4(i));
  937. - if (!vm && !vext_elem_mask(v0, mlen, i)) {
  938. + if (!vm && !vext_elem_mask(v0, i)) {
  939. continue;
  940. }
  941. s1 = float64_add(s1, float32_to_float64(s2, &env->fp_status),
  942. @@ -4487,19 +4458,18 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
  943. void *vs2, CPURISCVState *env, \
  944. uint32_t desc) \
  945. { \
  946. - uint32_t mlen = vext_mlen(desc); \
  947. - uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen; \
  948. + uint32_t vlmax = env_archcpu(env)->cfg.vlen; \
  949. uint32_t vl = env->vl; \
  950. uint32_t i; \
  951. int a, b; \
  952. \
  953. for (i = 0; i < vl; i++) { \
  954. - a = vext_elem_mask(vs1, mlen, i); \
  955. - b = vext_elem_mask(vs2, mlen, i); \
  956. - vext_set_elem_mask(vd, mlen, i, OP(b, a)); \
  957. + a = vext_elem_mask(vs1, i); \
  958. + b = vext_elem_mask(vs2, i); \
  959. + vext_set_elem_mask(vd, i, OP(b, a)); \
  960. } \
  961. for (; i < vlmax; i++) { \
  962. - vext_set_elem_mask(vd, mlen, i, 0); \
  963. + vext_set_elem_mask(vd, i, 0); \
  964. } \
  965. }
  966. @@ -4523,14 +4493,13 @@ target_ulong HELPER(vmpopc_m)(void *v0, void *vs2, CPURISCVState *env,
  967. uint32_t desc)
  968. {
  969. target_ulong cnt = 0;
  970. - uint32_t mlen = vext_mlen(desc);
  971. uint32_t vm = vext_vm(desc);
  972. uint32_t vl = env->vl;
  973. int i;
  974. for (i = 0; i < vl; i++) {
  975. - if (vm || vext_elem_mask(v0, mlen, i)) {
  976. - if (vext_elem_mask(vs2, mlen, i)) {
  977. + if (vm || vext_elem_mask(v0, i)) {
  978. + if (vext_elem_mask(vs2, i)) {
  979. cnt++;
  980. }
  981. }
  982. @@ -4542,14 +4511,13 @@ target_ulong HELPER(vmpopc_m)(void *v0, void *vs2, CPURISCVState *env,
  983. target_ulong HELPER(vmfirst_m)(void *v0, void *vs2, CPURISCVState *env,
  984. uint32_t desc)
  985. {
  986. - uint32_t mlen = vext_mlen(desc);
  987. uint32_t vm = vext_vm(desc);
  988. uint32_t vl = env->vl;
  989. int i;
  990. for (i = 0; i < vl; i++) {
  991. - if (vm || vext_elem_mask(v0, mlen, i)) {
  992. - if (vext_elem_mask(vs2, mlen, i)) {
  993. + if (vm || vext_elem_mask(v0, i)) {
  994. + if (vext_elem_mask(vs2, i)) {
  995. return i;
  996. }
  997. }
  998. @@ -4566,39 +4534,38 @@ enum set_mask_type {
  999. static void vmsetm(void *vd, void *v0, void *vs2, CPURISCVState *env,
  1000. uint32_t desc, enum set_mask_type type)
  1001. {
  1002. - uint32_t mlen = vext_mlen(desc);
  1003. - uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen;
  1004. + uint32_t vlmax = env_archcpu(env)->cfg.vlen;
  1005. uint32_t vm = vext_vm(desc);
  1006. uint32_t vl = env->vl;
  1007. int i;
  1008. bool first_mask_bit = false;
  1009. for (i = 0; i < vl; i++) {
  1010. - if (!vm && !vext_elem_mask(v0, mlen, i)) {
  1011. + if (!vm && !vext_elem_mask(v0, i)) {
  1012. continue;
  1013. }
  1014. /* write a zero to all following active elements */
  1015. if (first_mask_bit) {
  1016. - vext_set_elem_mask(vd, mlen, i, 0);
  1017. + vext_set_elem_mask(vd, i, 0);
  1018. continue;
  1019. }
  1020. - if (vext_elem_mask(vs2, mlen, i)) {
  1021. + if (vext_elem_mask(vs2, i)) {
  1022. first_mask_bit = true;
  1023. if (type == BEFORE_FIRST) {
  1024. - vext_set_elem_mask(vd, mlen, i, 0);
  1025. + vext_set_elem_mask(vd, i, 0);
  1026. } else {
  1027. - vext_set_elem_mask(vd, mlen, i, 1);
  1028. + vext_set_elem_mask(vd, i, 1);
  1029. }
  1030. } else {
  1031. if (type == ONLY_FIRST) {
  1032. - vext_set_elem_mask(vd, mlen, i, 0);
  1033. + vext_set_elem_mask(vd, i, 0);
  1034. } else {
  1035. - vext_set_elem_mask(vd, mlen, i, 1);
  1036. + vext_set_elem_mask(vd, i, 1);
  1037. }
  1038. }
  1039. }
  1040. for (; i < vlmax; i++) {
  1041. - vext_set_elem_mask(vd, mlen, i, 0);
  1042. + vext_set_elem_mask(vd, i, 0);
  1043. }
  1044. }
  1045. @@ -4625,19 +4592,18 @@ void HELPER(vmsof_m)(void *vd, void *v0, void *vs2, CPURISCVState *env,
  1046. void HELPER(NAME)(void *vd, void *v0, void *vs2, CPURISCVState *env, \
  1047. uint32_t desc) \
  1048. { \
  1049. - uint32_t mlen = vext_mlen(desc); \
  1050. - uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen; \
  1051. + uint32_t vlmax = env_archcpu(env)->cfg.vlen; \
  1052. uint32_t vm = vext_vm(desc); \
  1053. uint32_t vl = env->vl; \
  1054. uint32_t sum = 0; \
  1055. int i; \
  1056. \
  1057. for (i = 0; i < vl; i++) { \
  1058. - if (!vm && !vext_elem_mask(v0, mlen, i)) { \
  1059. + if (!vm && !vext_elem_mask(v0, i)) { \
  1060. continue; \
  1061. } \
  1062. *((ETYPE *)vd + H(i)) = sum; \
  1063. - if (vext_elem_mask(vs2, mlen, i)) { \
  1064. + if (vext_elem_mask(vs2, i)) { \
  1065. sum++; \
  1066. } \
  1067. } \
  1068. @@ -4653,14 +4619,13 @@ GEN_VEXT_VIOTA_M(viota_m_d, uint64_t, H8, clearq)
  1069. #define GEN_VEXT_VID_V(NAME, ETYPE, H, CLEAR_FN) \
  1070. void HELPER(NAME)(void *vd, void *v0, CPURISCVState *env, uint32_t desc) \
  1071. { \
  1072. - uint32_t mlen = vext_mlen(desc); \
  1073. - uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen; \
  1074. + uint32_t vlmax = env_archcpu(env)->cfg.vlen; \
  1075. uint32_t vm = vext_vm(desc); \
  1076. uint32_t vl = env->vl; \
  1077. int i; \
  1078. \
  1079. for (i = 0; i < vl; i++) { \
  1080. - if (!vm && !vext_elem_mask(v0, mlen, i)) { \
  1081. + if (!vm && !vext_elem_mask(v0, i)) { \
  1082. continue; \
  1083. } \
  1084. *((ETYPE *)vd + H(i)) = i; \
  1085. @@ -4682,14 +4647,13 @@ GEN_VEXT_VID_V(vid_v_d, uint64_t, H8, clearq)
  1086. void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
  1087. CPURISCVState *env, uint32_t desc) \
  1088. { \
  1089. - uint32_t mlen = vext_mlen(desc); \
  1090. - uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen; \
  1091. + uint32_t vlmax = env_archcpu(env)->cfg.vlen; \
  1092. uint32_t vm = vext_vm(desc); \
  1093. uint32_t vl = env->vl; \
  1094. target_ulong offset = s1, i; \
  1095. \
  1096. for (i = offset; i < vl; i++) { \
  1097. - if (!vm && !vext_elem_mask(v0, mlen, i)) { \
  1098. + if (!vm && !vext_elem_mask(v0, i)) { \
  1099. continue; \
  1100. } \
  1101. *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i - offset)); \
  1102. @@ -4707,15 +4671,14 @@ GEN_VEXT_VSLIDEUP_VX(vslideup_vx_d, uint64_t, H8, clearq)
  1103. void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
  1104. CPURISCVState *env, uint32_t desc) \
  1105. { \
  1106. - uint32_t mlen = vext_mlen(desc); \
  1107. - uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen; \
  1108. + uint32_t vlmax = env_archcpu(env)->cfg.vlen; \
  1109. uint32_t vm = vext_vm(desc); \
  1110. uint32_t vl = env->vl; \
  1111. target_ulong offset = s1, i; \
  1112. \
  1113. for (i = 0; i < vl; ++i) { \
  1114. target_ulong j = i + offset; \
  1115. - if (!vm && !vext_elem_mask(v0, mlen, i)) { \
  1116. + if (!vm && !vext_elem_mask(v0, i)) { \
  1117. continue; \
  1118. } \
  1119. *((ETYPE *)vd + H(i)) = j >= vlmax ? 0 : *((ETYPE *)vs2 + H(j)); \
  1120. @@ -4733,14 +4696,13 @@ GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_d, uint64_t, H8, clearq)
  1121. void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
  1122. CPURISCVState *env, uint32_t desc) \
  1123. { \
  1124. - uint32_t mlen = vext_mlen(desc); \
  1125. - uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen; \
  1126. + uint32_t vlmax = env_archcpu(env)->cfg.vlen; \
  1127. uint32_t vm = vext_vm(desc); \
  1128. uint32_t vl = env->vl; \
  1129. uint32_t i; \
  1130. \
  1131. for (i = 0; i < vl; i++) { \
  1132. - if (!vm && !vext_elem_mask(v0, mlen, i)) { \
  1133. + if (!vm && !vext_elem_mask(v0, i)) { \
  1134. continue; \
  1135. } \
  1136. if (i == 0) { \
  1137. @@ -4762,14 +4724,13 @@ GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_d, uint64_t, H8, clearq)
  1138. void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
  1139. CPURISCVState *env, uint32_t desc) \
  1140. { \
  1141. - uint32_t mlen = vext_mlen(desc); \
  1142. - uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen; \
  1143. + uint32_t vlmax = env_archcpu(env)->cfg.vlen; \
  1144. uint32_t vm = vext_vm(desc); \
  1145. uint32_t vl = env->vl; \
  1146. uint32_t i; \
  1147. \
  1148. for (i = 0; i < vl; i++) { \
  1149. - if (!vm && !vext_elem_mask(v0, mlen, i)) { \
  1150. + if (!vm && !vext_elem_mask(v0, i)) { \
  1151. continue; \
  1152. } \
  1153. if (i == vl - 1) { \
  1154. @@ -4792,15 +4753,14 @@ GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_d, uint64_t, H8, clearq)
  1155. void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
  1156. CPURISCVState *env, uint32_t desc) \
  1157. { \
  1158. - uint32_t mlen = vext_mlen(desc); \
  1159. - uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen; \
  1160. + uint32_t vlmax = env_archcpu(env)->cfg.vlen; \
  1161. uint32_t vm = vext_vm(desc); \
  1162. uint32_t vl = env->vl; \
  1163. uint64_t index; \
  1164. uint32_t i; \
  1165. \
  1166. for (i = 0; i < vl; i++) { \
  1167. - if (!vm && !vext_elem_mask(v0, mlen, i)) { \
  1168. + if (!vm && !vext_elem_mask(v0, i)) { \
  1169. continue; \
  1170. } \
  1171. index = *((ETYPE *)vs1 + H(i)); \
  1172. @@ -4823,15 +4783,14 @@ GEN_VEXT_VRGATHER_VV(vrgather_vv_d, uint64_t, H8, clearq)
  1173. void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
  1174. CPURISCVState *env, uint32_t desc) \
  1175. { \
  1176. - uint32_t mlen = vext_mlen(desc); \
  1177. - uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen; \
  1178. + uint32_t vlmax = env_archcpu(env)->cfg.vlen; \
  1179. uint32_t vm = vext_vm(desc); \
  1180. uint32_t vl = env->vl; \
  1181. uint64_t index = s1; \
  1182. uint32_t i; \
  1183. \
  1184. for (i = 0; i < vl; i++) { \
  1185. - if (!vm && !vext_elem_mask(v0, mlen, i)) { \
  1186. + if (!vm && !vext_elem_mask(v0, i)) { \
  1187. continue; \
  1188. } \
  1189. if (index >= vlmax) { \
  1190. @@ -4854,13 +4813,12 @@ GEN_VEXT_VRGATHER_VX(vrgather_vx_d, uint64_t, H8, clearq)
  1191. void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
  1192. CPURISCVState *env, uint32_t desc) \
  1193. { \
  1194. - uint32_t mlen = vext_mlen(desc); \
  1195. - uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen; \
  1196. + uint32_t vlmax = env_archcpu(env)->cfg.vlen; \
  1197. uint32_t vl = env->vl; \
  1198. uint32_t num = 0, i; \
  1199. \
  1200. for (i = 0; i < vl; i++) { \
  1201. - if (!vext_elem_mask(vs1, mlen, i)) { \
  1202. + if (!vext_elem_mask(vs1, i)) { \
  1203. continue; \
  1204. } \
  1205. *((ETYPE *)vd + H(num)) = *((ETYPE *)vs2 + H(i)); \
  1206. --
  1207. 2.33.1