0027-target-riscv-rvv-1.0-index-load-and-store-instructio.patch 24 KB

From 4425aa70a79cdb195cc7dce9f3d44090609c903d Mon Sep 17 00:00:00 2001
From: Frank Chang <frank.chang@sifive.com>
Date: Fri, 14 Aug 2020 18:07:31 +0800
Subject: [PATCH 027/107] target/riscv: rvv-1.0: index load and store
 instructions

Signed-off-by: Frank Chang <frank.chang@sifive.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
---
 target/riscv/helper.h                   |  67 ++++----
 target/riscv/insn32.decode              |  21 ++-
 target/riscv/insn_trans/trans_rvv.c.inc | 209 ++++++++++++++++--------
 target/riscv/vector_helper.c            |  89 +++++-----
 4 files changed, 222 insertions(+), 164 deletions(-)
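
Note: the new vext_check_st_index()/vext_check_ld_index() checks below compute
EMUL in log2 form as eew - s->sew + s->lmul and reject anything outside
[-3, 3], i.e. 1/8 <= EMUL <= 8 (spec Section 7.3). A minimal standalone sketch
of that arithmetic, with sew/lmul/eew as hypothetical stand-ins for the
DisasContext fields and the decoded offset EEW (all log2-encoded,
MO_8 = 0 ... MO_64 = 3):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative only: EMUL legality check mirroring 1/8 <= EMUL <= 8. */
static bool emul_in_range(int8_t sew, int8_t lmul, int8_t eew)
{
    int8_t emul = eew - sew + lmul;   /* log2((EEW / SEW) * LMUL) */
    return emul >= -3 && emul <= 3;   /* 1/8 <= EMUL <= 8 */
}

int main(void)
{
    /* vlxei8.v, SEW = 32 (sew = 2), LMUL = 1 (lmul = 0): EMUL = 1/4 -> legal */
    printf("%d\n", emul_in_range(2, 0, 0));
    /* vlxei64.v, SEW = 8 (sew = 0), LMUL = 8 (lmul = 3): EMUL = 64 -> reserved */
    printf("%d\n", emul_in_range(0, 3, 3));
    return 0;
}
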
diff --git a/target/riscv/helper.h b/target/riscv/helper.h
index 3f4d460054..927d28d53a 100644
--- a/target/riscv/helper.h
+++ b/target/riscv/helper.h
@@ -137,41 +137,38 @@ DEF_HELPER_6(vsse8_v, void, ptr, ptr, tl, tl, env, i32)
 DEF_HELPER_6(vsse16_v, void, ptr, ptr, tl, tl, env, i32)
 DEF_HELPER_6(vsse32_v, void, ptr, ptr, tl, tl, env, i32)
 DEF_HELPER_6(vsse64_v, void, ptr, ptr, tl, tl, env, i32)
-DEF_HELPER_6(vlxb_v_b, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vlxb_v_h, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vlxb_v_w, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vlxb_v_d, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vlxh_v_h, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vlxh_v_w, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vlxh_v_d, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vlxw_v_w, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vlxw_v_d, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vlxe_v_b, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vlxe_v_h, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vlxe_v_w, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vlxe_v_d, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vlxbu_v_b, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vlxbu_v_h, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vlxbu_v_w, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vlxbu_v_d, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vlxhu_v_h, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vlxhu_v_w, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vlxhu_v_d, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vlxwu_v_w, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vlxwu_v_d, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vsxb_v_b, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vsxb_v_h, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vsxb_v_w, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vsxb_v_d, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vsxh_v_h, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vsxh_v_w, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vsxh_v_d, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vsxw_v_w, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vsxw_v_d, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vsxe_v_b, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vsxe_v_h, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vsxe_v_w, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vsxe_v_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vlxei8_8_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vlxei8_16_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vlxei8_32_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vlxei8_64_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vlxei16_8_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vlxei16_16_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vlxei16_32_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vlxei16_64_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vlxei32_8_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vlxei32_16_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vlxei32_32_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vlxei32_64_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vlxei64_8_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vlxei64_16_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vlxei64_32_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vlxei64_64_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsxei8_8_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsxei8_16_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsxei8_32_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsxei8_64_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsxei16_8_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsxei16_16_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsxei16_32_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsxei16_64_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsxei32_8_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsxei32_16_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsxei32_32_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsxei32_64_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsxei64_8_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsxei64_16_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsxei64_32_v, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsxei64_64_v, void, ptr, ptr, tl, ptr, env, i32)
 DEF_HELPER_5(vlbff_v_b, void, ptr, ptr, tl, env, i32)
 DEF_HELPER_5(vlbff_v_h, void, ptr, ptr, tl, env, i32)
 DEF_HELPER_5(vlbff_v_w, void, ptr, ptr, tl, env, i32)
diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
index 03a1f6e53e..05c3c18028 100644
--- a/target/riscv/insn32.decode
+++ b/target/riscv/insn32.decode
@@ -268,18 +268,17 @@ vlbuff_v ... 000 . 10000 ..... 000 ..... 0000111 @r2_nfvm
 vlhuff_v ... 000 . 10000 ..... 101 ..... 0000111 @r2_nfvm
 vlwuff_v ... 000 . 10000 ..... 110 ..... 0000111 @r2_nfvm
-vlxb_v ... 111 . ..... ..... 000 ..... 0000111 @r_nfvm
-vlxh_v ... 111 . ..... ..... 101 ..... 0000111 @r_nfvm
-vlxw_v ... 111 . ..... ..... 110 ..... 0000111 @r_nfvm
-vlxe_v ... 011 . ..... ..... 111 ..... 0000111 @r_nfvm
-vlxbu_v ... 011 . ..... ..... 000 ..... 0000111 @r_nfvm
-vlxhu_v ... 011 . ..... ..... 101 ..... 0000111 @r_nfvm
-vlxwu_v ... 011 . ..... ..... 110 ..... 0000111 @r_nfvm
+# Vector ordered-indexed and unordered-indexed load insns.
+vlxei8_v ... 0-1 . ..... ..... 000 ..... 0000111 @r_nfvm
+vlxei16_v ... 0-1 . ..... ..... 101 ..... 0000111 @r_nfvm
+vlxei32_v ... 0-1 . ..... ..... 110 ..... 0000111 @r_nfvm
+vlxei64_v ... 0-1 . ..... ..... 111 ..... 0000111 @r_nfvm
+
 # Vector ordered-indexed and unordered-indexed store insns.
-vsxb_v ... -11 . ..... ..... 000 ..... 0100111 @r_nfvm
-vsxh_v ... -11 . ..... ..... 101 ..... 0100111 @r_nfvm
-vsxw_v ... -11 . ..... ..... 110 ..... 0100111 @r_nfvm
-vsxe_v ... -11 . ..... ..... 111 ..... 0100111 @r_nfvm
+vsxei8_v ... 0-1 . ..... ..... 000 ..... 0100111 @r_nfvm
+vsxei16_v ... 0-1 . ..... ..... 101 ..... 0100111 @r_nfvm
+vsxei32_v ... 0-1 . ..... ..... 110 ..... 0100111 @r_nfvm
+vsxei64_v ... 0-1 . ..... ..... 111 ..... 0100111 @r_nfvm
 #*** Vector AMO operations are encoded under the standard AMO major opcode ***
 vamoswapw_v 00001 . . ..... ..... 110 ..... 0101111 @r_wdvm
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
index e4c83cf74d..74dd7ee387 100644
--- a/target/riscv/insn_trans/trans_rvv.c.inc
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
@@ -126,12 +126,6 @@ static bool require_noover(const int8_t dst, const int8_t dst_lmul,
     return !is_overlapped(dst, dst_size, src, src_size);
 }
-static bool require_noover_seg(const int8_t dst, const int8_t nf,
-                               const int8_t src)
-{
-    return !is_overlapped(dst, nf, src, 1);
-}
-
 static bool do_vsetvl(DisasContext *ctx, int rd, int rs1, TCGv s2)
 {
     TCGv s1, dst;
@@ -224,9 +218,76 @@ static bool vext_check_load(DisasContext *s, int vd, int nf, int vm,
     return vext_check_store(s, vd, nf, eew) && require_vm(vm, vd);
 }
-static bool vext_check_isa_ill(DisasContext *s)
+/*
+ * Vector indexed, indexed segment store check function.
+ *
+ * Rules to be checked here:
+ * 1. EMUL must be within the range: 1/8 <= EMUL <= 8. (Section 7.3)
+ * 2. Index vector register number is a multiple of EMUL.
+ *    (Section 3.3.2, 7.3)
+ * 3. Destination vector register number is a multiple of LMUL.
+ *    (Section 3.3.2, 7.3)
+ * 4. The EMUL setting must be such that EMUL * NFIELDS ≤ 8. (Section 7.8)
+ * 5. Vector register numbers accessed by the segment load or store
+ *    cannot increment past 31. (Section 7.8)
+ */
+static bool vext_check_st_index(DisasContext *s, int vd, int vs2, int nf,
+                                uint8_t eew)
 {
-    return !s->vill;
+    int8_t emul = eew - s->sew + s->lmul;
+    return (emul >= -3 && emul <= 3) &&
+            require_align(vs2, emul) &&
+            require_align(vd, s->lmul) &&
+            require_nf(vd, nf, s->lmul);
+}
+
+/*
+ * Vector indexed, indexed segment load check function.
+ *
+ * Rules to be checked here:
+ * 1. All rules that apply to store instructions also apply
+ *    to load instructions.
+ * 2. Destination vector register group for a masked vector
+ *    instruction cannot overlap the source mask register (v0).
+ *    (Section 5.3)
+ * 3. Destination vector register cannot overlap a source vector
+ *    register (vs2) group.
+ *    (Section 5.2)
+ * 4. Destination vector register groups cannot overlap
+ *    the source vector register (vs2) group for
+ *    indexed segment load instructions. (Section 7.8.3)
+ */
+static bool vext_check_ld_index(DisasContext *s, int vd, int vs2,
+                                int nf, int vm, uint8_t eew)
+{
+    int8_t seg_vd;
+    int8_t emul = eew - s->sew + s->lmul;
+    bool ret = vext_check_st_index(s, vd, vs2, nf, eew) &&
+               require_vm(vm, vd);
+
+    /* Each segment register group has to follow overlap rules. */
+    for (int i = 0; i < nf; ++i) {
+        seg_vd = vd + (1 << MAX(s->lmul, 0)) * i;
+
+        if (eew > s->sew) {
+            if (seg_vd != vs2) {
+                ret &= require_noover(seg_vd, s->lmul, vs2, emul);
+            }
+        } else if (eew < s->sew) {
+            ret &= require_noover(seg_vd, s->lmul, vs2, emul);
+        }
+
+        /*
+         * Destination vector register groups cannot overlap
+         * the source vector register (vs2) group for
+         * indexed segment load instructions.
+         */
+        if (nf > 1) {
+            ret &= !is_overlapped(seg_vd, 1 << MAX(s->lmul, 0),
+                                  vs2, 1 << MAX(emul, 0));
+        }
+    }
+    return ret;
 }
 static bool vext_check_ss(DisasContext *s, int vd, int vs, int vm)
@@ -747,31 +808,38 @@ static bool ldst_index_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
     return true;
 }
-static bool ld_index_op(DisasContext *s, arg_rnfvm *a, uint8_t seq)
+static bool ld_index_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
 {
     uint32_t data = 0;
     gen_helper_ldst_index *fn;
-    static gen_helper_ldst_index * const fns[7][4] = {
-        { gen_helper_vlxb_v_b, gen_helper_vlxb_v_h,
-          gen_helper_vlxb_v_w, gen_helper_vlxb_v_d },
-        { NULL, gen_helper_vlxh_v_h,
-          gen_helper_vlxh_v_w, gen_helper_vlxh_v_d },
-        { NULL, NULL,
-          gen_helper_vlxw_v_w, gen_helper_vlxw_v_d },
-        { gen_helper_vlxe_v_b, gen_helper_vlxe_v_h,
-          gen_helper_vlxe_v_w, gen_helper_vlxe_v_d },
-        { gen_helper_vlxbu_v_b, gen_helper_vlxbu_v_h,
-          gen_helper_vlxbu_v_w, gen_helper_vlxbu_v_d },
-        { NULL, gen_helper_vlxhu_v_h,
-          gen_helper_vlxhu_v_w, gen_helper_vlxhu_v_d },
-        { NULL, NULL,
-          gen_helper_vlxwu_v_w, gen_helper_vlxwu_v_d },
+    static gen_helper_ldst_index * const fns[4][4] = {
+        /*
+         * offset vector register group EEW = 8,
+         * data vector register group EEW = SEW
+         */
+        { gen_helper_vlxei8_8_v, gen_helper_vlxei8_16_v,
+          gen_helper_vlxei8_32_v, gen_helper_vlxei8_64_v },
+        /*
+         * offset vector register group EEW = 16,
+         * data vector register group EEW = SEW
+         */
+        { gen_helper_vlxei16_8_v, gen_helper_vlxei16_16_v,
+          gen_helper_vlxei16_32_v, gen_helper_vlxei16_64_v },
+        /*
+         * offset vector register group EEW = 32,
+         * data vector register group EEW = SEW
+         */
+        { gen_helper_vlxei32_8_v, gen_helper_vlxei32_16_v,
+          gen_helper_vlxei32_32_v, gen_helper_vlxei32_64_v },
+        /*
+         * offset vector register group EEW = 64,
+         * data vector register group EEW = SEW
+         */
+        { gen_helper_vlxei64_8_v, gen_helper_vlxei64_16_v,
+          gen_helper_vlxei64_32_v, gen_helper_vlxei64_64_v }
     };
-    fn = fns[seq][s->sew];
-    if (fn == NULL) {
-        return false;
-    }
+    fn = fns[eew][s->sew];
     data = FIELD_DP32(data, VDATA, VM, a->vm);
     data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
@@ -779,50 +847,50 @@ static bool ld_index_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
     return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s, false);
 }
-/*
- * For vector indexed segment loads, the destination vector register
- * groups cannot overlap the source vector register group (specified by
- * `vs2`), else an illegal instruction exception is raised.
- */
-static bool ld_index_check(DisasContext *s, arg_rnfvm* a)
+static bool ld_index_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
 {
-    return (vext_check_isa_ill(s) &&
-            vext_check_overlap_mask(s, a->rd, a->vm, false) &&
-            vext_check_reg(s, a->rd, false) &&
-            vext_check_reg(s, a->rs2, false) &&
-            vext_check_nf(s, a->nf) &&
-            ((a->nf == 1) ||
-             vext_check_overlap_group(a->rd, a->nf << s->lmul,
-                                      a->rs2, 1 << s->lmul)));
+    return require_rvv(s) &&
+           vext_check_isa_ill(s) &&
+           vext_check_ld_index(s, a->rd, a->rs2, a->nf, a->vm, eew);
 }
-GEN_VEXT_TRANS(vlxb_v, 0, rnfvm, ld_index_op, ld_index_check)
-GEN_VEXT_TRANS(vlxh_v, 1, rnfvm, ld_index_op, ld_index_check)
-GEN_VEXT_TRANS(vlxw_v, 2, rnfvm, ld_index_op, ld_index_check)
-GEN_VEXT_TRANS(vlxe_v, 3, rnfvm, ld_index_op, ld_index_check)
-GEN_VEXT_TRANS(vlxbu_v, 4, rnfvm, ld_index_op, ld_index_check)
-GEN_VEXT_TRANS(vlxhu_v, 5, rnfvm, ld_index_op, ld_index_check)
-GEN_VEXT_TRANS(vlxwu_v, 6, rnfvm, ld_index_op, ld_index_check)
+GEN_VEXT_TRANS(vlxei8_v, MO_8, rnfvm, ld_index_op, ld_index_check)
+GEN_VEXT_TRANS(vlxei16_v, MO_16, rnfvm, ld_index_op, ld_index_check)
+GEN_VEXT_TRANS(vlxei32_v, MO_32, rnfvm, ld_index_op, ld_index_check)
+GEN_VEXT_TRANS(vlxei64_v, MO_64, rnfvm, ld_index_op, ld_index_check)
-static bool st_index_op(DisasContext *s, arg_rnfvm *a, uint8_t seq)
+static bool st_index_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
 {
     uint32_t data = 0;
     gen_helper_ldst_index *fn;
     static gen_helper_ldst_index * const fns[4][4] = {
-        { gen_helper_vsxb_v_b, gen_helper_vsxb_v_h,
-          gen_helper_vsxb_v_w, gen_helper_vsxb_v_d },
-        { NULL, gen_helper_vsxh_v_h,
-          gen_helper_vsxh_v_w, gen_helper_vsxh_v_d },
-        { NULL, NULL,
-          gen_helper_vsxw_v_w, gen_helper_vsxw_v_d },
-        { gen_helper_vsxe_v_b, gen_helper_vsxe_v_h,
-          gen_helper_vsxe_v_w, gen_helper_vsxe_v_d }
+        /*
+         * offset vector register group EEW = 8,
+         * data vector register group EEW = SEW
+         */
+        { gen_helper_vsxei8_8_v, gen_helper_vsxei8_16_v,
+          gen_helper_vsxei8_32_v, gen_helper_vsxei8_64_v },
+        /*
+         * offset vector register group EEW = 16,
+         * data vector register group EEW = SEW
+         */
+        { gen_helper_vsxei16_8_v, gen_helper_vsxei16_16_v,
+          gen_helper_vsxei16_32_v, gen_helper_vsxei16_64_v },
+        /*
+         * offset vector register group EEW = 32,
+         * data vector register group EEW = SEW
+         */
+        { gen_helper_vsxei32_8_v, gen_helper_vsxei32_16_v,
+          gen_helper_vsxei32_32_v, gen_helper_vsxei32_64_v },
+        /*
+         * offset vector register group EEW = 64,
+         * data vector register group EEW = SEW
+         */
+        { gen_helper_vsxei64_8_v, gen_helper_vsxei64_16_v,
+          gen_helper_vsxei64_32_v, gen_helper_vsxei64_64_v }
     };
-    fn = fns[seq][s->sew];
-    if (fn == NULL) {
-        return false;
-    }
+    fn = fns[eew][s->sew];
     data = FIELD_DP32(data, VDATA, VM, a->vm);
     data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
@@ -830,18 +898,17 @@ static bool st_index_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
     return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s, true);
 }
-static bool st_index_check(DisasContext *s, arg_rnfvm* a)
+static bool st_index_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
 {
-    return (vext_check_isa_ill(s) &&
-            vext_check_reg(s, a->rd, false) &&
-            vext_check_reg(s, a->rs2, false) &&
-            vext_check_nf(s, a->nf));
+    return require_rvv(s) &&
+           vext_check_isa_ill(s) &&
+           vext_check_st_index(s, a->rd, a->rs2, a->nf, eew);
 }
-GEN_VEXT_TRANS(vsxb_v, 0, rnfvm, st_index_op, st_index_check)
-GEN_VEXT_TRANS(vsxh_v, 1, rnfvm, st_index_op, st_index_check)
-GEN_VEXT_TRANS(vsxw_v, 2, rnfvm, st_index_op, st_index_check)
-GEN_VEXT_TRANS(vsxe_v, 3, rnfvm, st_index_op, st_index_check)
+GEN_VEXT_TRANS(vsxei8_v, MO_8, rnfvm, st_index_op, st_index_check)
+GEN_VEXT_TRANS(vsxei16_v, MO_16, rnfvm, st_index_op, st_index_check)
+GEN_VEXT_TRANS(vsxei32_v, MO_32, rnfvm, st_index_op, st_index_check)
+GEN_VEXT_TRANS(vsxei64_v, MO_64, rnfvm, st_index_op, st_index_check)
 /*
  *** unit stride fault-only-first load
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
index ad45dd9006..95d367489b 100644
--- a/target/riscv/vector_helper.c
+++ b/target/riscv/vector_helper.c
@@ -371,8 +371,7 @@ vext_ldst_index(void *vd, void *v0, target_ulong base,
                 void *vs2, CPURISCVState *env, uint32_t desc,
                 vext_get_index_addr get_index_addr,
                 vext_ldst_elem_fn *ldst_elem,
-                uint32_t esz, uint32_t msz, uintptr_t ra,
-                MMUAccessType access_type)
+                uint32_t esz, uintptr_t ra, MMUAccessType access_type)
 {
     uint32_t i, k;
     uint32_t nf = vext_nf(desc);
@@ -384,7 +383,7 @@ vext_ldst_index(void *vd, void *v0, target_ulong base,
         if (!vm && !vext_elem_mask(v0, i)) {
             continue;
         }
-        probe_pages(env, get_index_addr(base, i, vs2), nf * msz, ra,
+        probe_pages(env, get_index_addr(base, i, vs2), nf * esz, ra,
                     access_type);
     }
     /* load bytes from guest memory */
@@ -394,67 +393,63 @@ vext_ldst_index(void *vd, void *v0, target_ulong base,
             continue;
         }
         while (k < nf) {
-            abi_ptr addr = get_index_addr(base, i, vs2) + k * msz;
+            abi_ptr addr = get_index_addr(base, i, vs2) + k * esz;
             ldst_elem(env, addr, i + k * vlmax, vd, ra);
             k++;
         }
     }
 }
-#define GEN_VEXT_LD_INDEX(NAME, MTYPE, ETYPE, INDEX_FN, LOAD_FN) \
+#define GEN_VEXT_LD_INDEX(NAME, ETYPE, INDEX_FN, LOAD_FN) \
 void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
                   void *vs2, CPURISCVState *env, uint32_t desc) \
 { \
     vext_ldst_index(vd, v0, base, vs2, env, desc, INDEX_FN, \
-                    LOAD_FN, sizeof(ETYPE), sizeof(MTYPE), \
-                    GETPC(), MMU_DATA_LOAD); \
-}
-
-GEN_VEXT_LD_INDEX(vlxb_v_b, int8_t, int8_t, idx_b, ldb_b)
-GEN_VEXT_LD_INDEX(vlxb_v_h, int8_t, int16_t, idx_h, ldb_h)
-GEN_VEXT_LD_INDEX(vlxb_v_w, int8_t, int32_t, idx_w, ldb_w)
-GEN_VEXT_LD_INDEX(vlxb_v_d, int8_t, int64_t, idx_d, ldb_d)
-GEN_VEXT_LD_INDEX(vlxh_v_h, int16_t, int16_t, idx_h, ldh_h)
-GEN_VEXT_LD_INDEX(vlxh_v_w, int16_t, int32_t, idx_w, ldh_w)
-GEN_VEXT_LD_INDEX(vlxh_v_d, int16_t, int64_t, idx_d, ldh_d)
-GEN_VEXT_LD_INDEX(vlxw_v_w, int32_t, int32_t, idx_w, ldw_w)
-GEN_VEXT_LD_INDEX(vlxw_v_d, int32_t, int64_t, idx_d, ldw_d)
-GEN_VEXT_LD_INDEX(vlxe_v_b, int8_t, int8_t, idx_b, lde_b)
-GEN_VEXT_LD_INDEX(vlxe_v_h, int16_t, int16_t, idx_h, lde_h)
-GEN_VEXT_LD_INDEX(vlxe_v_w, int32_t, int32_t, idx_w, lde_w)
-GEN_VEXT_LD_INDEX(vlxe_v_d, int64_t, int64_t, idx_d, lde_d)
-GEN_VEXT_LD_INDEX(vlxbu_v_b, uint8_t, uint8_t, idx_b, ldbu_b)
-GEN_VEXT_LD_INDEX(vlxbu_v_h, uint8_t, uint16_t, idx_h, ldbu_h)
-GEN_VEXT_LD_INDEX(vlxbu_v_w, uint8_t, uint32_t, idx_w, ldbu_w)
-GEN_VEXT_LD_INDEX(vlxbu_v_d, uint8_t, uint64_t, idx_d, ldbu_d)
-GEN_VEXT_LD_INDEX(vlxhu_v_h, uint16_t, uint16_t, idx_h, ldhu_h)
-GEN_VEXT_LD_INDEX(vlxhu_v_w, uint16_t, uint32_t, idx_w, ldhu_w)
-GEN_VEXT_LD_INDEX(vlxhu_v_d, uint16_t, uint64_t, idx_d, ldhu_d)
-GEN_VEXT_LD_INDEX(vlxwu_v_w, uint32_t, uint32_t, idx_w, ldwu_w)
-GEN_VEXT_LD_INDEX(vlxwu_v_d, uint32_t, uint64_t, idx_d, ldwu_d)
-
-#define GEN_VEXT_ST_INDEX(NAME, MTYPE, ETYPE, INDEX_FN, STORE_FN)\
+                    LOAD_FN, sizeof(ETYPE), GETPC(), MMU_DATA_LOAD); \
+}
+
+GEN_VEXT_LD_INDEX(vlxei8_8_v, int8_t, idx_b, lde_b)
+GEN_VEXT_LD_INDEX(vlxei8_16_v, int16_t, idx_b, lde_h)
+GEN_VEXT_LD_INDEX(vlxei8_32_v, int32_t, idx_b, lde_w)
+GEN_VEXT_LD_INDEX(vlxei8_64_v, int64_t, idx_b, lde_d)
+GEN_VEXT_LD_INDEX(vlxei16_8_v, int8_t, idx_h, lde_b)
+GEN_VEXT_LD_INDEX(vlxei16_16_v, int16_t, idx_h, lde_h)
+GEN_VEXT_LD_INDEX(vlxei16_32_v, int32_t, idx_h, lde_w)
+GEN_VEXT_LD_INDEX(vlxei16_64_v, int64_t, idx_h, lde_d)
+GEN_VEXT_LD_INDEX(vlxei32_8_v, int8_t, idx_w, lde_b)
+GEN_VEXT_LD_INDEX(vlxei32_16_v, int16_t, idx_w, lde_h)
+GEN_VEXT_LD_INDEX(vlxei32_32_v, int32_t, idx_w, lde_w)
+GEN_VEXT_LD_INDEX(vlxei32_64_v, int64_t, idx_w, lde_d)
+GEN_VEXT_LD_INDEX(vlxei64_8_v, int8_t, idx_d, lde_b)
+GEN_VEXT_LD_INDEX(vlxei64_16_v, int16_t, idx_d, lde_h)
+GEN_VEXT_LD_INDEX(vlxei64_32_v, int32_t, idx_d, lde_w)
+GEN_VEXT_LD_INDEX(vlxei64_64_v, int64_t, idx_d, lde_d)
+
+#define GEN_VEXT_ST_INDEX(NAME, ETYPE, INDEX_FN, STORE_FN) \
 void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
                   void *vs2, CPURISCVState *env, uint32_t desc) \
 { \
     vext_ldst_index(vd, v0, base, vs2, env, desc, INDEX_FN, \
-                    STORE_FN, sizeof(ETYPE), sizeof(MTYPE), \
+                    STORE_FN, sizeof(ETYPE), \
                     GETPC(), MMU_DATA_STORE); \
 }
-GEN_VEXT_ST_INDEX(vsxb_v_b, int8_t, int8_t, idx_b, stb_b)
-GEN_VEXT_ST_INDEX(vsxb_v_h, int8_t, int16_t, idx_h, stb_h)
-GEN_VEXT_ST_INDEX(vsxb_v_w, int8_t, int32_t, idx_w, stb_w)
-GEN_VEXT_ST_INDEX(vsxb_v_d, int8_t, int64_t, idx_d, stb_d)
-GEN_VEXT_ST_INDEX(vsxh_v_h, int16_t, int16_t, idx_h, sth_h)
-GEN_VEXT_ST_INDEX(vsxh_v_w, int16_t, int32_t, idx_w, sth_w)
-GEN_VEXT_ST_INDEX(vsxh_v_d, int16_t, int64_t, idx_d, sth_d)
-GEN_VEXT_ST_INDEX(vsxw_v_w, int32_t, int32_t, idx_w, stw_w)
-GEN_VEXT_ST_INDEX(vsxw_v_d, int32_t, int64_t, idx_d, stw_d)
-GEN_VEXT_ST_INDEX(vsxe_v_b, int8_t, int8_t, idx_b, ste_b)
-GEN_VEXT_ST_INDEX(vsxe_v_h, int16_t, int16_t, idx_h, ste_h)
-GEN_VEXT_ST_INDEX(vsxe_v_w, int32_t, int32_t, idx_w, ste_w)
-GEN_VEXT_ST_INDEX(vsxe_v_d, int64_t, int64_t, idx_d, ste_d)
+GEN_VEXT_ST_INDEX(vsxei8_8_v, int8_t, idx_b, ste_b)
+GEN_VEXT_ST_INDEX(vsxei8_16_v, int16_t, idx_b, ste_h)
+GEN_VEXT_ST_INDEX(vsxei8_32_v, int32_t, idx_b, ste_w)
+GEN_VEXT_ST_INDEX(vsxei8_64_v, int64_t, idx_b, ste_d)
+GEN_VEXT_ST_INDEX(vsxei16_8_v, int8_t, idx_h, ste_b)
+GEN_VEXT_ST_INDEX(vsxei16_16_v, int16_t, idx_h, ste_h)
+GEN_VEXT_ST_INDEX(vsxei16_32_v, int32_t, idx_h, ste_w)
+GEN_VEXT_ST_INDEX(vsxei16_64_v, int64_t, idx_h, ste_d)
+GEN_VEXT_ST_INDEX(vsxei32_8_v, int8_t, idx_w, ste_b)
+GEN_VEXT_ST_INDEX(vsxei32_16_v, int16_t, idx_w, ste_h)
+GEN_VEXT_ST_INDEX(vsxei32_32_v, int32_t, idx_w, ste_w)
+GEN_VEXT_ST_INDEX(vsxei32_64_v, int64_t, idx_w, ste_d)
+GEN_VEXT_ST_INDEX(vsxei64_8_v, int8_t, idx_d, ste_b)
+GEN_VEXT_ST_INDEX(vsxei64_16_v, int16_t, idx_d, ste_h)
+GEN_VEXT_ST_INDEX(vsxei64_32_v, int32_t, idx_d, ste_w)
+GEN_VEXT_ST_INDEX(vsxei64_64_v, int64_t, idx_d, ste_d)
 /*
  *** unit-stride fault-only-first load instructions
--
2.33.1