0073-target-riscv-rvv-1.0-implement-vstart-CSR.patch

  1. From 3968a1d276a980cfd6a0db2eb826c4b850f6d595 Mon Sep 17 00:00:00 2001
  2. From: Frank Chang <frank.chang@sifive.com>
  3. Date: Fri, 14 Aug 2020 17:02:24 +0800
  4. Subject: [PATCH 073/107] target/riscv: rvv-1.0: implement vstart CSR
  5. * Update and check vstart value for vector instructions.
  6. * Add whole register move instruction helper functions, as we have to
  7. call a helper function for the case where vstart is not zero.
  8. Signed-off-by: Frank Chang <frank.chang@sifive.com>
  9. ---
  10. Perhaps we can remove the probe functions in vector_helper.c to align with
  11. the hardware's behavior, which raises the memory access exception and
  12. updates the vstart value at the exact vector element being processed.
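
(Aside, not part of the patch: a minimal, self-contained C sketch of the per-element
resume pattern the reworked helpers adopt below — element loops begin at env->vstart
instead of 0 and vstart is reset to 0 once the instruction completes. The names
toy_env and toy_vadd_vx are made up for illustration and are not QEMU code.)

    #include <stdint.h>
    #include <stdio.h>

    /* Toy stand-in for CPURISCVState: only the fields the sketch needs. */
    struct toy_env {
        uint32_t vstart;
        uint32_t vl;
    };

    /* Resume an element-wise op at vstart, as the reworked helpers do. */
    static void toy_vadd_vx(uint8_t *vd, const uint8_t *vs2, uint8_t s1,
                            struct toy_env *env)
    {
        for (uint32_t i = env->vstart; i < env->vl; i++) {
            vd[i] = vs2[i] + s1;
        }
        env->vstart = 0;    /* vstart is always cleared on completion */
    }

    int main(void)
    {
        uint8_t vs2[8] = {0, 1, 2, 3, 4, 5, 6, 7}, vd[8] = {0};
        struct toy_env env = { .vstart = 3, .vl = 8 };

        toy_vadd_vx(vd, vs2, 10, &env);
        /* Elements 0..2 are untouched; 3..7 hold vs2[i] + 10; vstart is 0. */
        for (int i = 0; i < 8; i++) {
            printf("%d ", vd[i]);
        }
        printf("\nvstart=%u\n", env.vstart);
        return 0;
    }
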
  13. ---
  14. target/riscv/csr.c | 6 +-
  15. target/riscv/helper.h | 5 +
  16. target/riscv/insn_trans/trans_rvv.c.inc | 71 ++++++---
  17. target/riscv/translate.c | 6 +-
  18. target/riscv/vector_helper.c | 201 +++++++++++++++++-------
  19. 5 files changed, 205 insertions(+), 84 deletions(-)
  20. diff --git a/target/riscv/csr.c b/target/riscv/csr.c
  21. index e065b042df..b932e28bbf 100644
  22. --- a/target/riscv/csr.c
  23. +++ b/target/riscv/csr.c
  24. @@ -334,7 +334,11 @@ static int write_vstart(CPURISCVState *env, int csrno, target_ulong val)
  25. env->mstatus |= MSTATUS_VS;
  26. #endif
  27. - env->vstart = val;
  28. + /*
  29. + * The vstart CSR is defined to have only enough writable bits
  30. + * to hold the largest element index, i.e. lg2(VLEN) bits.
  31. + */
  32. + env->vstart = val & ~(~0ULL << ctzl(env_archcpu(env)->cfg.vlen));
  33. return 0;
  34. }
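
(Illustration only, not from the patch: the masking above keeps just enough low bits
of the written value to index any element, i.e. lg2(VLEN) bits. A standalone sketch,
assuming a power-of-two VLEN and using the GCC/Clang builtin in place of QEMU's ctzl().)

    #include <stdint.h>
    #include <stdio.h>

    /* Keep only the low lg2(vlen) bits of val, mirroring write_vstart(). */
    static uint64_t mask_vstart(uint64_t val, unsigned long vlen)
    {
        /* For a power-of-two vlen, __builtin_ctzl(vlen) == lg2(vlen). */
        return val & ~(~0ULL << __builtin_ctzl(vlen));
    }

    int main(void)
    {
        /* With VLEN = 128 the writable field is 7 bits: 0x1ff is clipped to 0x7f. */
        printf("0x%llx\n", (unsigned long long)mask_vstart(0x1ff, 128));
        return 0;
    }
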
  35. diff --git a/target/riscv/helper.h b/target/riscv/helper.h
  36. index 572d2e5a4c..e233548623 100644
  37. --- a/target/riscv/helper.h
  38. +++ b/target/riscv/helper.h
  39. @@ -1148,6 +1148,11 @@ DEF_HELPER_6(vcompress_vm_h, void, ptr, ptr, ptr, ptr, env, i32)
  40. DEF_HELPER_6(vcompress_vm_w, void, ptr, ptr, ptr, ptr, env, i32)
  41. DEF_HELPER_6(vcompress_vm_d, void, ptr, ptr, ptr, ptr, env, i32)
  42. +DEF_HELPER_4(vmv1r_v, void, ptr, ptr, env, i32)
  43. +DEF_HELPER_4(vmv2r_v, void, ptr, ptr, env, i32)
  44. +DEF_HELPER_4(vmv4r_v, void, ptr, ptr, env, i32)
  45. +DEF_HELPER_4(vmv8r_v, void, ptr, ptr, env, i32)
  46. +
  47. DEF_HELPER_5(vzext_vf2_h, void, ptr, ptr, ptr, env, i32)
  48. DEF_HELPER_5(vzext_vf2_w, void, ptr, ptr, ptr, env, i32)
  49. DEF_HELPER_5(vzext_vf2_d, void, ptr, ptr, ptr, env, i32)
  50. diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
  51. index d205a05e83..b3d2a9113e 100644
  52. --- a/target/riscv/insn_trans/trans_rvv.c.inc
  53. +++ b/target/riscv/insn_trans/trans_rvv.c.inc
  54. @@ -3014,7 +3014,8 @@ GEN_MM_TRANS(vmxnor_mm)
  55. static bool trans_vpopc_m(DisasContext *s, arg_rmr *a)
  56. {
  57. if (require_rvv(s) &&
  58. - vext_check_isa_ill(s)) {
  59. + vext_check_isa_ill(s) &&
  60. + s->vstart == 0) {
  61. TCGv_ptr src2, mask;
  62. TCGv dst;
  63. TCGv_i32 desc;
  64. @@ -3047,7 +3048,8 @@ static bool trans_vpopc_m(DisasContext *s, arg_rmr *a)
  65. static bool trans_vfirst_m(DisasContext *s, arg_rmr *a)
  66. {
  67. if (require_rvv(s) &&
  68. - vext_check_isa_ill(s)) {
  69. + vext_check_isa_ill(s) &&
  70. + s->vstart == 0) {
  71. TCGv_ptr src2, mask;
  72. TCGv dst;
  73. TCGv_i32 desc;
  74. @@ -3084,7 +3086,8 @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
  75. if (require_rvv(s) && \
  76. vext_check_isa_ill(s) && \
  77. require_vm(a->vm, a->rd) && \
  78. - (a->rd != a->rs2)) { \
  79. + (a->rd != a->rs2) && \
  80. + (s->vstart == 0)) { \
  81. uint32_t data = 0; \
  82. gen_helper_gvec_3_ptr *fn = gen_helper_##NAME; \
  83. TCGLabel *over = gen_new_label(); \
  84. @@ -3119,7 +3122,8 @@ static bool trans_viota_m(DisasContext *s, arg_viota_m *a)
  85. vext_check_isa_ill(s) &&
  86. !is_overlapped(a->rd, 1 << MAX(s->lmul, 0), a->rs2, 1) &&
  87. require_vm(a->vm, a->rd) &&
  88. - require_align(a->rd, s->lmul)) {
  89. + require_align(a->rd, s->lmul) &&
  90. + (s->vstart == 0)) {
  91. uint32_t data = 0;
  92. TCGLabel *over = gen_new_label();
  93. tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
  94. @@ -3341,6 +3345,7 @@ static bool trans_vmv_s_x(DisasContext *s, arg_vmv_s_x *a)
  95. TCGLabel *over = gen_new_label();
  96. tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
  97. + tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
  98. t1 = tcg_temp_new_i64();
  99. s1 = tcg_temp_new();
  100. @@ -3396,8 +3401,9 @@ static bool trans_vfmv_s_f(DisasContext *s, arg_vfmv_s_f *a)
  101. TCGv_i64 t1;
  102. TCGLabel *over = gen_new_label();
  103. - /* if vl == 0, skip vector register write back */
  104. + /* if vl == 0 or vstart >= vl, skip vector register write back */
  105. tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
  106. + tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
  107. /* NaN-box f[rs1] */
  108. t1 = tcg_temp_new_i64();
  109. @@ -3569,6 +3575,7 @@ static bool vcompress_vm_check(DisasContext *s, arg_r *a)
  110. require_align(a->rs2, s->lmul) &&
  111. (a->rd != a->rs2) &&
  112. !is_overlapped(a->rd, 1 << MAX(s->lmul, 0), a->rs1, 1) &&
  113. + (s->vstart == 0);
  114. }
  115. static bool trans_vcompress_vm(DisasContext *s, arg_r *a)
  116. @@ -3597,26 +3604,40 @@ static bool trans_vcompress_vm(DisasContext *s, arg_r *a)
  117. * Whole Vector Register Move Instructions ignore vtype and vl setting.
  118. * Thus, we don't need to check vill bit. (Section 17.6)
  119. */
  120. -#define GEN_VMV_WHOLE_TRANS(NAME, LEN) \
  121. -static bool trans_##NAME(DisasContext *s, arg_##NAME * a) \
  122. -{ \
  123. - if (require_rvv(s) && \
  124. - QEMU_IS_ALIGNED(a->rd, LEN) && \
  125. - QEMU_IS_ALIGNED(a->rs2, LEN)) { \
  126. - /* EEW = 8 */ \
  127. - tcg_gen_gvec_mov(MO_8, vreg_ofs(s, a->rd), \
  128. - vreg_ofs(s, a->rs2), \
  129. - s->vlen / 8 * LEN, s->vlen / 8 * LEN); \
  130. - mark_vs_dirty(s); \
  131. - return true; \
  132. - } \
  133. - return false; \
  134. -}
  135. -
  136. -GEN_VMV_WHOLE_TRANS(vmv1r_v, 1)
  137. -GEN_VMV_WHOLE_TRANS(vmv2r_v, 2)
  138. -GEN_VMV_WHOLE_TRANS(vmv4r_v, 4)
  139. -GEN_VMV_WHOLE_TRANS(vmv8r_v, 8)
  140. +#define GEN_VMV_WHOLE_TRANS(NAME, LEN, SEQ) \
  141. +static bool trans_##NAME(DisasContext *s, arg_##NAME * a) \
  142. +{ \
  143. + if (require_rvv(s) && \
  144. + QEMU_IS_ALIGNED(a->rd, LEN) && \
  145. + QEMU_IS_ALIGNED(a->rs2, LEN)) { \
  146. + uint32_t maxsz = (s->vlen >> 3) * LEN; \
  147. + if (s->vstart == 0) { \
  148. + /* EEW = 8 */ \
  149. + tcg_gen_gvec_mov(MO_8, vreg_ofs(s, a->rd), \
  150. + vreg_ofs(s, a->rs2), maxsz, maxsz); \
  151. + mark_vs_dirty(s); \
  152. + } else { \
  153. + TCGLabel *over = gen_new_label(); \
  154. + tcg_gen_brcondi_tl(TCG_COND_GEU, cpu_vstart, maxsz, over); \
  155. + \
  156. + static gen_helper_gvec_2_ptr * const fns[4] = { \
  157. + gen_helper_vmv1r_v, gen_helper_vmv2r_v, \
  158. + gen_helper_vmv4r_v, gen_helper_vmv8r_v, \
  159. + }; \
  160. + tcg_gen_gvec_2_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2), \
  161. + cpu_env, 0, maxsz, 0, fns[SEQ]); \
  162. + mark_vs_dirty(s); \
  163. + gen_set_label(over); \
  164. + } \
  165. + return true; \
  166. + } \
  167. + return false; \
  168. +}
  169. +
  170. +GEN_VMV_WHOLE_TRANS(vmv1r_v, 1, 0)
  171. +GEN_VMV_WHOLE_TRANS(vmv2r_v, 2, 1)
  172. +GEN_VMV_WHOLE_TRANS(vmv4r_v, 4, 2)
  173. +GEN_VMV_WHOLE_TRANS(vmv8r_v, 8, 3)
  174. static bool int_ext_check(DisasContext *s, arg_rmr *a, uint8_t div)
  175. {
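
(Aside, not QEMU code: a rough stand-alone model of the slow path selected above for
vmv<nr>r.v when vstart may be nonzero — the remaining tail, from byte index vstart up
to NF * VLENB, is moved once (EEW = 8), and nothing is copied if vstart is already past
the end. toy_vmvNr and the VLENB value are made up; corner-case handling is simplified.)

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Toy model of vmv<NF>r.v resuming at a nonzero vstart (EEW = 8). */
    static void toy_vmvNr(uint8_t *vd, const uint8_t *vs2,
                          uint32_t nf, uint32_t vlenb, uint32_t *vstart)
    {
        uint32_t maxsz = nf * vlenb;          /* total bytes moved by the insn */

        if (*vstart < maxsz) {
            /* Copy only the not-yet-done tail, exactly once. */
            memcpy(vd + *vstart, vs2 + *vstart, maxsz - *vstart);
        }
        *vstart = 0;
    }

    int main(void)
    {
        uint8_t src[32], dst[32] = {0};
        uint32_t vstart = 20;

        for (int i = 0; i < 32; i++) {
            src[i] = (uint8_t)i;
        }
        toy_vmvNr(dst, src, 2, 16, &vstart);  /* vmv2r.v with VLENB = 16 */
        printf("dst[19]=%d dst[20]=%d vstart=%u\n", dst[19], dst[20], vstart);
        return 0;
    }
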
  176. diff --git a/target/riscv/translate.c b/target/riscv/translate.c
  177. index 4dfb0a2a51..15305a3096 100644
  178. --- a/target/riscv/translate.c
  179. +++ b/target/riscv/translate.c
  180. @@ -33,7 +33,7 @@
  181. #include "internals.h"
  182. /* global register indices */
  183. -static TCGv cpu_gpr[32], cpu_pc, cpu_vl;
  184. +static TCGv cpu_gpr[32], cpu_pc, cpu_vl, cpu_vstart;
  185. static TCGv_i64 cpu_fpr[32]; /* assume F and D extensions */
  186. static TCGv load_res;
  187. static TCGv load_val;
  188. @@ -77,6 +77,7 @@ typedef struct DisasContext {
  189. int8_t lmul;
  190. uint8_t sew;
  191. uint16_t vlen;
  192. + target_ulong vstart;
  193. bool vl_eq_vlmax;
  194. CPUState *cs;
  195. } DisasContext;
  196. @@ -712,6 +713,7 @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
  197. ctx->vill = FIELD_EX32(tb_flags, TB_FLAGS, VILL);
  198. ctx->sew = FIELD_EX32(tb_flags, TB_FLAGS, SEW);
  199. ctx->lmul = sextract32(FIELD_EX32(tb_flags, TB_FLAGS, LMUL), 0, 3);
  200. + ctx->vstart = env->vstart;
  201. ctx->vl_eq_vlmax = FIELD_EX32(tb_flags, TB_FLAGS, VL_EQ_VLMAX);
  202. ctx->cs = cs;
  203. }
  204. @@ -829,6 +831,8 @@ void riscv_translate_init(void)
  205. cpu_pc = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, pc), "pc");
  206. cpu_vl = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, vl), "vl");
  207. + cpu_vstart = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, vstart),
  208. + "vstart");
  209. load_res = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, load_res),
  210. "load_res");
  211. load_val = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, load_val),
  212. diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
  213. index 6f4f30c5f9..7a2d2e7949 100644
  214. --- a/target/riscv/vector_helper.c
  215. +++ b/target/riscv/vector_helper.c
  216. @@ -232,14 +232,14 @@ vext_ldst_stride(void *vd, void *v0, target_ulong base,
  217. uint32_t max_elems = vext_max_elems(desc, esz);
  218. /* probe every access*/
  219. - for (i = 0; i < env->vl; i++) {
  220. + for (i = env->vstart; i < env->vl; i++) {
  221. if (!vm && !vext_elem_mask(v0, i)) {
  222. continue;
  223. }
  224. probe_pages(env, base + stride * i, nf << esz, ra, access_type);
  225. }
  226. /* do real access */
  227. - for (i = 0; i < env->vl; i++) {
  228. + for (i = env->vstart; i < env->vl; i++) {
  229. k = 0;
  230. if (!vm && !vext_elem_mask(v0, i)) {
  231. continue;
  232. @@ -249,7 +249,9 @@ vext_ldst_stride(void *vd, void *v0, target_ulong base,
  233. ldst_elem(env, addr, i + k * max_elems, vd, ra);
  234. k++;
  235. }
  236. + env->vstart = i;
  237. }
  238. + env->vstart = 0;
  239. }
  240. #define GEN_VEXT_LD_STRIDE(NAME, ETYPE, LOAD_FN) \
  241. @@ -299,14 +301,16 @@ vext_ldst_us(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc,
  242. /* probe every access */
  243. probe_pages(env, base, env->vl * (nf << esz), ra, access_type);
  244. /* load bytes from guest memory */
  245. - for (i = 0; i < env->vl; i++) {
  246. + for (i = env->vstart; i < env->vl; i++) {
  247. k = 0;
  248. while (k < nf) {
  249. target_ulong addr = base + ((i * nf + k) << esz);
  250. ldst_elem(env, addr, i + k * max_elems, vd, ra);
  251. k++;
  252. }
  253. + env->vstart = i;
  254. }
  255. + env->vstart = 0;
  256. }
  257. /*
  258. @@ -387,7 +391,7 @@ vext_ldst_index(void *vd, void *v0, target_ulong base,
  259. uint32_t max_elems = vext_max_elems(desc, esz);
  260. /* probe every access*/
  261. - for (i = 0; i < env->vl; i++) {
  262. + for (i = env->vstart; i < env->vl; i++) {
  263. if (!vm && !vext_elem_mask(v0, i)) {
  264. continue;
  265. }
  266. @@ -395,7 +399,7 @@ vext_ldst_index(void *vd, void *v0, target_ulong base,
  267. access_type);
  268. }
  269. /* load bytes from guest memory */
  270. - for (i = 0; i < env->vl; i++) {
  271. + for (i = env->vstart; i < env->vl; i++) {
  272. k = 0;
  273. if (!vm && !vext_elem_mask(v0, i)) {
  274. continue;
  275. @@ -405,7 +409,9 @@ vext_ldst_index(void *vd, void *v0, target_ulong base,
  276. ldst_elem(env, addr, i + k * max_elems, vd, ra);
  277. k++;
  278. }
  279. + env->vstart = i;
  280. }
  281. + env->vstart = 0;
  282. }
  283. #define GEN_VEXT_LD_INDEX(NAME, ETYPE, INDEX_FN, LOAD_FN) \
  284. @@ -476,7 +482,7 @@ vext_ldff(void *vd, void *v0, target_ulong base,
  285. target_ulong addr, offset, remain;
  286. /* probe every access*/
  287. - for (i = 0; i < env->vl; i++) {
  288. + for (i = env->vstart; i < env->vl; i++) {
  289. if (!vm && !vext_elem_mask(v0, i)) {
  290. continue;
  291. }
  292. @@ -516,7 +522,7 @@ ProbeSuccess:
  293. if (vl != 0) {
  294. env->vl = vl;
  295. }
  296. - for (i = 0; i < env->vl; i++) {
  297. + for (i = env->vstart; i < env->vl; i++) {
  298. k = 0;
  299. if (!vm && !vext_elem_mask(v0, i)) {
  300. continue;
  301. @@ -527,6 +533,7 @@ ProbeSuccess:
  302. k++;
  303. }
  304. }
  305. + env->vstart = 0;
  306. }
  307. #define GEN_VEXT_LDFF(NAME, ETYPE, LOAD_FN) \
  308. @@ -550,21 +557,37 @@ vext_ldst_whole(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc,
  309. vext_ldst_elem_fn *ldst_elem, uint32_t esz, uintptr_t ra,
  310. MMUAccessType access_type)
  311. {
  312. - uint32_t i, k;
  313. + uint32_t i, k, off, pos;
  314. uint32_t nf = vext_nf(desc);
  315. uint32_t vlenb = env_archcpu(env)->cfg.vlen >> 3;
  316. uint32_t max_elems = vlenb >> esz;
  317. /* probe every access */
  318. - probe_pages(env, base, vlenb * nf, ra, access_type);
  319. + probe_pages(env, base, max_elems * nf, ra, access_type);
  320. - /* load bytes from guest memory */
  321. - for (k = 0; k < nf; k++) {
  322. + k = env->vstart / max_elems;
  323. + off = env->vstart % max_elems;
  324. +
  325. + if (off) {
  326. + /* load/store rest of elements of current segment pointed by vstart */
  327. + for (pos = off; pos < max_elems; pos++) {
  328. + target_ulong addr = base + ((pos + k * max_elems) << esz);
  329. + ldst_elem(env, addr, pos + k * max_elems, vd, ra);
  330. + env->vstart++;
  331. + }
  332. + k++;
  333. + }
  334. +
  335. + /* load/store elements for rest of segments */
  336. + for (; k < nf; k++) {
  337. for (i = 0; i < max_elems; i++) {
  338. target_ulong addr = base + ((i + k * max_elems) << esz);
  339. ldst_elem(env, addr, i + k * max_elems, vd, ra);
  340. + env->vstart++;
  341. }
  342. }
  343. +
  344. + env->vstart = 0;
  345. }
  346. #define GEN_VEXT_LD_WHOLE(NAME, ETYPE, LOAD_FN) \
  347. @@ -725,20 +748,21 @@ vext_amo_noatomic(void *vs3, void *v0, target_ulong base,
  348. uint32_t wd = vext_wd(desc);
  349. uint32_t vm = vext_vm(desc);
  350. - for (i = 0; i < env->vl; i++) {
  351. + for (i = env->vstart; i < env->vl; i++) {
  352. if (!vm && !vext_elem_mask(v0, i)) {
  353. continue;
  354. }
  355. probe_pages(env, get_index_addr(base, i, vs2), esz, ra, MMU_DATA_LOAD);
  356. probe_pages(env, get_index_addr(base, i, vs2), esz, ra, MMU_DATA_STORE);
  357. }
  358. - for (i = 0; i < env->vl; i++) {
  359. + for (i = env->vstart; i < env->vl; i++) {
  360. if (!vm && !vext_elem_mask(v0, i)) {
  361. continue;
  362. }
  363. addr = get_index_addr(base, i, vs2);
  364. noatomic_op(vs3, addr, wd, i, env, ra);
  365. }
  366. + env->vstart = 0;
  367. }
  368. #define GEN_VEXT_AMO(NAME, ETYPE, INDEX_FN) \
  369. @@ -895,12 +919,13 @@ static void do_vext_vv(void *vd, void *v0, void *vs1, void *vs2,
  370. uint32_t vl = env->vl;
  371. uint32_t i;
  372. - for (i = 0; i < vl; i++) {
  373. + for (i = env->vstart; i < vl; i++) {
  374. if (!vm && !vext_elem_mask(v0, i)) {
  375. continue;
  376. }
  377. fn(vd, vs1, vs2, i);
  378. }
  379. + env->vstart = 0;
  380. }
  381. /* generate the helpers for OPIVV */
  382. @@ -957,12 +982,13 @@ static void do_vext_vx(void *vd, void *v0, target_long s1, void *vs2,
  383. uint32_t vl = env->vl;
  384. uint32_t i;
  385. - for (i = 0; i < vl; i++) {
  386. + for (i = env->vstart; i < vl; i++) {
  387. if (!vm && !vext_elem_mask(v0, i)) {
  388. continue;
  389. }
  390. fn(vd, s1, vs2, i);
  391. }
  392. + env->vstart = 0;
  393. }
  394. /* generate the helpers for OPIVX */
  395. @@ -1150,13 +1176,14 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
  396. uint32_t vl = env->vl; \
  397. uint32_t i; \
  398. \
  399. - for (i = 0; i < vl; i++) { \
  400. + for (i = env->vstart; i < vl; i++) { \
  401. ETYPE s1 = *((ETYPE *)vs1 + H(i)); \
  402. ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
  403. ETYPE carry = vext_elem_mask(v0, i); \
  404. \
  405. *((ETYPE *)vd + H(i)) = DO_OP(s2, s1, carry); \
  406. } \
  407. + env->vstart = 0; \
  408. }
  409. GEN_VEXT_VADC_VVM(vadc_vvm_b, uint8_t, H1, DO_VADC)
  410. @@ -1176,12 +1203,13 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
  411. uint32_t vl = env->vl; \
  412. uint32_t i; \
  413. \
  414. - for (i = 0; i < vl; i++) { \
  415. + for (i = env->vstart; i < vl; i++) { \
  416. ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
  417. ETYPE carry = vext_elem_mask(v0, i); \
  418. \
  419. *((ETYPE *)vd + H(i)) = DO_OP(s2, (ETYPE)(target_long)s1, carry);\
  420. } \
  421. + env->vstart = 0; \
  422. }
  423. GEN_VEXT_VADC_VXM(vadc_vxm_b, uint8_t, H1, DO_VADC)
  424. @@ -1206,12 +1234,13 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
  425. uint32_t vm = vext_vm(desc); \
  426. uint32_t i; \
  427. \
  428. - for (i = 0; i < vl; i++) { \
  429. + for (i = env->vstart; i < vl; i++) { \
  430. ETYPE s1 = *((ETYPE *)vs1 + H(i)); \
  431. ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
  432. ETYPE carry = !vm && vext_elem_mask(v0, i); \
  433. vext_set_elem_mask(vd, i, DO_OP(s2, s1, carry)); \
  434. } \
  435. + env->vstart = 0; \
  436. }
  437. GEN_VEXT_VMADC_VVM(vmadc_vvm_b, uint8_t, H1, DO_MADC)
  438. @@ -1232,12 +1261,13 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
  439. uint32_t vm = vext_vm(desc); \
  440. uint32_t i; \
  441. \
  442. - for (i = 0; i < vl; i++) { \
  443. + for (i = env->vstart; i < vl; i++) { \
  444. ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
  445. ETYPE carry = !vm && vext_elem_mask(v0, i); \
  446. vext_set_elem_mask(vd, i, \
  447. DO_OP(s2, (ETYPE)(target_long)s1, carry)); \
  448. } \
  449. + env->vstart = 0; \
  450. }
  451. GEN_VEXT_VMADC_VXM(vmadc_vxm_b, uint8_t, H1, DO_MADC)
  452. @@ -1314,7 +1344,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
  453. uint32_t vl = env->vl; \
  454. uint32_t i; \
  455. \
  456. - for (i = 0; i < vl; i++) { \
  457. + for (i = env->vstart; i < vl; i++) { \
  458. if (!vm && !vext_elem_mask(v0, i)) { \
  459. continue; \
  460. } \
  461. @@ -1322,6 +1352,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
  462. TS2 s2 = *((TS2 *)vs2 + HS2(i)); \
  463. *((TS1 *)vd + HS1(i)) = OP(s2, s1 & MASK); \
  464. } \
  465. + env->vstart = 0; \
  466. }
  467. GEN_VEXT_SHIFT_VV(vsll_vv_b, uint8_t, uint8_t, H1, H1, DO_SLL, 0x7)
  468. @@ -1348,13 +1379,14 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
  469. uint32_t vl = env->vl; \
  470. uint32_t i; \
  471. \
  472. - for (i = 0; i < vl; i++) { \
  473. + for (i = env->vstart; i < vl; i++) { \
  474. if (!vm && !vext_elem_mask(v0, i)) { \
  475. continue; \
  476. } \
  477. TS2 s2 = *((TS2 *)vs2 + HS2(i)); \
  478. *((TD *)vd + HD(i)) = OP(s2, s1 & MASK); \
  479. } \
  480. + env->vstart = 0; \
  481. }
  482. GEN_VEXT_SHIFT_VX(vsll_vx_b, uint8_t, int8_t, H1, H1, DO_SLL, 0x7)
  483. @@ -1401,7 +1433,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
  484. uint32_t vl = env->vl; \
  485. uint32_t i; \
  486. \
  487. - for (i = 0; i < vl; i++) { \
  488. + for (i = env->vstart; i < vl; i++) { \
  489. ETYPE s1 = *((ETYPE *)vs1 + H(i)); \
  490. ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
  491. if (!vm && !vext_elem_mask(v0, i)) { \
  492. @@ -1409,6 +1441,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
  493. } \
  494. vext_set_elem_mask(vd, i, DO_OP(s2, s1)); \
  495. } \
  496. + env->vstart = 0; \
  497. }
  498. GEN_VEXT_CMP_VV(vmseq_vv_b, uint8_t, H1, DO_MSEQ)
  499. @@ -1449,7 +1482,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
  500. uint32_t vl = env->vl; \
  501. uint32_t i; \
  502. \
  503. - for (i = 0; i < vl; i++) { \
  504. + for (i = env->vstart; i < vl; i++) { \
  505. ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
  506. if (!vm && !vext_elem_mask(v0, i)) { \
  507. continue; \
  508. @@ -1457,6 +1490,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
  509. vext_set_elem_mask(vd, i, \
  510. DO_OP(s2, (ETYPE)(target_long)s1)); \
  511. } \
  512. + env->vstart = 0; \
  513. }
  514. GEN_VEXT_CMP_VX(vmseq_vx_b, uint8_t, H1, DO_MSEQ)
  515. @@ -1979,10 +2013,11 @@ void HELPER(NAME)(void *vd, void *vs1, CPURISCVState *env, \
  516. uint32_t vl = env->vl; \
  517. uint32_t i; \
  518. \
  519. - for (i = 0; i < vl; i++) { \
  520. + for (i = env->vstart; i < vl; i++) { \
  521. ETYPE s1 = *((ETYPE *)vs1 + H(i)); \
  522. *((ETYPE *)vd + H(i)) = s1; \
  523. } \
  524. + env->vstart = 0; \
  525. }
  526. GEN_VEXT_VMV_VV(vmv_v_v_b, int8_t, H1)
  527. @@ -1997,9 +2032,10 @@ void HELPER(NAME)(void *vd, uint64_t s1, CPURISCVState *env, \
  528. uint32_t vl = env->vl; \
  529. uint32_t i; \
  530. \
  531. - for (i = 0; i < vl; i++) { \
  532. + for (i = env->vstart; i < vl; i++) { \
  533. *((ETYPE *)vd + H(i)) = (ETYPE)s1; \
  534. } \
  535. + env->vstart = 0; \
  536. }
  537. GEN_VEXT_VMV_VX(vmv_v_x_b, int8_t, H1)
  538. @@ -2014,10 +2050,11 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
  539. uint32_t vl = env->vl; \
  540. uint32_t i; \
  541. \
  542. - for (i = 0; i < vl; i++) { \
  543. + for (i = env->vstart; i < vl; i++) { \
  544. ETYPE *vt = (!vext_elem_mask(v0, i) ? vs2 : vs1); \
  545. *((ETYPE *)vd + H(i)) = *(vt + H(i)); \
  546. } \
  547. + env->vstart = 0; \
  548. }
  549. GEN_VEXT_VMERGE_VV(vmerge_vvm_b, int8_t, H1)
  550. @@ -2032,12 +2069,13 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
  551. uint32_t vl = env->vl; \
  552. uint32_t i; \
  553. \
  554. - for (i = 0; i < vl; i++) { \
  555. + for (i = env->vstart; i < vl; i++) { \
  556. ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
  557. ETYPE d = (!vext_elem_mask(v0, i) ? s2 : \
  558. (ETYPE)(target_long)s1); \
  559. *((ETYPE *)vd + H(i)) = d; \
  560. } \
  561. + env->vstart = 0; \
  562. }
  563. GEN_VEXT_VMERGE_VX(vmerge_vxm_b, int8_t, H1)
  564. @@ -2074,12 +2112,13 @@ vext_vv_rm_1(void *vd, void *v0, void *vs1, void *vs2,
  565. uint32_t vl, uint32_t vm, int vxrm,
  566. opivv2_rm_fn *fn)
  567. {
  568. - for (uint32_t i = 0; i < vl; i++) {
  569. + for (uint32_t i = env->vstart; i < vl; i++) {
  570. if (!vm && !vext_elem_mask(v0, i)) {
  571. continue;
  572. }
  573. fn(vd, vs1, vs2, i, env, vxrm);
  574. }
  575. + env->vstart = 0;
  576. }
  577. static inline void
  578. @@ -2190,12 +2229,13 @@ vext_vx_rm_1(void *vd, void *v0, target_long s1, void *vs2,
  579. uint32_t vl, uint32_t vm, int vxrm,
  580. opivx2_rm_fn *fn)
  581. {
  582. - for (uint32_t i = 0; i < vl; i++) {
  583. + for (uint32_t i = env->vstart; i < vl; i++) {
  584. if (!vm && !vext_elem_mask(v0, i)) {
  585. continue;
  586. }
  587. fn(vd, s1, vs2, i, env, vxrm);
  588. }
  589. + env->vstart = 0;
  590. }
  591. static inline void
  592. @@ -2977,12 +3017,13 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
  593. uint32_t vl = env->vl; \
  594. uint32_t i; \
  595. \
  596. - for (i = 0; i < vl; i++) { \
  597. + for (i = env->vstart; i < vl; i++) { \
  598. if (!vm && !vext_elem_mask(v0, i)) { \
  599. continue; \
  600. } \
  601. do_##NAME(vd, vs1, vs2, i, env); \
  602. } \
  603. + env->vstart = 0; \
  604. }
  605. RVVCALL(OPFVV2, vfadd_vv_h, OP_UUU_H, H2, H2, H2, float16_add)
  606. @@ -3009,12 +3050,13 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, \
  607. uint32_t vl = env->vl; \
  608. uint32_t i; \
  609. \
  610. - for (i = 0; i < vl; i++) { \
  611. + for (i = env->vstart; i < vl; i++) { \
  612. if (!vm && !vext_elem_mask(v0, i)) { \
  613. continue; \
  614. } \
  615. do_##NAME(vd, s1, vs2, i, env); \
  616. } \
  617. + env->vstart = 0; \
  618. }
  619. RVVCALL(OPFVF2, vfadd_vf_h, OP_UUU_H, H2, H2, float16_add)
  620. @@ -3580,12 +3622,13 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, \
  621. if (vl == 0) { \
  622. return; \
  623. } \
  624. - for (i = 0; i < vl; i++) { \
  625. + for (i = env->vstart; i < vl; i++) { \
  626. if (!vm && !vext_elem_mask(v0, i)) { \
  627. continue; \
  628. } \
  629. do_##NAME(vd, vs2, i, env); \
  630. } \
  631. + env->vstart = 0; \
  632. }
  633. RVVCALL(OPFVV1, vfsqrt_v_h, OP_UU_H, H2, H2, float16_sqrt)
  634. @@ -3716,7 +3759,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
  635. uint32_t vl = env->vl; \
  636. uint32_t i; \
  637. \
  638. - for (i = 0; i < vl; i++) { \
  639. + for (i = env->vstart; i < vl; i++) { \
  640. ETYPE s1 = *((ETYPE *)vs1 + H(i)); \
  641. ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
  642. if (!vm && !vext_elem_mask(v0, i)) { \
  643. @@ -3725,6 +3768,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
  644. vext_set_elem_mask(vd, i, \
  645. DO_OP(s2, s1, &env->fp_status)); \
  646. } \
  647. + env->vstart = 0; \
  648. }
  649. GEN_VEXT_CMP_VV_ENV(vmfeq_vv_h, uint16_t, H2, float16_eq_quiet)
  650. @@ -3739,7 +3783,7 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \
  651. uint32_t vl = env->vl; \
  652. uint32_t i; \
  653. \
  654. - for (i = 0; i < vl; i++) { \
  655. + for (i = env->vstart; i < vl; i++) { \
  656. ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
  657. if (!vm && !vext_elem_mask(v0, i)) { \
  658. continue; \
  659. @@ -3747,6 +3791,7 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \
  660. vext_set_elem_mask(vd, i, \
  661. DO_OP(s2, (ETYPE)s1, &env->fp_status)); \
  662. } \
  663. + env->vstart = 0; \
  664. }
  665. GEN_VEXT_CMP_VF(vmfeq_vf_h, uint16_t, H2, float16_eq_quiet)
  666. @@ -3855,12 +3900,13 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, \
  667. uint32_t vl = env->vl; \
  668. uint32_t i; \
  669. \
  670. - for (i = 0; i < vl; i++) { \
  671. + for (i = env->vstart; i < vl; i++) { \
  672. if (!vm && !vext_elem_mask(v0, i)) { \
  673. continue; \
  674. } \
  675. do_##NAME(vd, vs2, i); \
  676. } \
  677. + env->vstart = 0; \
  678. }
  679. target_ulong fclass_h(uint64_t frs1)
  680. @@ -3936,11 +3982,12 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \
  681. uint32_t vl = env->vl; \
  682. uint32_t i; \
  683. \
  684. - for (i = 0; i < vl; i++) { \
  685. + for (i = env->vstart; i < vl; i++) { \
  686. ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
  687. *((ETYPE *)vd + H(i)) \
  688. = (!vm && !vext_elem_mask(v0, i) ? s2 : s1); \
  689. } \
  690. + env->vstart = 0; \
  691. }
  692. GEN_VFMERGE_VF(vfmerge_vfm_h, int16_t, H2)
  693. @@ -4084,7 +4131,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
  694. uint32_t i; \
  695. TD s1 = *((TD *)vs1 + HD(0)); \
  696. \
  697. - for (i = 0; i < vl; i++) { \
  698. + for (i = env->vstart; i < vl; i++) { \
  699. TS2 s2 = *((TS2 *)vs2 + HS2(i)); \
  700. if (!vm && !vext_elem_mask(v0, i)) { \
  701. continue; \
  702. @@ -4092,6 +4139,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
  703. s1 = OP(s1, (TD)s2); \
  704. } \
  705. *((TD *)vd + HD(0)) = s1; \
  706. + env->vstart = 0; \
  707. }
  708. /* vd[0] = sum(vs1[0], vs2[*]) */
  709. @@ -4164,7 +4212,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
  710. uint32_t i; \
  711. TD s1 = *((TD *)vs1 + HD(0)); \
  712. \
  713. - for (i = 0; i < vl; i++) { \
  714. + for (i = env->vstart; i < vl; i++) { \
  715. TS2 s2 = *((TS2 *)vs2 + HS2(i)); \
  716. if (!vm && !vext_elem_mask(v0, i)) { \
  717. continue; \
  718. @@ -4172,6 +4220,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
  719. s1 = OP(s1, (TD)s2, &env->fp_status); \
  720. } \
  721. *((TD *)vd + HD(0)) = s1; \
  722. + env->vstart = 0; \
  723. }
  724. /* Unordered sum */
  725. @@ -4199,7 +4248,7 @@ void HELPER(vfwredsum_vs_h)(void *vd, void *v0, void *vs1,
  726. uint32_t i;
  727. uint32_t s1 = *((uint32_t *)vs1 + H4(0));
  728. - for (i = 0; i < vl; i++) {
  729. + for (i = env->vstart; i < vl; i++) {
  730. uint16_t s2 = *((uint16_t *)vs2 + H2(i));
  731. if (!vm && !vext_elem_mask(v0, i)) {
  732. continue;
  733. @@ -4208,6 +4257,7 @@ void HELPER(vfwredsum_vs_h)(void *vd, void *v0, void *vs1,
  734. &env->fp_status);
  735. }
  736. *((uint32_t *)vd + H4(0)) = s1;
  737. + env->vstart = 0;
  738. }
  739. void HELPER(vfwredsum_vs_w)(void *vd, void *v0, void *vs1,
  740. @@ -4218,7 +4268,7 @@ void HELPER(vfwredsum_vs_w)(void *vd, void *v0, void *vs1,
  741. uint32_t i;
  742. uint64_t s1 = *((uint64_t *)vs1);
  743. - for (i = 0; i < vl; i++) {
  744. + for (i = env->vstart; i < vl; i++) {
  745. uint32_t s2 = *((uint32_t *)vs2 + H4(i));
  746. if (!vm && !vext_elem_mask(v0, i)) {
  747. continue;
  748. @@ -4227,6 +4277,7 @@ void HELPER(vfwredsum_vs_w)(void *vd, void *v0, void *vs1,
  749. &env->fp_status);
  750. }
  751. *((uint64_t *)vd) = s1;
  752. + env->vstart = 0;
  753. }
  754. /*
  755. @@ -4242,11 +4293,12 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
  756. uint32_t i; \
  757. int a, b; \
  758. \
  759. - for (i = 0; i < vl; i++) { \
  760. + for (i = env->vstart; i < vl; i++) { \
  761. a = vext_elem_mask(vs1, i); \
  762. b = vext_elem_mask(vs2, i); \
  763. vext_set_elem_mask(vd, i, OP(b, a)); \
  764. } \
  765. + env->vstart = 0; \
  766. }
  767. #define DO_NAND(N, M) (!(N & M))
  768. @@ -4273,13 +4325,14 @@ target_ulong HELPER(vpopc_m)(void *v0, void *vs2, CPURISCVState *env,
  769. uint32_t vl = env->vl;
  770. int i;
  771. - for (i = 0; i < vl; i++) {
  772. + for (i = env->vstart; i < vl; i++) {
  773. if (vm || vext_elem_mask(v0, i)) {
  774. if (vext_elem_mask(vs2, i)) {
  775. cnt++;
  776. }
  777. }
  778. }
  779. + env->vstart = 0;
  780. return cnt;
  781. }
  782. @@ -4291,13 +4344,14 @@ target_ulong HELPER(vfirst_m)(void *v0, void *vs2, CPURISCVState *env,
  783. uint32_t vl = env->vl;
  784. int i;
  785. - for (i = 0; i < vl; i++) {
  786. + for (i = env->vstart; i < vl; i++) {
  787. if (vm || vext_elem_mask(v0, i)) {
  788. if (vext_elem_mask(vs2, i)) {
  789. return i;
  790. }
  791. }
  792. }
  793. + env->vstart = 0;
  794. return -1LL;
  795. }
  796. @@ -4315,7 +4369,7 @@ static void vmsetm(void *vd, void *v0, void *vs2, CPURISCVState *env,
  797. int i;
  798. bool first_mask_bit = false;
  799. - for (i = 0; i < vl; i++) {
  800. + for (i = env->vstart; i < vl; i++) {
  801. if (!vm && !vext_elem_mask(v0, i)) {
  802. continue;
  803. }
  804. @@ -4339,6 +4393,7 @@ static void vmsetm(void *vd, void *v0, void *vs2, CPURISCVState *env,
  805. }
  806. }
  807. }
  808. + env->vstart = 0;
  809. }
  810. void HELPER(vmsbf_m)(void *vd, void *v0, void *vs2, CPURISCVState *env,
  811. @@ -4369,7 +4424,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, CPURISCVState *env, \
  812. uint32_t sum = 0; \
  813. int i; \
  814. \
  815. - for (i = 0; i < vl; i++) { \
  816. + for (i = env->vstart; i < vl; i++) { \
  817. if (!vm && !vext_elem_mask(v0, i)) { \
  818. continue; \
  819. } \
  820. @@ -4378,6 +4433,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, CPURISCVState *env, \
  821. sum++; \
  822. } \
  823. } \
  824. + env->vstart = 0; \
  825. }
  826. GEN_VEXT_VIOTA_M(viota_m_b, uint8_t, H1)
  827. @@ -4393,12 +4449,13 @@ void HELPER(NAME)(void *vd, void *v0, CPURISCVState *env, uint32_t desc) \
  828. uint32_t vl = env->vl; \
  829. int i; \
  830. \
  831. - for (i = 0; i < vl; i++) { \
  832. + for (i = env->vstart; i < vl; i++) { \
  833. if (!vm && !vext_elem_mask(v0, i)) { \
  834. continue; \
  835. } \
  836. *((ETYPE *)vd + H(i)) = i; \
  837. } \
  838. + env->vstart = 0; \
  839. }
  840. GEN_VEXT_VID_V(vid_v_b, uint8_t, H1)
  841. @@ -4417,9 +4474,10 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
  842. { \
  843. uint32_t vm = vext_vm(desc); \
  844. uint32_t vl = env->vl; \
  845. - target_ulong offset = s1, i; \
  846. + target_ulong offset = s1, i_min, i; \
  847. \
  848. - for (i = offset; i < vl; i++) { \
  849. + i_min = MAX(env->vstart, offset); \
  850. + for (i = i_min; i < vl; i++) { \
  851. if (!vm && !vext_elem_mask(v0, i)) { \
  852. continue; \
  853. } \
  854. @@ -4442,8 +4500,8 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
  855. uint32_t vl = env->vl; \
  856. target_ulong i_max, i; \
  857. \
  858. - i_max = MIN(s1 < vlmax ? vlmax - s1 : 0, vl); \
  859. - for (i = 0; i < i_max; ++i) { \
  860. + i_max = MAX(MIN(s1 < vlmax ? vlmax - s1 : 0, vl), env->vstart); \
  861. + for (i = env->vstart; i < i_max; ++i) { \
  862. if (vm || vext_elem_mask(v0, i)) { \
  863. *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i + s1)); \
  864. } \
  865. @@ -4454,6 +4512,8 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
  866. *((ETYPE *)vd + H(i)) = 0; \
  867. } \
  868. } \
  869. + \
  870. + env->vstart = 0; \
  871. }
  872. /* vslidedown.vx vd, vs2, rs1, vm # vd[i] = vs2[i+rs1] */
  873. @@ -4471,7 +4531,7 @@ static void vslide1up_##ESZ(void *vd, void *v0, target_ulong s1, void *vs2, \
  874. uint32_t vl = env->vl; \
  875. uint32_t i; \
  876. \
  877. - for (i = 0; i < vl; i++) { \
  878. + for (i = env->vstart; i < vl; i++) { \
  879. if (!vm && !vext_elem_mask(v0, i)) { \
  880. continue; \
  881. } \
  882. @@ -4481,6 +4541,7 @@ static void vslide1up_##ESZ(void *vd, void *v0, target_ulong s1, void *vs2, \
  883. *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i - 1)); \
  884. } \
  885. } \
  886. + env->vstart = 0; \
  887. }
  888. GEN_VEXT_VSLIE1UP(8, H1)
  889. @@ -4510,7 +4571,7 @@ static void vslide1down_##ESZ(void *vd, void *v0, target_ulong s1, void *vs2, \
  890. uint32_t vl = env->vl; \
  891. uint32_t i; \
  892. \
  893. - for (i = 0; i < vl; i++) { \
  894. + for (i = env->vstart; i < vl; i++) { \
  895. if (!vm && !vext_elem_mask(v0, i)) { \
  896. continue; \
  897. } \
  898. @@ -4520,6 +4581,7 @@ static void vslide1down_##ESZ(void *vd, void *v0, target_ulong s1, void *vs2, \
  899. *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i + 1)); \
  900. } \
  901. } \
  902. + env->vstart = 0; \
  903. }
  904. GEN_VEXT_VSLIDE1DOWN(8, H1)
  905. @@ -4570,13 +4632,13 @@ GEN_VEXT_VFSLIDE1DOWN_VF(vfslide1down_vf_d, 64)
  906. void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
  907. CPURISCVState *env, uint32_t desc) \
  908. { \
  909. - uint32_t vlmax = vext_max_elems(desc, ctzl(sizeof(TS1))); \
  910. + uint32_t vlmax = vext_max_elems(desc, ctzl(sizeof(TS2))); \
  911. uint32_t vm = vext_vm(desc); \
  912. uint32_t vl = env->vl; \
  913. uint64_t index; \
  914. uint32_t i; \
  915. \
  916. - for (i = 0; i < vl; i++) { \
  917. + for (i = env->vstart; i < vl; i++) { \
  918. if (!vm && !vext_elem_mask(v0, i)) { \
  919. continue; \
  920. } \
  921. @@ -4587,6 +4649,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
  922. *((TS2 *)vd + HS2(i)) = *((TS2 *)vs2 + HS2(index)); \
  923. } \
  924. } \
  925. + env->vstart = 0; \
  926. }
  927. /* vd[i] = (vs1[i] >= VLMAX) ? 0 : vs2[vs1[i]]; */
  928. @@ -4610,7 +4673,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
  929. uint64_t index = s1; \
  930. uint32_t i; \
  931. \
  932. - for (i = 0; i < vl; i++) { \
  933. + for (i = env->vstart; i < vl; i++) { \
  934. if (!vm && !vext_elem_mask(v0, i)) { \
  935. continue; \
  936. } \
  937. @@ -4620,6 +4683,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
  938. *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(index)); \
  939. } \
  940. } \
  941. + env->vstart = 0; \
  942. }
  943. /* vd[i] = (x[rs1] >= VLMAX) ? 0 : vs2[rs1] */
  944. @@ -4636,13 +4700,14 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
  945. uint32_t vl = env->vl; \
  946. uint32_t num = 0, i; \
  947. \
  948. - for (i = 0; i < vl; i++) { \
  949. + for (i = env->vstart; i < vl; i++) { \
  950. if (!vext_elem_mask(vs1, i)) { \
  951. continue; \
  952. } \
  953. *((ETYPE *)vd + H(num)) = *((ETYPE *)vs2 + H(i)); \
  954. num++; \
  955. } \
  956. + env->vstart = 0; \
  957. }
  958. /* Compress into vd elements of vs2 where vs1 is enabled */
  959. @@ -4651,6 +4716,27 @@ GEN_VEXT_VCOMPRESS_VM(vcompress_vm_h, uint16_t, H2)
  960. GEN_VEXT_VCOMPRESS_VM(vcompress_vm_w, uint32_t, H4)
  961. GEN_VEXT_VCOMPRESS_VM(vcompress_vm_d, uint64_t, H8)
  962. +/* Vector Whole Register Move */
  963. +#define GEN_VEXT_VMV_WHOLE(NAME, LEN) \
  964. +void HELPER(NAME)(void *vd, void *vs2, CPURISCVState *env, \
  965. + uint32_t desc) \
  966. +{ \
  967. + /* EEW = 8 */ \
  968. + uint32_t maxsz = simd_maxsz(desc); \
  969. + uint32_t i = env->vstart; \
  970. + \
  971. + memcpy((uint8_t *)vd + H1(i), \
  972. + (uint8_t *)vs2 + H1(i), \
  973. + maxsz - env->vstart); \
  974. + \
  975. + env->vstart = 0; \
  976. +}
  977. +
  978. +GEN_VEXT_VMV_WHOLE(vmv1r_v, 1)
  979. +GEN_VEXT_VMV_WHOLE(vmv2r_v, 2)
  980. +GEN_VEXT_VMV_WHOLE(vmv4r_v, 4)
  981. +GEN_VEXT_VMV_WHOLE(vmv8r_v, 8)
  982. +
  983. /* Vector Integer Extension */
  984. #define GEN_VEXT_INT_EXT(NAME, ETYPE, DTYPE, HD, HS1) \
  985. void HELPER(NAME)(void *vd, void *v0, void *vs2, \
  986. @@ -4660,12 +4746,13 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, \
  987. uint32_t vm = vext_vm(desc); \
  988. uint32_t i; \
  989. \
  990. - for (i = 0; i < vl; i++) { \
  991. + for (i = env->vstart; i < vl; i++) { \
  992. if (!vm && !vext_elem_mask(v0, i)) { \
  993. continue; \
  994. } \
  995. *((ETYPE *)vd + HD(i)) = *((DTYPE *)vs2 + HS1(i)); \
  996. } \
  997. + env->vstart = 0; \
  998. }
  999. GEN_VEXT_INT_EXT(vzext_vf2_h, uint16_t, uint8_t, H2, H1)
  1000. --
  1001. 2.33.1
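
(Appendix-style sketch, not part of the patch: in vext_ldst_whole() above, the resume
point env->vstart is decomposed into the vector register (segment) currently being
processed and the first element to redo inside it. A standalone illustration of that
arithmetic, with made-up VLEN and element-size values:)

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t vlenb = 16;               /* e.g. VLEN = 128 bits             */
        uint32_t esz = 2;                  /* log2(element size): 4-byte elems */
        uint32_t max_elems = vlenb >> esz; /* elements per vector register     */
        uint32_t vstart = 9;               /* resume point after a trap        */

        uint32_t k = vstart / max_elems;   /* which of the nf registers        */
        uint32_t off = vstart % max_elems; /* first element to redo inside it  */

        /* max_elems = 4, so vstart = 9 resumes at element 1 of register k = 2. */
        printf("max_elems=%u k=%u off=%u\n", max_elems, k, off);
        return 0;
    }
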