
dubhe: Update starfive dubhe machine configuration

Changed several parameters to support booting the StarFive Dubhe image with
runqemu.

Signed-off-by: Jun Yuan Tan <junyuan.tan@starfivetech.com>

# Conflicts:
#	conf/machine/starfive-dubhe.conf
Tien Hock Loh committed 2 years ago
commit d493637710
100 changed files with 19803 additions and 1803 deletions
  1. 1 3
      conf/machine/starfive-dubhe.conf
  2. 0 109
      recipes-devtools/qemu/files/0001-target-riscv-rvb-Carry-less-multiply-instruction.patch
  3. 0 142
      recipes-devtools/qemu/files/0002-target-riscv-rvb-add-cmix-cmov-instruction.patch
  4. 0 276
      recipes-devtools/qemu/files/0003-target-riscv-rvb-add-funnel-shfit-instruction.patch
  5. 0 264
      recipes-devtools/qemu/files/0004-target-riscv-rvb-add-generalized-shuffle.patch
  6. 0 137
      recipes-devtools/qemu/files/0005-target-riscv-rvb-add-crossbar-permutation-instructio.patch
  7. 0 153
      recipes-devtools/qemu/files/0006-target-riscv-rvb-add-bfp-bfpw-instructions.patch
  8. 0 164
      recipes-devtools/qemu/files/0007-target-riscv-rvb-modified-some-errors-on-some-rv64-o.patch
  9. 0 174
      recipes-devtools/qemu/files/0008-target-riscv-rvb-add-bcompress-bdecompress-instructi.patch
  10. 0 262
      recipes-devtools/qemu/files/0009-target-riscv-rvb-add-CRC-bit-matrix-instructions.patch
  11. 0 69
      recipes-devtools/qemu/files/0010-target-riscv-rvb-modified-srow-error.patch
  12. 0 29
      recipes-devtools/qemu/files/0011-support-b-extention-on-default-config.patch
  13. 0 0
      recipes-devtools/qemu/qemu-native_6.0.0.bb
  14. 1 4
      recipes-devtools/qemu/qemu-system-native_6.0.0.bb
  15. 133 17
      recipes-devtools/qemu/qemu.inc
  16. 1177 0
      recipes-devtools/qemu/qemu/0001-merge-riscv-bitmapip-b0p94-version.patch
  17. 214 0
      recipes-devtools/qemu/qemu/0001-softfloat-add-APIs-to-handle-alternative-sNaN-propag.patch
  18. 110 0
      recipes-devtools/qemu/qemu/0002-Add-four-cache-csr-instruction.patch
  19. 53 0
      recipes-devtools/qemu/qemu/0002-target-riscv-change-the-api-for-single-double-fmin-f.patch
  20. 38 0
      recipes-devtools/qemu/qemu/0003-target-riscv-support-x-Zfh-in-cpu-option.patch
  21. 979 0
      recipes-devtools/qemu/qemu/0004-target-riscv-Implement-zfh-extension.patch
  22. 64 0
      recipes-devtools/qemu/qemu/0005-target-riscv-fix-TB_FLAGS-bits-overlapping-bug-for-r.patch
  23. 46 0
      recipes-devtools/qemu/qemu/0006-fpu-softfloat-set-invalid-excp-flag-for-RISC-V-mulad.patch
  24. 60 0
      recipes-devtools/qemu/qemu/0007-target-riscv-Fixup-saturate-subtract-function.patch
  25. 42 0
      recipes-devtools/qemu/qemu/0008-target-riscv-fix-vrgather-macro-index-variable-type-.patch
  26. 62 0
      recipes-devtools/qemu/qemu/0009-target-riscv-drop-vector-0.7.1-and-add-1.0-support.patch
  27. 28 0
      recipes-devtools/qemu/qemu/0010-target-riscv-Use-FIELD_EX32-to-extract-wd-field.patch
  28. 171 0
      recipes-devtools/qemu/qemu/0011-target-riscv-rvv-1.0-add-mstatus-VS-field.patch
  29. 42 0
      recipes-devtools/qemu/qemu/0012-target-riscv-rvv-1.0-add-sstatus-VS-field.patch
  30. 33 0
      recipes-devtools/qemu/qemu/0013-target-riscv-rvv-1.0-introduce-writable-misa.v-field.patch
  31. 486 0
      recipes-devtools/qemu/qemu/0014-target-riscv-rvv-1.0-add-translation-time-vector-con.patch
  32. 60 0
      recipes-devtools/qemu/qemu/0015-target-riscv-rvv-1.0-remove-rvv-related-codes-from-f.patch
  33. 77 0
      recipes-devtools/qemu/qemu/0016-target-riscv-rvv-1.0-add-vcsr-register.patch
  34. 54 0
      recipes-devtools/qemu/qemu/0017-target-riscv-rvv-1.0-add-vlenb-register.patch
  35. 35 0
      recipes-devtools/qemu/qemu/0018-target-riscv-rvv-1.0-check-MSTATUS_VS-when-accessing.patch
  36. 1260 0
      recipes-devtools/qemu/qemu/0019-target-riscv-rvv-1.0-remove-MLEN-calculations.patch
  37. 123 0
      recipes-devtools/qemu/qemu/0020-target-riscv-rvv-1.0-add-fractional-LMUL.patch
  38. 3290 0
      recipes-devtools/qemu/qemu/0021-target-riscv-rvv-1.0-add-VMA-and-VTA.patch
  39. 1088 0
      recipes-devtools/qemu/qemu/0022-target-riscv-rvv-1.0-update-check-functions.patch
  40. 299 0
      recipes-devtools/qemu/qemu/0023-target-riscv-introduce-more-imm-value-modes-in-trans.patch
  41. 87 0
      recipes-devtools/qemu/qemu/0024-target-riscv-rvv-1.0-add-translation-time-nan-box-he.patch
  42. 133 0
      recipes-devtools/qemu/qemu/0025-target-riscv-rvv-1.0-configure-instructions.patch
  43. 845 0
      recipes-devtools/qemu/qemu/0026-target-riscv-rvv-1.0-stride-load-and-store-instructi.patch
  44. 527 0
      recipes-devtools/qemu/qemu/0027-target-riscv-rvv-1.0-index-load-and-store-instructio.patch
  45. 38 0
      recipes-devtools/qemu/qemu/0028-target-riscv-rvv-1.0-fix-address-index-overflow-bug-.patch
  46. 245 0
      recipes-devtools/qemu/qemu/0029-target-riscv-rvv-1.0-fault-only-first-unit-stride-lo.patch
  47. 763 0
      recipes-devtools/qemu/qemu/0030-target-riscv-rvv-1.0-amo-operations.patch
  48. 244 0
      recipes-devtools/qemu/qemu/0031-target-riscv-rvv-1.0-load-store-whole-register-instr.patch
  49. 411 0
      recipes-devtools/qemu/qemu/0032-target-riscv-rvv-1.0-update-vext_max_elems-for-load-.patch
  50. 110 0
      recipes-devtools/qemu/qemu/0033-target-riscv-rvv-1.0-take-fractional-LMUL-into-vecto.patch
  51. 29 0
      recipes-devtools/qemu/qemu/0034-target-riscv-rvv-1.0-floating-point-square-root-inst.patch
  52. 29 0
      recipes-devtools/qemu/qemu/0035-target-riscv-rvv-1.0-floating-point-classify-instruc.patch
  53. 92 0
      recipes-devtools/qemu/qemu/0036-target-riscv-rvv-1.0-mask-population-count-instructi.patch
  54. 83 0
      recipes-devtools/qemu/qemu/0037-target-riscv-rvv-1.0-find-first-set-mask-bit-instruc.patch
  55. 72 0
      recipes-devtools/qemu/qemu/0038-target-riscv-rvv-1.0-set-X-first-mask-bit-instructio.patch
  56. 53 0
      recipes-devtools/qemu/qemu/0039-target-riscv-rvv-1.0-iota-instruction.patch
  57. 27 0
      recipes-devtools/qemu/qemu/0040-target-riscv-rvv-1.0-element-index-instruction.patch
  58. 104 0
      recipes-devtools/qemu/qemu/0041-target-riscv-rvv-1.0-allow-load-element-with-sign-ex.patch
  59. 150 0
      recipes-devtools/qemu/qemu/0042-target-riscv-rvv-1.0-register-gather-instructions.patch
  60. 103 0
      recipes-devtools/qemu/qemu/0043-target-riscv-rvv-1.0-integer-scalar-move-instruction.patch
  61. 62 0
      recipes-devtools/qemu/qemu/0044-target-riscv-rvv-1.0-floating-point-move-instruction.patch
  62. 114 0
      recipes-devtools/qemu/qemu/0045-target-riscv-rvv-1.0-floating-point-scalar-move-inst.patch
  63. 70 0
      recipes-devtools/qemu/qemu/0046-target-riscv-rvv-1.0-whole-register-move-instruction.patch
  64. 194 0
      recipes-devtools/qemu/qemu/0047-target-riscv-rvv-1.0-integer-extension-instructions.patch
  65. 203 0
      recipes-devtools/qemu/qemu/0048-target-riscv-rvv-1.0-single-width-averaging-add-and-.patch
  66. 34 0
      recipes-devtools/qemu/qemu/0049-target-riscv-rvv-1.0-single-width-bit-shift-instruct.patch
  67. 130 0
      recipes-devtools/qemu/qemu/0050-target-riscv-rvv-1.0-integer-add-with-carry-subtract.patch
  68. 195 0
      recipes-devtools/qemu/qemu/0051-target-riscv-rvv-1.0-narrowing-integer-right-shift-i.patch
  69. 32 0
      recipes-devtools/qemu/qemu/0052-target-riscv-rvv-1.0-widening-integer-multiply-add-i.patch
  70. 30 0
      recipes-devtools/qemu/qemu/0053-target-riscv-rvv-1.0-single-width-saturating-add-and.patch
  71. 76 0
      recipes-devtools/qemu/qemu/0054-target-riscv-rvv-1.0-integer-comparison-instructions.patch
  72. 55 0
      recipes-devtools/qemu/qemu/0055-target-riscv-rvv-1.0-floating-point-compare-instruct.patch
  73. 52 0
      recipes-devtools/qemu/qemu/0056-target-riscv-rvv-1.0-mask-register-logical-instructi.patch
  74. 51 0
      recipes-devtools/qemu/qemu/0057-target-riscv-rvv-1.0-slide-instructions.patch
  75. 238 0
      recipes-devtools/qemu/qemu/0058-target-riscv-rvv-1.0-floating-point-slide-instructio.patch
  76. 177 0
      recipes-devtools/qemu/qemu/0059-target-riscv-rvv-1.0-narrowing-fixed-point-clip-inst.patch
  77. 64 0
      recipes-devtools/qemu/qemu/0060-target-riscv-rvv-1.0-single-width-floating-point-red.patch
  78. 35 0
      recipes-devtools/qemu/qemu/0061-target-riscv-rvv-1.0-widening-floating-point-reducti.patch
  79. 32 0
      recipes-devtools/qemu/qemu/0062-target-riscv-rvv-1.0-single-width-scaling-shift-inst.patch
  80. 305 0
      recipes-devtools/qemu/qemu/0063-target-riscv-rvv-1.0-remove-widening-saturating-scal.patch
  81. 85 0
      recipes-devtools/qemu/qemu/0064-target-riscv-rvv-1.0-remove-vmford.vv-and-vmford.vf.patch
  82. 69 0
      recipes-devtools/qemu/qemu/0065-target-riscv-rvv-1.0-remove-integer-extract-instruct.patch
  83. 60 0
      recipes-devtools/qemu/qemu/0066-target-riscv-rvv-1.0-floating-point-min-max-instruct.patch
  84. 156 0
      recipes-devtools/qemu/qemu/0067-target-riscv-introduce-floating-point-rounding-mode-.patch
  85. 160 0
      recipes-devtools/qemu/qemu/0068-target-riscv-rvv-1.0-floating-point-integer-type-con.patch
  86. 187 0
      recipes-devtools/qemu/qemu/0069-target-riscv-rvv-1.0-widening-floating-point-integer.patch
  87. 84 0
      recipes-devtools/qemu/qemu/0070-target-riscv-add-set-round-to-odd-rounding-mode-help.patch
  88. 231 0
      recipes-devtools/qemu/qemu/0071-target-riscv-rvv-1.0-narrowing-floating-point-intege.patch
  89. 58 0
      recipes-devtools/qemu/qemu/0072-target-riscv-rvv-1.0-relax-RV_VLEN_MAX-to-1024-bits.patch
  90. 1078 0
      recipes-devtools/qemu/qemu/0073-target-riscv-rvv-1.0-implement-vstart-CSR.patch
  91. 98 0
      recipes-devtools/qemu/qemu/0074-target-riscv-rvv-1.0-trigger-illegal-instruction-exc.patch
  92. 55 0
      recipes-devtools/qemu/qemu/0075-target-riscv-rvv-1.0-set-mstatus.SD-bit-when-writing.patch
  93. 259 0
      recipes-devtools/qemu/qemu/0076-target-riscv-gdb-support-vector-registers-for-rv64-r.patch
  94. 260 0
      recipes-devtools/qemu/qemu/0077-target-riscv-rvv-1.0-floating-point-reciprocal-squar.patch
  95. 260 0
      recipes-devtools/qemu/qemu/0078-target-riscv-rvv-1.0-floating-point-reciprocal-estim.patch
  96. 44 0
      recipes-devtools/qemu/qemu/0079-target-riscv-set-mstatus.SD-bit-when-writing-fp-CSRs.patch
  97. 38 0
      recipes-devtools/qemu/qemu/0080-target-riscv-rvv-1.0-rename-r2_zimm-to-r2_zimm11.patch
  98. 82 0
      recipes-devtools/qemu/qemu/0081-target-riscv-rvv-1.0-add-vsetivli-instruction.patch
  99. 91 0
      recipes-devtools/qemu/qemu/0082-target-riscv-rvv-1.0-add-evl-parameter-to-vext_ldst_.patch
  100. 128 0
      recipes-devtools/qemu/qemu/0083-target-riscv-rvv-1.0-add-vector-unit-stride-mask-loa.patch
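
Most of the churn above is a reshuffle of the StarFive QEMU patch stack: the old rvb series under recipes-devtools/qemu/files/ is dropped, while a larger bitmanip/rvv/zfh series is now carried under recipes-devtools/qemu/qemu/ and qemu.inc grows accordingly. As a rough sketch of how such patches are normally wired into the recipe (not taken from this commit; the updated qemu.inc itself is not shown here), using the usual OpenEmbedded SRC_URI convention and the pre-3.4 underscore override syntax seen elsewhere in this commit:

    # Illustrative qemu.inc excerpt; only the first two patch names from the
    # file list above are shown, the real series is much longer.
    FILESEXTRAPATHS_prepend := "${THISDIR}/qemu:"
    SRC_URI += " \
        file://0001-merge-riscv-bitmapip-b0p94-version.patch \
        file://0002-Add-four-cache-csr-instruction.patch \
        "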

+ 1 - 3
conf/machine/starfive-dubhe.conf

@@ -54,9 +54,7 @@ PREFERRED_PROVIDER_virtual/kernel ?= "linux-starfive-dev"
 RISCV_SBI_PLAT = "generic"
 RISCV_SBI_PAYLOAD = "Image-initramfs-starfive-dubhe.bin"
 
-PREFERRED_VERSION_qemu = "6.1.0"
-PREFERRED_VERSION_qemu-native = "6.1.0"
-PREFERRED_VERSION_nativesdk-qemu = "6.1.0"
+QEMUVERSION = "6.0.0"
 QEMU_EXTRAOPTIONS_riscv64 = " -cpu rv64,x-b=true"
 
 GCCVERSION = "11.%"
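
With the machine configuration now pinning QEMUVERSION to 6.0.0 and passing "-cpu rv64,x-b=true" (QEMU 6.0's experimental switch for the draft bitmanip extension), the Dubhe image can be built and booted under emulation. A minimal usage sketch, assuming a standard Yocto build environment; the image target name below is illustrative and may differ in this layer:

    # Build an image for the Dubhe machine and boot it with runqemu.
    MACHINE=starfive-dubhe bitbake core-image-minimal
    runqemu starfive-dubhe core-image-minimal nographic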

+ 0 - 109
recipes-devtools/qemu/files/0001-target-riscv-rvb-Carry-less-multiply-instruction.patch

@@ -1,109 +0,0 @@
-From 5e47b23aded34646d4a2d0424d5ccc3eff3bcb6c Mon Sep 17 00:00:00 2001
-From: "eric.tang" <eric.tang@starfivetech.com>
-Date: Wed, 7 Jul 2021 11:19:22 +0800
-Subject: [PATCH 01/11] target/riscv: rvb: Carry-less multiply instruction
-
-Signed-off-by: eric.tang <eric.tang@starfivetech.com>
----
- target/riscv/bitmanip_helper.c          | 34 +++++++++++++++++++++++++
- target/riscv/helper.h                   |  3 +++
- target/riscv/insn32.decode              |  4 +++
- target/riscv/insn_trans/trans_rvb.c.inc | 11 ++++++++
- 4 files changed, 52 insertions(+)
-
-diff --git a/target/riscv/bitmanip_helper.c b/target/riscv/bitmanip_helper.c
-index 5b2f795d03..29dfe921ab 100644
---- a/target/riscv/bitmanip_helper.c
-+++ b/target/riscv/bitmanip_helper.c
-@@ -88,3 +88,37 @@ target_ulong HELPER(gorcw)(target_ulong rs1, target_ulong rs2)
- {
-     return do_gorc(rs1, rs2, 32);
- }
-+
-+#define DO_CLMULA(NAME, NUM, BODY)                          \
-+static target_ulong do_##NAME(target_ulong rs1,             \
-+                              target_ulong rs2,             \
-+                              int bits)                     \
-+{                                                           \
-+    target_ulong x = 0;                                     \
-+    int i;                                                  \
-+                                                            \
-+    for(i = NUM; i < bits; i++)                             \
-+        if ((rs2 >> i) & 1)                                 \
-+            x ^= BODY;                                      \
-+                                                            \
-+    return x;                                               \
-+}
-+
-+DO_CLMULA(clmul, 0, (rs1 << i))
-+DO_CLMULA(clmulh, 1, (rs1 >> (bits - i)))
-+DO_CLMULA(clmulr, 0, (rs1 >> (bits - i - 1)))
-+
-+target_ulong HELPER(clmul)(target_ulong rs1, target_ulong rs2)
-+{
-+    return do_clmul(rs1, rs2, TARGET_LONG_BITS);
-+}
-+
-+target_ulong HELPER(clmulh)(target_ulong rs1, target_ulong rs2)
-+{
-+    return do_clmulh(rs1, rs2, TARGET_LONG_BITS);
-+}
-+
-+target_ulong HELPER(clmulr)(target_ulong rs1, target_ulong rs2)
-+{
-+    return do_clmulr(rs1, rs2, TARGET_LONG_BITS);
-+}
-diff --git a/target/riscv/helper.h b/target/riscv/helper.h
-index 415e37bc37..6ee9e8d058 100644
---- a/target/riscv/helper.h
-+++ b/target/riscv/helper.h
-@@ -63,6 +63,9 @@ DEF_HELPER_FLAGS_2(grev, TCG_CALL_NO_RWG_SE, tl, tl, tl)
- DEF_HELPER_FLAGS_2(grevw, TCG_CALL_NO_RWG_SE, tl, tl, tl)
- DEF_HELPER_FLAGS_2(gorc, TCG_CALL_NO_RWG_SE, tl, tl, tl)
- DEF_HELPER_FLAGS_2(gorcw, TCG_CALL_NO_RWG_SE, tl, tl, tl)
-+DEF_HELPER_FLAGS_2(clmul, TCG_CALL_NO_RWG_SE, tl, tl, tl)
-+DEF_HELPER_FLAGS_2(clmulh, TCG_CALL_NO_RWG_SE, tl, tl, tl)
-+DEF_HELPER_FLAGS_2(clmulr, TCG_CALL_NO_RWG_SE, tl, tl, tl)
- 
- /* Special functions */
- DEF_HELPER_3(csrrw, tl, env, tl, tl)
-diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
-index f09f8d5faf..617ead6669 100644
---- a/target/riscv/insn32.decode
-+++ b/target/riscv/insn32.decode
-@@ -689,6 +689,10 @@ gorc       0010100 .......... 101 ..... 0110011 @r
- sh1add     0010000 .......... 010 ..... 0110011 @r
- sh2add     0010000 .......... 100 ..... 0110011 @r
- sh3add     0010000 .......... 110 ..... 0110011 @r
-+clmul      0000101 .......... 001 ..... 0110011 @r
-+clmulh     0000101 .......... 011 ..... 0110011 @r
-+clmulr     0000101 .......... 010 ..... 0110011 @r
-+
- 
- bseti      00101. ........... 001 ..... 0010011 @sh
- bclri      01001. ........... 001 ..... 0010011 @sh
-diff --git a/target/riscv/insn_trans/trans_rvb.c.inc b/target/riscv/insn_trans/trans_rvb.c.inc
-index 9e81f6e3de..181cbf285c 100644
---- a/target/riscv/insn_trans/trans_rvb.c.inc
-+++ b/target/riscv/insn_trans/trans_rvb.c.inc
-@@ -237,6 +237,17 @@ GEN_TRANS_SHADD(1)
- GEN_TRANS_SHADD(2)
- GEN_TRANS_SHADD(3)
- 
-+#define GEN_TRANS_CLMUL(NAME)                                             \
-+static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a)                \
-+{                                                                         \
-+    REQUIRE_EXT(ctx, RVB);                                                \
-+    return gen_arith(ctx, a, gen_helper_##NAME);                          \
-+}
-+
-+GEN_TRANS_CLMUL(clmul)
-+GEN_TRANS_CLMUL(clmulh)
-+GEN_TRANS_CLMUL(clmulr)
-+
- static bool trans_clzw(DisasContext *ctx, arg_clzw *a)
- {
-     REQUIRE_64BIT(ctx);
--- 
-2.33.0
-

+ 0 - 142
recipes-devtools/qemu/files/0002-target-riscv-rvb-add-cmix-cmov-instruction.patch

@@ -1,142 +0,0 @@
-From 9401079bda6803c2b7c6251eb579fca5657991d1 Mon Sep 17 00:00:00 2001
-From: "eric.tang" <eric.tang@starfivetech.com>
-Date: Thu, 8 Jul 2021 13:43:50 +0800
-Subject: [PATCH 02/11] target/riscv: rvb: add cmix/cmov instruction
-
-Signed-off-by: eric.tang <eric.tang@starfivetech.com>
----
- target/riscv/bitmanip_helper.c          | 12 ++++++++++
- target/riscv/helper.h                   |  1 +
- target/riscv/insn32.decode              |  5 ++++-
- target/riscv/insn_trans/trans_rvb.c.inc | 12 ++++++++++
- target/riscv/translate.c                | 29 +++++++++++++++++++++++++
- 5 files changed, 58 insertions(+), 1 deletion(-)
-
-diff --git a/target/riscv/bitmanip_helper.c b/target/riscv/bitmanip_helper.c
-index 29dfe921ab..46b51399f2 100644
---- a/target/riscv/bitmanip_helper.c
-+++ b/target/riscv/bitmanip_helper.c
-@@ -122,3 +122,15 @@ target_ulong HELPER(clmulr)(target_ulong rs1, target_ulong rs2)
- {
-     return do_clmulr(rs1, rs2, TARGET_LONG_BITS);
- }
-+
-+static target_ulong do_cmov(target_ulong rs1,
-+                            target_ulong rs2,
-+                            target_ulong rs3)
-+{
-+    return rs2 ? rs1 : rs3;
-+}
-+
-+target_ulong HELPER(cmov)(target_ulong rs1, target_ulong rs2, target_ulong rs3)
-+{
-+    return do_cmov(rs1, rs2, rs3);
-+}
-diff --git a/target/riscv/helper.h b/target/riscv/helper.h
-index 6ee9e8d058..1282aada80 100644
---- a/target/riscv/helper.h
-+++ b/target/riscv/helper.h
-@@ -66,6 +66,7 @@ DEF_HELPER_FLAGS_2(gorcw, TCG_CALL_NO_RWG_SE, tl, tl, tl)
- DEF_HELPER_FLAGS_2(clmul, TCG_CALL_NO_RWG_SE, tl, tl, tl)
- DEF_HELPER_FLAGS_2(clmulh, TCG_CALL_NO_RWG_SE, tl, tl, tl)
- DEF_HELPER_FLAGS_2(clmulr, TCG_CALL_NO_RWG_SE, tl, tl, tl)
-+DEF_HELPER_FLAGS_3(cmov, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl)
- 
- /* Special functions */
- DEF_HELPER_3(csrrw, tl, env, tl, tl)
-diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
-index 617ead6669..06527db5f2 100644
---- a/target/riscv/insn32.decode
-+++ b/target/riscv/insn32.decode
-@@ -42,6 +42,7 @@
- &j    imm rd
- &r    rd rs1 rs2
- &r2   rd rs1
-+&r3   rd rs1 rs2 rs3
- &s    imm rs1 rs2
- &u    imm rd
- &shift     shamt rs1 rd
-@@ -66,6 +67,7 @@
- @atom_ld ..... aq:1 rl:1 ..... ........ ..... ....... &atomic rs2=0     %rs1 %rd
- @atom_st ..... aq:1 rl:1 ..... ........ ..... ....... &atomic %rs2      %rs1 %rd
- 
-+@r3      ..... ..  ..... ..... ... ..... ....... &r3 %rs3 %rs2 %rs1 %rd
- @r4_rm   ..... ..  ..... ..... ... ..... ....... %rs3 %rs2 %rs1 %rm %rd
- @r_rm    .......   ..... ..... ... ..... ....... %rs2 %rs1 %rm %rd
- @r2_rm   .......   ..... ..... ... ..... ....... %rs1 %rm %rd
-@@ -692,7 +694,8 @@ sh3add     0010000 .......... 110 ..... 0110011 @r
- clmul      0000101 .......... 001 ..... 0110011 @r
- clmulh     0000101 .......... 011 ..... 0110011 @r
- clmulr     0000101 .......... 010 ..... 0110011 @r
--
-+cmix       .....11 .......... 001 ..... 0110011 @r3
-+cmov       .....11 .......... 101 ..... 0110011 @r3
- 
- bseti      00101. ........... 001 ..... 0010011 @sh
- bclri      01001. ........... 001 ..... 0010011 @sh
-diff --git a/target/riscv/insn_trans/trans_rvb.c.inc b/target/riscv/insn_trans/trans_rvb.c.inc
-index 181cbf285c..2a143d9d8f 100644
---- a/target/riscv/insn_trans/trans_rvb.c.inc
-+++ b/target/riscv/insn_trans/trans_rvb.c.inc
-@@ -248,6 +248,18 @@ GEN_TRANS_CLMUL(clmul)
- GEN_TRANS_CLMUL(clmulh)
- GEN_TRANS_CLMUL(clmulr)
- 
-+static bool trans_cmix(DisasContext *ctx, arg_cmix *a)
-+{
-+    REQUIRE_EXT(ctx, RVB);
-+    return gen_quat(ctx, a, gen_cmix);
-+}
-+
-+static bool trans_cmov(DisasContext *ctx, arg_cmov *a)
-+{
-+    REQUIRE_EXT(ctx, RVB);
-+    return gen_quat(ctx, a, gen_helper_cmov);
-+}
-+
- static bool trans_clzw(DisasContext *ctx, arg_clzw *a)
- {
-     REQUIRE_64BIT(ctx);
-diff --git a/target/riscv/translate.c b/target/riscv/translate.c
-index 62a7d7e4c7..50aeb2b4c8 100644
---- a/target/riscv/translate.c
-+++ b/target/riscv/translate.c
-@@ -789,6 +789,35 @@ static bool gen_arith(DisasContext *ctx, arg_r *a,
-     return true;
- }
- 
-+static void gen_cmix(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3)
-+{
-+    tcg_gen_and_tl(arg1, arg1, arg2);
-+    tcg_gen_not_tl(arg2, arg2);
-+    tcg_gen_and_tl(arg3, arg3, arg2);
-+    tcg_gen_or_tl(ret, arg1, arg3);
-+}
-+
-+static bool gen_quat(DisasContext *ctx, arg_r3 *a,
-+                     void(*func)(TCGv, TCGv, TCGv, TCGv))
-+{
-+    TCGv source1, source2, source3;
-+    source1 = tcg_temp_new();
-+    source2 = tcg_temp_new();
-+    source3 = tcg_temp_new();
-+
-+    gen_get_gpr(source1, a->rs1);
-+    gen_get_gpr(source2, a->rs2);
-+    gen_get_gpr(source3, a->rs3);
-+
-+    (*func)(source1, source1, source2, source3);
-+
-+    gen_set_gpr(a->rd, source1);
-+    tcg_temp_free(source1);
-+    tcg_temp_free(source2);
-+    tcg_temp_free(source3);
-+    return true;
-+}
-+
- static bool gen_shift(DisasContext *ctx, arg_r *a,
-                         void(*func)(TCGv, TCGv, TCGv))
- {
--- 
-2.33.0
-

+ 0 - 276
recipes-devtools/qemu/files/0003-target-riscv-rvb-add-funnel-shfit-instruction.patch

@@ -1,276 +0,0 @@
-From 12b81cda67d951924d9a4ae3b3e430c35a2f7f05 Mon Sep 17 00:00:00 2001
-From: "eric.tang" <eric.tang@starfivetech.com>
-Date: Thu, 8 Jul 2021 19:33:13 +0800
-Subject: [PATCH 03/11] target/riscv: rvb: add funnel shfit instruction
-
-Signed-off-by: eric.tang <eric.tang@starfivetech.com>
----
- target/riscv/bitmanip_helper.c          | 54 +++++++++++++++++++++++++
- target/riscv/helper.h                   |  4 ++
- target/riscv/insn32.decode              | 37 +++++++++++------
- target/riscv/insn_trans/trans_rvb.c.inc | 49 ++++++++++++++++++++++
- target/riscv/translate.c                | 21 ++++++++++
- 5 files changed, 152 insertions(+), 13 deletions(-)
-
-diff --git a/target/riscv/bitmanip_helper.c b/target/riscv/bitmanip_helper.c
-index 46b51399f2..b90bdafb62 100644
---- a/target/riscv/bitmanip_helper.c
-+++ b/target/riscv/bitmanip_helper.c
-@@ -134,3 +134,57 @@ target_ulong HELPER(cmov)(target_ulong rs1, target_ulong rs2, target_ulong rs3)
- {
-     return do_cmov(rs1, rs2, rs3);
- }
-+
-+
-+static target_ulong do_fsl(target_ulong rs1,
-+                           target_ulong rs2,
-+                           target_ulong rs3,
-+                           int bits)
-+{
-+    int shamt = rs2 & (2*bits - 1);
-+    target_ulong a = rs1, b = rs3;
-+
-+    if (shamt >= bits) {
-+        shamt -= bits;
-+        a = rs3;
-+        b = rs1;
-+    }
-+
-+    return shamt ? (a << shamt) | (b >> (bits - shamt)) : a;
-+}
-+
-+target_ulong HELPER(fsl)(target_ulong rs1, target_ulong rs2, target_ulong rs3)
-+{
-+    return do_fsl(rs1, rs2, rs3, TARGET_LONG_BITS);
-+}
-+
-+target_ulong HELPER(fsr)(target_ulong rs1, target_ulong rs2, target_ulong rs3)
-+{
-+    return do_fsl(rs1, -rs2, rs3, TARGET_LONG_BITS);
-+}
-+
-+static target_ulong do_fslw(target_ulong rs1,
-+                            target_ulong rs2,
-+                            target_ulong rs3)
-+{
-+    int shamt = rs2 & 63;
-+    target_ulong a = rs1, b = rs3;
-+
-+    if (shamt >= 32) {
-+        shamt -= 32;
-+        a = rs3;
-+        b = rs1;
-+    }
-+
-+    return shamt ? (a << shamt) | (b >> (32 - shamt)) : a;
-+}
-+
-+target_ulong HELPER(fslw)(target_ulong rs1, target_ulong rs2, target_ulong rs3)
-+{
-+    return do_fslw(rs1, rs2, rs3);
-+}
-+
-+target_ulong HELPER(fsrw)(target_ulong rs1, target_ulong rs2, target_ulong rs3)
-+{
-+    return do_fslw(rs1, -rs2, rs3);
-+}
-diff --git a/target/riscv/helper.h b/target/riscv/helper.h
-index 1282aada80..dcf87f2423 100644
---- a/target/riscv/helper.h
-+++ b/target/riscv/helper.h
-@@ -67,6 +67,10 @@ DEF_HELPER_FLAGS_2(clmul, TCG_CALL_NO_RWG_SE, tl, tl, tl)
- DEF_HELPER_FLAGS_2(clmulh, TCG_CALL_NO_RWG_SE, tl, tl, tl)
- DEF_HELPER_FLAGS_2(clmulr, TCG_CALL_NO_RWG_SE, tl, tl, tl)
- DEF_HELPER_FLAGS_3(cmov, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl)
-+DEF_HELPER_FLAGS_3(fsl, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl)
-+DEF_HELPER_FLAGS_3(fsr, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl)
-+DEF_HELPER_FLAGS_3(fslw, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl)
-+DEF_HELPER_FLAGS_3(fsrw, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl)
- 
- /* Special functions */
- DEF_HELPER_3(csrrw, tl, env, tl, tl)
-diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
-index 06527db5f2..e635b0b6e1 100644
---- a/target/riscv/insn32.decode
-+++ b/target/riscv/insn32.decode
-@@ -24,6 +24,7 @@
- %sh5       20:5
- 
- %sh7    20:7
-+%sh6    20:6
- %csr    20:12
- %rm     12:3
- %nf     29:3                     !function=ex_plus_1
-@@ -43,6 +44,7 @@
- &r    rd rs1 rs2
- &r2   rd rs1
- &r3   rd rs1 rs2 rs3
-+&r3i  rd rs1 imm rs3
- &s    imm rs1 rs2
- &u    imm rd
- &shift     shamt rs1 rd
-@@ -62,12 +64,15 @@
- @j       ....................      ..... ....... &j      imm=%imm_j          %rd
- 
- @sh      ......  ...... .....  ... ..... ....... &shift  shamt=%sh7     %rs1 %rd
-+@sh6     ......  ...... .....  ... ..... ....... &shift  shamt=%sh6     %rs1 %rd
- @csr     ............   .....  ... ..... .......               %csr     %rs1 %rd
- 
- @atom_ld ..... aq:1 rl:1 ..... ........ ..... ....... &atomic rs2=0     %rs1 %rd
- @atom_st ..... aq:1 rl:1 ..... ........ ..... ....... &atomic %rs2      %rs1 %rd
- 
- @r3      ..... ..  ..... ..... ... ..... ....... &r3 %rs3 %rs2 %rs1 %rd
-+@r3_imm  ..... .  ...... ..... ... ..... ....... &r3i %rs3 imm=%sh6 %rs1 %rd
-+@r3w_imm ..... .  ...... ..... ... ..... ....... &r3i %rs3 imm=%sh5 %rs1 %rd
- @r4_rm   ..... ..  ..... ..... ... ..... ....... %rs3 %rs2 %rs1 %rm %rd
- @r_rm    .......   ..... ..... ... ..... ....... %rs2 %rs1 %rm %rd
- @r2_rm   .......   ..... ..... ... ..... ....... %rs1 %rm %rd
-@@ -128,9 +133,9 @@ sltiu    ............     ..... 011 ..... 0010011 @i
- xori     ............     ..... 100 ..... 0010011 @i
- ori      ............     ..... 110 ..... 0010011 @i
- andi     ............     ..... 111 ..... 0010011 @i
--slli     00000. ......    ..... 001 ..... 0010011 @sh
--srli     00000. ......    ..... 101 ..... 0010011 @sh
--srai     01000. ......    ..... 101 ..... 0010011 @sh
-+slli     000000 ......    ..... 001 ..... 0010011 @sh6
-+srli     000000 ......    ..... 101 ..... 0010011 @sh6
-+srai     010000 ......    ..... 101 ..... 0010011 @sh6
- add      0000000 .....    ..... 000 ..... 0110011 @r
- sub      0100000 .....    ..... 000 ..... 0110011 @r
- sll      0000000 .....    ..... 001 ..... 0110011 @r
-@@ -696,16 +701,19 @@ clmulh     0000101 .......... 011 ..... 0110011 @r
- clmulr     0000101 .......... 010 ..... 0110011 @r
- cmix       .....11 .......... 001 ..... 0110011 @r3
- cmov       .....11 .......... 101 ..... 0110011 @r3
--
--bseti      00101. ........... 001 ..... 0010011 @sh
--bclri      01001. ........... 001 ..... 0010011 @sh
--binvi      01101. ........... 001 ..... 0010011 @sh
--bexti      01001. ........... 101 ..... 0010011 @sh
--sloi       00100. ........... 001 ..... 0010011 @sh
--sroi       00100. ........... 101 ..... 0010011 @sh
--rori       01100. ........... 101 ..... 0010011 @sh
--grevi      01101. ........... 101 ..... 0010011 @sh
--gorci      00101. ........... 101 ..... 0010011 @sh
-+fsl        .....10 .......... 001 ..... 0110011 @r3
-+fsr        .....10 .......... 101 ..... 0110011 @r3
-+
-+fsri       .....1 ........... 101 ..... 0010011 @r3_imm
-+bseti      001010 ........... 001 ..... 0010011 @sh6
-+bclri      010010 ........... 001 ..... 0010011 @sh6
-+binvi      011010 ........... 001 ..... 0010011 @sh6
-+bexti      010010 ........... 101 ..... 0010011 @sh6
-+sloi       001000 ........... 001 ..... 0010011 @sh6
-+sroi       001000 ........... 101 ..... 0010011 @sh6
-+rori       011000 ........... 101 ..... 0010011 @sh6
-+grevi      011010 ........... 101 ..... 0010011 @sh6
-+gorci      001010 ........... 101 ..... 0010011 @sh6
- 
- # *** RV64B Standard Extension (in addition to RV32B) ***
- clzw       0110000 00000 ..... 001 ..... 0011011 @r2
-@@ -728,7 +736,10 @@ sh1add_uw  0010000 .......... 010 ..... 0111011 @r
- sh2add_uw  0010000 .......... 100 ..... 0111011 @r
- sh3add_uw  0010000 .......... 110 ..... 0111011 @r
- add_uw     0000100 .......... 000 ..... 0111011 @r
-+fslw       .....10 .......... 001 ..... 0111011 @r3
-+fsrw       .....10 .......... 101 ..... 0111011 @r3
- 
-+fsriw      .....10 .......... 101 ..... 0011011 @r3w_imm
- bsetiw     0010100 .......... 001 ..... 0011011 @sh5
- bclriw     0100100 .......... 001 ..... 0011011 @sh5
- binviw     0110100 .......... 001 ..... 0011011 @sh5
-diff --git a/target/riscv/insn_trans/trans_rvb.c.inc b/target/riscv/insn_trans/trans_rvb.c.inc
-index 2a143d9d8f..91762aace5 100644
---- a/target/riscv/insn_trans/trans_rvb.c.inc
-+++ b/target/riscv/insn_trans/trans_rvb.c.inc
-@@ -260,6 +260,55 @@ static bool trans_cmov(DisasContext *ctx, arg_cmov *a)
-     return gen_quat(ctx, a, gen_helper_cmov);
- }
- 
-+static bool trans_fsl(DisasContext *ctx, arg_fsl *a)
-+{
-+    REQUIRE_EXT(ctx, RVB);
-+    return gen_quat(ctx, a, gen_helper_fsl);
-+}
-+
-+static bool trans_fsr(DisasContext *ctx, arg_fsr *a)
-+{
-+    REQUIRE_EXT(ctx, RVB);
-+    return gen_quat(ctx, a, gen_helper_fsr);
-+}
-+
-+static bool trans_fsri(DisasContext *ctx, arg_fsri *a)
-+{
-+    REQUIRE_EXT(ctx, RVB);
-+
-+    if (a->imm >= 64) {
-+        return false;
-+    }
-+
-+    return gen_quati(ctx, a, gen_helper_fsr);
-+}
-+
-+static bool trans_fslw(DisasContext *ctx, arg_fslw *a)
-+{
-+    REQUIRE_64BIT(ctx);
-+    REQUIRE_EXT(ctx, RVB);
-+    return gen_quat(ctx, a, gen_helper_fsl);
-+}
-+
-+static bool trans_fsrw(DisasContext *ctx, arg_fsrw *a)
-+{
-+    REQUIRE_64BIT(ctx);
-+    REQUIRE_EXT(ctx, RVB);
-+    return gen_quat(ctx, a, gen_helper_fsrw);
-+}
-+
-+static bool trans_fsriw(DisasContext *ctx, arg_fsri *a)
-+{
-+    REQUIRE_64BIT(ctx);
-+    REQUIRE_EXT(ctx, RVB);
-+
-+    if (a->imm >= 32) {
-+        return false;
-+    }
-+
-+    return gen_quati(ctx, a, gen_helper_fsrw);
-+}
-+
- static bool trans_clzw(DisasContext *ctx, arg_clzw *a)
- {
-     REQUIRE_64BIT(ctx);
-diff --git a/target/riscv/translate.c b/target/riscv/translate.c
-index 50aeb2b4c8..daaffa2b26 100644
---- a/target/riscv/translate.c
-+++ b/target/riscv/translate.c
-@@ -836,6 +836,27 @@ static bool gen_shift(DisasContext *ctx, arg_r *a,
-     return true;
- }
- 
-+static bool gen_quati(DisasContext *ctx, arg_r3i *a,
-+                      void(*func)(TCGv, TCGv, TCGv, TCGv))
-+{
-+    TCGv source1, source2, source3;
-+    source1 = tcg_temp_new();
-+    source2 = tcg_temp_new();
-+    source3 = tcg_temp_new();
-+
-+    gen_get_gpr(source1, a->rs1);
-+    tcg_gen_movi_tl(source2, a->imm);
-+    gen_get_gpr(source3, a->rs3);
-+
-+    (*func)(source1, source1, source2, source3);
-+
-+    gen_set_gpr(a->rd, source1);
-+    tcg_temp_free(source1);
-+    tcg_temp_free(source2);
-+    tcg_temp_free(source3);
-+    return true;
-+}
-+
- static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc)
- {
-     DisasContext *ctx = container_of(dcbase, DisasContext, base);
--- 
-2.33.0
-

+ 0 - 264
recipes-devtools/qemu/files/0004-target-riscv-rvb-add-generalized-shuffle.patch

@@ -1,264 +0,0 @@
-From ff1e4f73a392660a0f9f9a728c199caadfe9aefe Mon Sep 17 00:00:00 2001
-From: "eric.tang" <eric.tang@starfivetech.com>
-Date: Fri, 9 Jul 2021 10:13:36 +0800
-Subject: [PATCH 04/11] target/riscv: rvb: add generalized shuffle
-
-Signed-off-by: eric.tang <eric.tang@starfivetech.com>
----
- target/riscv/bitmanip_helper.c          | 132 ++++++++++++++++++++++++
- target/riscv/helper.h                   |   4 +
- target/riscv/insn32.decode              |   6 ++
- target/riscv/insn_trans/trans_rvb.c.inc |  44 ++++++++
- 4 files changed, 186 insertions(+)
-
-diff --git a/target/riscv/bitmanip_helper.c b/target/riscv/bitmanip_helper.c
-index b90bdafb62..0e4780a4cb 100644
---- a/target/riscv/bitmanip_helper.c
-+++ b/target/riscv/bitmanip_helper.c
-@@ -188,3 +188,135 @@ target_ulong HELPER(fsrw)(target_ulong rs1, target_ulong rs2, target_ulong rs3)
- {
-     return do_fslw(rs1, -rs2, rs3);
- }
-+static target_ulong do_shfl(target_ulong rs1,
-+                            target_ulong rs2,
-+                            int bits)
-+{
-+    target_ulong x = rs1;
-+    int shamt = rs2 & ((bits-1) >> 1);
-+    if (shamt & 16)
-+        x = (x & 0xFFFF00000000FFFFLL) |
-+            ((x & 0x0000FFFF00000000LL) >> 16) |
-+            ((x & 0x00000000FFFF0000LL) << 16);
-+    if (shamt &  8)
-+        x = (x & 0xFF0000FFFF0000FFLL) |
-+            ((x & 0x00FF000000FF0000LL) >>  8) |
-+            ((x & 0x0000FF000000FF00LL) <<  8);
-+    if (shamt &  4)
-+        x = (x & 0xF00FF00FF00FF00FLL) |
-+            ((x & 0x0F000F000F000F00LL) >>  4) |
-+            ((x & 0x00F000F000F000F0LL) <<  4);
-+    if (shamt &  2)
-+        x = (x & 0xC3C3C3C3C3C3C3C3LL) |
-+            ((x & 0x3030303030303030LL) >>  2) |
-+            ((x & 0x0C0C0C0C0C0C0C0CLL) <<  2);
-+    if (shamt &  1)
-+        x = (x & 0x9999999999999999LL) |
-+            ((x & 0x4444444444444444LL) >>  1) |
-+            ((x & 0x2222222222222222LL) <<  1);
-+
-+    return x;
-+}
-+
-+static target_ulong do_unshfl(target_ulong rs1,
-+                              target_ulong rs2,
-+                              int bits)
-+{
-+    target_ulong x = rs1;
-+
-+    int shamt = rs2 & ((bits-1) >> 1);
-+    if (shamt &  1)
-+        x = (x & 0x9999999999999999LL) |
-+            ((x & 0x4444444444444444LL) >>  1) |
-+            ((x & 0x2222222222222222LL) <<  1);
-+    if (shamt &  2)
-+        x = (x & 0xC3C3C3C3C3C3C3C3LL) |
-+            ((x & 0x3030303030303030LL) >>  2) |
-+            ((x & 0x0C0C0C0C0C0C0C0CLL) <<  2);
-+    if (shamt &  4)
-+        x = (x & 0xF00FF00FF00FF00FLL) |
-+            ((x & 0x0F000F000F000F00LL) >>  4) |
-+            ((x & 0x00F000F000F000F0LL) <<  4);
-+    if (shamt &  8)
-+        x = (x & 0xFF0000FFFF0000FFLL) |
-+            ((x & 0x00FF000000FF0000LL) >>  8) |
-+            ((x & 0x0000FF000000FF00LL) <<  8);
-+    if (shamt & 16)
-+        x = (x & 0xFFFF00000000FFFFLL) |
-+            ((x & 0x0000FFFF00000000LL) >> 16) |
-+            ((x & 0x00000000FFFF0000LL) << 16);
-+
-+    return x;
-+}
-+
-+static target_ulong do_shflw(target_ulong rs1,
-+                             target_ulong rs2,
-+                             int bits)
-+{
-+    target_ulong x = rs1;
-+    int shamt = rs2 & 15;
-+    if (shamt & 8)
-+        x = (x & 0xFF0000FFFF0000FFLL) |
-+            ((x & 0x00FF000000FF0000LL) >>  8) |
-+            ((x & 0x0000FF000000FF00LL) <<  8);
-+    if (shamt & 4)
-+        x = (x & 0xF00FF00FF00FF00FLL) |
-+            ((x & 0x0F000F000F000F00LL) >>  4) |
-+            ((x & 0x00F000F000F000F0LL) <<  4);
-+    if (shamt & 2)
-+        x = (x & 0xC3C3C3C3C3C3C3C3LL) |
-+            ((x & 0x3030303030303030LL) >>  2) |
-+            ((x & 0x0C0C0C0C0C0C0C0CLL) <<  2);
-+    if (shamt & 1)
-+        x = (x & 0x9999999999999999LL) |
-+            ((x & 0x4444444444444444LL) >>  1) |
-+            ((x & 0x2222222222222222LL) <<  1);
-+
-+    return x;
-+}
-+
-+static target_ulong do_unshflw(target_ulong rs1,
-+                               target_ulong rs2,
-+                               int bits)
-+{
-+    target_ulong x = rs1;
-+    int shamt = rs2 & 15;
-+    if (shamt & 1)
-+        x = (x & 0x9999999999999999LL) |
-+            ((x & 0x4444444444444444LL) >>  1) |
-+            ((x & 0x2222222222222222LL) <<  1);
-+    if (shamt & 2)
-+        x = (x & 0xC3C3C3C3C3C3C3C3LL) |
-+            ((x & 0x3030303030303030LL) >>  2) |
-+            ((x & 0x0C0C0C0C0C0C0C0CLL) <<  2);
-+    if (shamt & 4)
-+        x = (x & 0xF00FF00FF00FF00FLL) |
-+            ((x & 0x0F000F000F000F00LL) >>  4) |
-+            ((x & 0x00F000F000F000F0LL) <<  4);
-+    if (shamt & 8)
-+        x = (x & 0xFF0000FFFF0000FFLL) |
-+            ((x & 0x00FF000000FF0000LL) >>  8) |
-+            ((x & 0x0000FF000000FF00LL) <<  8);
-+
-+    return x;
-+}
-+
-+target_ulong HELPER(shfl)(target_ulong rs1, target_ulong rs2)
-+{
-+    return do_shfl(rs1, rs2, TARGET_LONG_BITS);
-+}
-+
-+target_ulong HELPER(unshfl)(target_ulong rs1, target_ulong rs2)
-+{
-+    return do_unshfl(rs1, rs2, TARGET_LONG_BITS);
-+}
-+
-+target_ulong HELPER(shflw)(target_ulong rs1, target_ulong rs2)
-+{
-+    return do_shflw(rs1, rs2, TARGET_LONG_BITS);
-+}
-+
-+target_ulong HELPER(unshflw)(target_ulong rs1, target_ulong rs2)
-+{
-+    return do_unshflw(rs1, rs2, TARGET_LONG_BITS);
-+}
-diff --git a/target/riscv/helper.h b/target/riscv/helper.h
-index dcf87f2423..8190b72880 100644
---- a/target/riscv/helper.h
-+++ b/target/riscv/helper.h
-@@ -66,6 +66,10 @@ DEF_HELPER_FLAGS_2(gorcw, TCG_CALL_NO_RWG_SE, tl, tl, tl)
- DEF_HELPER_FLAGS_2(clmul, TCG_CALL_NO_RWG_SE, tl, tl, tl)
- DEF_HELPER_FLAGS_2(clmulh, TCG_CALL_NO_RWG_SE, tl, tl, tl)
- DEF_HELPER_FLAGS_2(clmulr, TCG_CALL_NO_RWG_SE, tl, tl, tl)
-+DEF_HELPER_FLAGS_2(shfl, TCG_CALL_NO_RWG_SE, tl, tl, tl)
-+DEF_HELPER_FLAGS_2(unshfl, TCG_CALL_NO_RWG_SE, tl, tl, tl)
-+DEF_HELPER_FLAGS_2(shflw, TCG_CALL_NO_RWG_SE, tl, tl, tl)
-+DEF_HELPER_FLAGS_2(unshflw, TCG_CALL_NO_RWG_SE, tl, tl, tl)
- DEF_HELPER_FLAGS_3(cmov, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl)
- DEF_HELPER_FLAGS_3(fsl, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl)
- DEF_HELPER_FLAGS_3(fsr, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl)
-diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
-index e635b0b6e1..cd3518d716 100644
---- a/target/riscv/insn32.decode
-+++ b/target/riscv/insn32.decode
-@@ -699,6 +699,8 @@ sh3add     0010000 .......... 110 ..... 0110011 @r
- clmul      0000101 .......... 001 ..... 0110011 @r
- clmulh     0000101 .......... 011 ..... 0110011 @r
- clmulr     0000101 .......... 010 ..... 0110011 @r
-+shfl       0000100 .......... 001 ..... 0110011 @r
-+unshfl     0000100 .......... 101 ..... 0110011 @r
- cmix       .....11 .......... 001 ..... 0110011 @r3
- cmov       .....11 .......... 101 ..... 0110011 @r3
- fsl        .....10 .......... 001 ..... 0110011 @r3
-@@ -714,6 +716,8 @@ sroi       001000 ........... 101 ..... 0010011 @sh6
- rori       011000 ........... 101 ..... 0010011 @sh6
- grevi      011010 ........... 101 ..... 0010011 @sh6
- gorci      001010 ........... 101 ..... 0010011 @sh6
-+shfli      000010 ........... 001 ..... 0010011 @sh6
-+unshfli    000010 ........... 101 ..... 0010011 @sh6
- 
- # *** RV64B Standard Extension (in addition to RV32B) ***
- clzw       0110000 00000 ..... 001 ..... 0011011 @r2
-@@ -736,6 +740,8 @@ sh1add_uw  0010000 .......... 010 ..... 0111011 @r
- sh2add_uw  0010000 .......... 100 ..... 0111011 @r
- sh3add_uw  0010000 .......... 110 ..... 0111011 @r
- add_uw     0000100 .......... 000 ..... 0111011 @r
-+shflw      0000100 .......... 001 ..... 0111011 @r
-+unshflw    0000100 .......... 101 ..... 0111011 @r
- fslw       .....10 .......... 001 ..... 0111011 @r3
- fsrw       .....10 .......... 101 ..... 0111011 @r3
- 
-diff --git a/target/riscv/insn_trans/trans_rvb.c.inc b/target/riscv/insn_trans/trans_rvb.c.inc
-index 91762aace5..4317a8b2db 100644
---- a/target/riscv/insn_trans/trans_rvb.c.inc
-+++ b/target/riscv/insn_trans/trans_rvb.c.inc
-@@ -248,6 +248,36 @@ GEN_TRANS_CLMUL(clmul)
- GEN_TRANS_CLMUL(clmulh)
- GEN_TRANS_CLMUL(clmulr)
- 
-+static bool trans_shfl(DisasContext *ctx, arg_shfl *a)
-+{
-+    REQUIRE_EXT(ctx, RVB);
-+    return gen_arith(ctx, a, gen_helper_shfl);
-+}
-+
-+static bool trans_unshfl(DisasContext *ctx, arg_unshfl *a)
-+{
-+    REQUIRE_EXT(ctx, RVB);
-+    return gen_arith(ctx, a, gen_helper_unshfl);
-+}
-+
-+static bool trans_shfli(DisasContext *ctx, arg_shfli *a)
-+{
-+    REQUIRE_EXT(ctx, RVB);
-+    if (a->shamt >= (TARGET_LONG_BITS / 2)) {
-+        return false;
-+    }
-+    return gen_shifti(ctx, a, gen_helper_shfl);
-+}
-+
-+static bool trans_unshfli(DisasContext *ctx, arg_unshfli *a)
-+{
-+    REQUIRE_EXT(ctx, RVB);
-+    if (a->shamt >= (TARGET_LONG_BITS / 2)) {
-+        return false;
-+    }
-+    return gen_shifti(ctx, a, gen_helper_unshfl);
-+}
-+
- static bool trans_cmix(DisasContext *ctx, arg_cmix *a)
- {
-     REQUIRE_EXT(ctx, RVB);
-@@ -283,6 +313,20 @@ static bool trans_fsri(DisasContext *ctx, arg_fsri *a)
-     return gen_quati(ctx, a, gen_helper_fsr);
- }
- 
-+static bool trans_shflw(DisasContext *ctx, arg_shflw *a)
-+{
-+    REQUIRE_64BIT(ctx);
-+    REQUIRE_EXT(ctx, RVB);
-+    return gen_arith(ctx, a, gen_helper_shflw);
-+}
-+
-+static bool trans_unshflw(DisasContext *ctx, arg_unshflw *a)
-+{
-+    REQUIRE_64BIT(ctx);
-+    REQUIRE_EXT(ctx, RVB);
-+    return gen_arith(ctx, a, gen_helper_unshflw);
-+}
-+
- static bool trans_fslw(DisasContext *ctx, arg_fslw *a)
- {
-     REQUIRE_64BIT(ctx);
--- 
-2.33.0
-

+ 0 - 137
recipes-devtools/qemu/files/0005-target-riscv-rvb-add-crossbar-permutation-instructio.patch

@@ -1,137 +0,0 @@
-From 8aee0b0ae5bf3becd7544e0ed0694268ccaf135a Mon Sep 17 00:00:00 2001
-From: "eric.tang" <eric.tang@starfivetech.com>
-Date: Fri, 9 Jul 2021 11:02:06 +0800
-Subject: [PATCH 05/11] target/riscv: rvb: add crossbar permutation
- instructions
-
-Signed-off-by: eric.tang <eric.tang@starfivetech.com>
----
- target/riscv/bitmanip_helper.c          | 39 +++++++++++++++++++++++++
- target/riscv/helper.h                   |  4 +++
- target/riscv/insn32.decode              |  4 +++
- target/riscv/insn_trans/trans_rvb.c.inc | 18 ++++++++++++
- 4 files changed, 65 insertions(+)
-
-diff --git a/target/riscv/bitmanip_helper.c b/target/riscv/bitmanip_helper.c
-index 0e4780a4cb..6185fe44d3 100644
---- a/target/riscv/bitmanip_helper.c
-+++ b/target/riscv/bitmanip_helper.c
-@@ -320,3 +320,42 @@ target_ulong HELPER(unshflw)(target_ulong rs1, target_ulong rs2)
- {
-     return do_unshflw(rs1, rs2, TARGET_LONG_BITS);
- }
-+
-+static target_ulong do_xperm(target_ulong rs1,
-+                             target_ulong rs2,
-+                             int sz_log2,
-+                             int bits)
-+{
-+    target_ulong pos = 0;
-+    target_ulong r = 0;
-+    target_ulong sz = 1LL << sz_log2;
-+    target_ulong mask = (1LL << sz) - 1;
-+    int i;
-+    for (i = 0; i < bits; i += sz) {
-+        pos = ((rs2 >> i) & mask) << sz_log2;
-+        if (pos < bits)
-+            r |= ((rs1 >> pos) & mask) << i;
-+    }
-+
-+    return r;
-+}
-+
-+target_ulong HELPER(xperm_n)(target_ulong rs1, target_ulong rs2)
-+{
-+    return do_xperm(rs1, rs2, 2, TARGET_LONG_BITS);
-+}
-+
-+target_ulong HELPER(xperm_b)(target_ulong rs1, target_ulong rs2)
-+{
-+    return do_xperm(rs1, rs2, 3, TARGET_LONG_BITS);
-+}
-+
-+target_ulong HELPER(xperm_h)(target_ulong rs1, target_ulong rs2)
-+{
-+    return do_xperm(rs1, rs2, 4, TARGET_LONG_BITS);
-+}
-+
-+target_ulong HELPER(xperm_w)(target_ulong rs1, target_ulong rs2)
-+{
-+    return do_xperm(rs1, rs2, 5, TARGET_LONG_BITS);
-+}
-diff --git a/target/riscv/helper.h b/target/riscv/helper.h
-index 8190b72880..2e6d4c3704 100644
---- a/target/riscv/helper.h
-+++ b/target/riscv/helper.h
-@@ -70,6 +70,10 @@ DEF_HELPER_FLAGS_2(shfl, TCG_CALL_NO_RWG_SE, tl, tl, tl)
- DEF_HELPER_FLAGS_2(unshfl, TCG_CALL_NO_RWG_SE, tl, tl, tl)
- DEF_HELPER_FLAGS_2(shflw, TCG_CALL_NO_RWG_SE, tl, tl, tl)
- DEF_HELPER_FLAGS_2(unshflw, TCG_CALL_NO_RWG_SE, tl, tl, tl)
-+DEF_HELPER_FLAGS_2(xperm_n, TCG_CALL_NO_RWG_SE, tl, tl, tl)
-+DEF_HELPER_FLAGS_2(xperm_b, TCG_CALL_NO_RWG_SE, tl, tl, tl)
-+DEF_HELPER_FLAGS_2(xperm_h, TCG_CALL_NO_RWG_SE, tl, tl, tl)
-+DEF_HELPER_FLAGS_2(xperm_w, TCG_CALL_NO_RWG_SE, tl, tl, tl)
- DEF_HELPER_FLAGS_3(cmov, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl)
- DEF_HELPER_FLAGS_3(fsl, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl)
- DEF_HELPER_FLAGS_3(fsr, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl)
-diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
-index cd3518d716..3fcddcd5e3 100644
---- a/target/riscv/insn32.decode
-+++ b/target/riscv/insn32.decode
-@@ -701,6 +701,9 @@ clmulh     0000101 .......... 011 ..... 0110011 @r
- clmulr     0000101 .......... 010 ..... 0110011 @r
- shfl       0000100 .......... 001 ..... 0110011 @r
- unshfl     0000100 .......... 101 ..... 0110011 @r
-+xperm_n    0010100 .......... 010 ..... 0110011 @r
-+xperm_b    0010100 .......... 100 ..... 0110011 @r
-+xperm_h    0010100 .......... 110 ..... 0110011 @r
- cmix       .....11 .......... 001 ..... 0110011 @r3
- cmov       .....11 .......... 101 ..... 0110011 @r3
- fsl        .....10 .......... 001 ..... 0110011 @r3
-@@ -742,6 +745,7 @@ sh3add_uw  0010000 .......... 110 ..... 0111011 @r
- add_uw     0000100 .......... 000 ..... 0111011 @r
- shflw      0000100 .......... 001 ..... 0111011 @r
- unshflw    0000100 .......... 101 ..... 0111011 @r
-+xperm_w    0010100 .......... 000 ..... 0110011 @r
- fslw       .....10 .......... 001 ..... 0111011 @r3
- fsrw       .....10 .......... 101 ..... 0111011 @r3
- 
-diff --git a/target/riscv/insn_trans/trans_rvb.c.inc b/target/riscv/insn_trans/trans_rvb.c.inc
-index 4317a8b2db..12cfb0d955 100644
---- a/target/riscv/insn_trans/trans_rvb.c.inc
-+++ b/target/riscv/insn_trans/trans_rvb.c.inc
-@@ -278,6 +278,17 @@ static bool trans_unshfli(DisasContext *ctx, arg_unshfli *a)
-     return gen_shifti(ctx, a, gen_helper_unshfl);
- }
- 
-+#define GEN_TRANS_XPERM(NAME)                                   \
-+static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a)      \
-+{                                                               \
-+    REQUIRE_EXT(ctx, RVB);                                      \
-+    return gen_arith(ctx, a, gen_helper_##NAME);                \
-+}
-+
-+GEN_TRANS_XPERM(xperm_n)
-+GEN_TRANS_XPERM(xperm_b)
-+GEN_TRANS_XPERM(xperm_h)
-+
- static bool trans_cmix(DisasContext *ctx, arg_cmix *a)
- {
-     REQUIRE_EXT(ctx, RVB);
-@@ -327,6 +338,13 @@ static bool trans_unshflw(DisasContext *ctx, arg_unshflw *a)
-     return gen_arith(ctx, a, gen_helper_unshflw);
- }
- 
-+static bool trans_xperm_w(DisasContext *ctx, arg_xperm_w *a)
-+{
-+    REQUIRE_64BIT(ctx);
-+    REQUIRE_EXT(ctx, RVB);
-+    return gen_arith(ctx, a, gen_helper_xperm_w);
-+}
-+
- static bool trans_fslw(DisasContext *ctx, arg_fslw *a)
- {
-     REQUIRE_64BIT(ctx);
--- 
-2.33.0
-

+ 0 - 153
recipes-devtools/qemu/files/0006-target-riscv-rvb-add-bfp-bfpw-instructions.patch

@@ -1,153 +0,0 @@
-From 27fd4ca5ae5a5cd8ba49482305d54961a2031932 Mon Sep 17 00:00:00 2001
-From: "eric.tang" <eric.tang@starfivetech.com>
-Date: Fri, 9 Jul 2021 12:36:57 +0800
-Subject: [PATCH 06/11] target/riscv: rvb: add bfp/bfpw instructions
-
-Signed-off-by: eric.tang <eric.tang@starfivetech.com>
----
- target/riscv/bitmanip_helper.c          | 40 ++++++++++++++++++++++++-
- target/riscv/helper.h                   |  2 ++
- target/riscv/insn32.decode              |  2 ++
- target/riscv/insn_trans/trans_rvb.c.inc | 13 ++++++++
- target/riscv/translate.c                |  6 ++++
- 5 files changed, 62 insertions(+), 1 deletion(-)
-
-diff --git a/target/riscv/bitmanip_helper.c b/target/riscv/bitmanip_helper.c
-index 6185fe44d3..a610b22fa6 100644
---- a/target/riscv/bitmanip_helper.c
-+++ b/target/riscv/bitmanip_helper.c
-@@ -135,7 +135,6 @@ target_ulong HELPER(cmov)(target_ulong rs1, target_ulong rs2, target_ulong rs3)
-     return do_cmov(rs1, rs2, rs3);
- }
- 
--
- static target_ulong do_fsl(target_ulong rs1,
-                            target_ulong rs2,
-                            target_ulong rs3,
-@@ -359,3 +358,42 @@ target_ulong HELPER(xperm_w)(target_ulong rs1, target_ulong rs2)
- {
-     return do_xperm(rs1, rs2, 5, TARGET_LONG_BITS);
- }
-+
-+static target_ulong do_bfp(target_ulong rs1,
-+                           target_ulong rs2,
-+                           int bits)
-+{
-+    target_ulong cfg = rs2 >> (bits/2);
-+    if ((cfg >> 30) == 2)
-+        cfg = cfg >> 16;
-+    int len = (cfg >> 8) & (bits/2 - 1);
-+    int off = cfg & (bits - 1);
-+    len = len ? len : bits/2;
-+    target_ulong mask = ~(~(target_ulong)0 << len) << off;
-+    target_ulong data = rs2 << off;
-+
-+    return (data & mask) | (rs1 & ~mask);
-+}
-+
-+static target_ulong do_bfpw(target_ulong rs1,
-+                            target_ulong rs2)
-+{
-+    target_ulong cfg = rs2 >> 16;
-+    int len = (cfg >> 8) & 15;
-+    int off = cfg & 31;
-+    len = len ? len : 16;
-+    target_ulong mask = ~(~(target_ulong)(0) << len) << off;
-+    target_ulong data = rs2 << off;
-+
-+    return (data & mask) | (rs1 & ~mask);
-+}
-+
-+target_ulong HELPER(bfp)(target_ulong rs1, target_ulong rs2)
-+{
-+    return do_bfp(rs1, rs2, TARGET_LONG_BITS);
-+}
-+
-+target_ulong HELPER(bfpw)(target_ulong rs1, target_ulong rs2)
-+{
-+    return do_bfpw(rs1, rs2);
-+}
-diff --git a/target/riscv/helper.h b/target/riscv/helper.h
-index 2e6d4c3704..2ae199f399 100644
---- a/target/riscv/helper.h
-+++ b/target/riscv/helper.h
-@@ -74,6 +74,8 @@ DEF_HELPER_FLAGS_2(xperm_n, TCG_CALL_NO_RWG_SE, tl, tl, tl)
- DEF_HELPER_FLAGS_2(xperm_b, TCG_CALL_NO_RWG_SE, tl, tl, tl)
- DEF_HELPER_FLAGS_2(xperm_h, TCG_CALL_NO_RWG_SE, tl, tl, tl)
- DEF_HELPER_FLAGS_2(xperm_w, TCG_CALL_NO_RWG_SE, tl, tl, tl)
-+DEF_HELPER_FLAGS_2(bfp, TCG_CALL_NO_RWG_SE, tl, tl, tl)
-+DEF_HELPER_FLAGS_2(bfpw, TCG_CALL_NO_RWG_SE, tl, tl, tl)
- DEF_HELPER_FLAGS_3(cmov, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl)
- DEF_HELPER_FLAGS_3(fsl, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl)
- DEF_HELPER_FLAGS_3(fsr, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl)
-diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
-index 3fcddcd5e3..8187c42323 100644
---- a/target/riscv/insn32.decode
-+++ b/target/riscv/insn32.decode
-@@ -704,6 +704,7 @@ unshfl     0000100 .......... 101 ..... 0110011 @r
- xperm_n    0010100 .......... 010 ..... 0110011 @r
- xperm_b    0010100 .......... 100 ..... 0110011 @r
- xperm_h    0010100 .......... 110 ..... 0110011 @r
-+bfp        0100100 .......... 111 ..... 0110011 @r
- cmix       .....11 .......... 001 ..... 0110011 @r3
- cmov       .....11 .......... 101 ..... 0110011 @r3
- fsl        .....10 .......... 001 ..... 0110011 @r3
-@@ -746,6 +747,7 @@ add_uw     0000100 .......... 000 ..... 0111011 @r
- shflw      0000100 .......... 001 ..... 0111011 @r
- unshflw    0000100 .......... 101 ..... 0111011 @r
- xperm_w    0010100 .......... 000 ..... 0110011 @r
-+bfpw       0100100 .......... 111 ..... 0111011 @r
- fslw       .....10 .......... 001 ..... 0111011 @r3
- fsrw       .....10 .......... 101 ..... 0111011 @r3
- 
-diff --git a/target/riscv/insn_trans/trans_rvb.c.inc b/target/riscv/insn_trans/trans_rvb.c.inc
-index 12cfb0d955..e15f0a7999 100644
---- a/target/riscv/insn_trans/trans_rvb.c.inc
-+++ b/target/riscv/insn_trans/trans_rvb.c.inc
-@@ -289,6 +289,12 @@ GEN_TRANS_XPERM(xperm_n)
- GEN_TRANS_XPERM(xperm_b)
- GEN_TRANS_XPERM(xperm_h)
- 
-+static bool trans_bfp(DisasContext *ctx, arg_bfp *a)
-+{
-+    REQUIRE_EXT(ctx, RVB);
-+    return gen_arith(ctx, a, gen_helper_bfp);
-+}
-+
- static bool trans_cmix(DisasContext *ctx, arg_cmix *a)
- {
-     REQUIRE_EXT(ctx, RVB);
-@@ -345,6 +351,13 @@ static bool trans_xperm_w(DisasContext *ctx, arg_xperm_w *a)
-     return gen_arith(ctx, a, gen_helper_xperm_w);
- }
- 
-+static bool trans_bfpw(DisasContext *ctx, arg_bfpw *a)
-+{
-+    REQUIRE_64BIT(ctx);
-+    REQUIRE_EXT(ctx, RVB);
-+    return gen_arith(ctx, a, gen_bfpw);
-+}
-+
- static bool trans_fslw(DisasContext *ctx, arg_fslw *a)
- {
-     REQUIRE_64BIT(ctx);
-diff --git a/target/riscv/translate.c b/target/riscv/translate.c
-index daaffa2b26..37eb0ea046 100644
---- a/target/riscv/translate.c
-+++ b/target/riscv/translate.c
-@@ -857,6 +857,12 @@ static bool gen_quati(DisasContext *ctx, arg_r3i *a,
-     return true;
- }
- 
-+static void gen_bfpw(TCGv ret, TCGv arg1, TCGv arg2)
-+{
-+    gen_helper_bfpw(ret, arg1, arg2);
-+    tcg_gen_ext32s_tl(ret, ret);
-+}
-+
- static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc)
- {
-     DisasContext *ctx = container_of(dcbase, DisasContext, base);
--- 
-2.33.0
-

+ 0 - 164
recipes-devtools/qemu/files/0007-target-riscv-rvb-modified-some-errors-on-some-rv64-o.patch

@@ -1,164 +0,0 @@
-From 8df40236e0d5f32a2c69b0864d378d169a851de3 Mon Sep 17 00:00:00 2001
-From: "eric.tang" <eric.tang@starfivetech.com>
-Date: Fri, 9 Jul 2021 14:04:34 +0800
-Subject: [PATCH 07/11] target/riscv: rvb: modified some errors on some rv64
- only support instrutions
-
-Signed-off-by: eric.tang <eric.tang@starfivetech.com>
----
- target/riscv/bitmanip_helper.c          | 13 ++++++------
- target/riscv/insn_trans/trans_rvb.c.inc | 10 ++++-----
- target/riscv/translate.c                | 27 +++++++++++++++++++------
- 3 files changed, 32 insertions(+), 18 deletions(-)
-
-diff --git a/target/riscv/bitmanip_helper.c b/target/riscv/bitmanip_helper.c
-index a610b22fa6..d9c5c56468 100644
---- a/target/riscv/bitmanip_helper.c
-+++ b/target/riscv/bitmanip_helper.c
-@@ -175,7 +175,7 @@ static target_ulong do_fslw(target_ulong rs1,
-         b = rs1;
-     }
- 
--    return shamt ? (a << shamt) | (b >> (32 - shamt)) : a;
-+    return shamt ? (a << shamt) | ((b & 0xffffffff) >> (32 - shamt)) : a;
- }
- 
- target_ulong HELPER(fslw)(target_ulong rs1, target_ulong rs2, target_ulong rs3)
-@@ -187,6 +187,7 @@ target_ulong HELPER(fsrw)(target_ulong rs1, target_ulong rs2, target_ulong rs3)
- {
-     return do_fslw(rs1, -rs2, rs3);
- }
-+
- static target_ulong do_shfl(target_ulong rs1,
-                             target_ulong rs2,
-                             int bits)
-@@ -249,8 +250,7 @@ static target_ulong do_unshfl(target_ulong rs1,
- }
- 
- static target_ulong do_shflw(target_ulong rs1,
--                             target_ulong rs2,
--                             int bits)
-+                             target_ulong rs2)
- {
-     target_ulong x = rs1;
-     int shamt = rs2 & 15;
-@@ -275,8 +275,7 @@ static target_ulong do_shflw(target_ulong rs1,
- }
- 
- static target_ulong do_unshflw(target_ulong rs1,
--                               target_ulong rs2,
--                               int bits)
-+                               target_ulong rs2)
- {
-     target_ulong x = rs1;
-     int shamt = rs2 & 15;
-@@ -312,12 +311,12 @@ target_ulong HELPER(unshfl)(target_ulong rs1, target_ulong rs2)
- 
- target_ulong HELPER(shflw)(target_ulong rs1, target_ulong rs2)
- {
--    return do_shflw(rs1, rs2, TARGET_LONG_BITS);
-+    return do_shflw(rs1, rs2);
- }
- 
- target_ulong HELPER(unshflw)(target_ulong rs1, target_ulong rs2)
- {
--    return do_unshflw(rs1, rs2, TARGET_LONG_BITS);
-+    return do_unshflw(rs1, rs2);
- }
- 
- static target_ulong do_xperm(target_ulong rs1,
-diff --git a/target/riscv/insn_trans/trans_rvb.c.inc b/target/riscv/insn_trans/trans_rvb.c.inc
-index e15f0a7999..28a5b34bd4 100644
---- a/target/riscv/insn_trans/trans_rvb.c.inc
-+++ b/target/riscv/insn_trans/trans_rvb.c.inc
-@@ -334,14 +334,14 @@ static bool trans_shflw(DisasContext *ctx, arg_shflw *a)
- {
-     REQUIRE_64BIT(ctx);
-     REQUIRE_EXT(ctx, RVB);
--    return gen_arith(ctx, a, gen_helper_shflw);
-+    return gen_arith(ctx, a, gen_shflw);
- }
- 
- static bool trans_unshflw(DisasContext *ctx, arg_unshflw *a)
- {
-     REQUIRE_64BIT(ctx);
-     REQUIRE_EXT(ctx, RVB);
--    return gen_arith(ctx, a, gen_helper_unshflw);
-+    return gen_arith(ctx, a, gen_unshflw);
- }
- 
- static bool trans_xperm_w(DisasContext *ctx, arg_xperm_w *a)
-@@ -362,14 +362,14 @@ static bool trans_fslw(DisasContext *ctx, arg_fslw *a)
- {
-     REQUIRE_64BIT(ctx);
-     REQUIRE_EXT(ctx, RVB);
--    return gen_quat(ctx, a, gen_helper_fsl);
-+    return gen_quat(ctx, a, gen_fslw);
- }
- 
- static bool trans_fsrw(DisasContext *ctx, arg_fsrw *a)
- {
-     REQUIRE_64BIT(ctx);
-     REQUIRE_EXT(ctx, RVB);
--    return gen_quat(ctx, a, gen_helper_fsrw);
-+    return gen_quat(ctx, a, gen_fsrw);
- }
- 
- static bool trans_fsriw(DisasContext *ctx, arg_fsri *a)
-@@ -381,7 +381,7 @@ static bool trans_fsriw(DisasContext *ctx, arg_fsri *a)
-         return false;
-     }
- 
--    return gen_quati(ctx, a, gen_helper_fsrw);
-+    return gen_quati(ctx, a, gen_fsrw);
- }
- 
- static bool trans_clzw(DisasContext *ctx, arg_clzw *a)
-diff --git a/target/riscv/translate.c b/target/riscv/translate.c
-index 37eb0ea046..24bcf8c580 100644
---- a/target/riscv/translate.c
-+++ b/target/riscv/translate.c
-@@ -771,6 +771,27 @@ static void gen_add_uw(TCGv ret, TCGv arg1, TCGv arg2)
-     tcg_gen_add_tl(ret, arg1, arg2);
- }
- 
-+#define GEN_RV64ONLY_INSN_3(NAME)                                   \
-+static void gen_##NAME(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3)   \
-+{                                                                   \
-+    gen_helper_##NAME(ret, arg1, arg2, arg3);                       \
-+    tcg_gen_ext32s_tl(ret, ret);                                    \
-+}                                                                   \
-+
-+GEN_RV64ONLY_INSN_3(fslw)
-+GEN_RV64ONLY_INSN_3(fsrw)
-+
-+#define GEN_RV64ONLY_INSN_2(NAME)                                   \
-+static void gen_##NAME(TCGv ret, TCGv arg1, TCGv arg2)              \
-+{                                                                   \
-+    gen_helper_##NAME(ret, arg1, arg2);                             \
-+    tcg_gen_ext32s_tl(ret, ret);                                    \
-+}                                                                   \
-+
-+GEN_RV64ONLY_INSN_2(shflw)
-+GEN_RV64ONLY_INSN_2(unshflw)
-+GEN_RV64ONLY_INSN_2(bfpw)
-+
- static bool gen_arith(DisasContext *ctx, arg_r *a,
-                       void(*func)(TCGv, TCGv, TCGv))
- {
-@@ -857,12 +878,6 @@ static bool gen_quati(DisasContext *ctx, arg_r3i *a,
-     return true;
- }
- 
--static void gen_bfpw(TCGv ret, TCGv arg1, TCGv arg2)
--{
--    gen_helper_bfpw(ret, arg1, arg2);
--    tcg_gen_ext32s_tl(ret, ret);
--}
--
- static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc)
- {
-     DisasContext *ctx = container_of(dcbase, DisasContext, base);
--- 
-2.33.0
-

+ 0 - 174
recipes-devtools/qemu/files/0008-target-riscv-rvb-add-bcompress-bdecompress-instructi.patch

@@ -1,174 +0,0 @@
-From ffb3819a1bb82a3143449726e8d636f509772750 Mon Sep 17 00:00:00 2001
-From: "eric.tang" <eric.tang@starfivetech.com>
-Date: Fri, 9 Jul 2021 15:17:18 +0800
-Subject: [PATCH 08/11] target/riscv: rvb: add bcompress/bdecompress
- instructions
-
-Signed-off-by: eric.tang <eric.tang@starfivetech.com>
----
- target/riscv/bitmanip_helper.c          | 44 +++++++++++++++++++++++++
- target/riscv/helper.h                   |  2 ++
- target/riscv/insn32.decode              |  4 +++
- target/riscv/insn_trans/trans_rvb.c.inc | 24 ++++++++++++++
- target/riscv/translate.c                | 16 +++++++++
- 5 files changed, 90 insertions(+)
-
-diff --git a/target/riscv/bitmanip_helper.c b/target/riscv/bitmanip_helper.c
-index d9c5c56468..5e46b69159 100644
---- a/target/riscv/bitmanip_helper.c
-+++ b/target/riscv/bitmanip_helper.c
-@@ -396,3 +396,47 @@ target_ulong HELPER(bfpw)(target_ulong rs1, target_ulong rs2)
- {
-     return do_bfpw(rs1, rs2);
- }
-+
-+static target_ulong do_bcompress(target_ulong rs1,
-+                                 target_ulong rs2,
-+                                 int bits)
-+{
-+    target_ulong r = 0;
-+    int i, j = 0;
-+    for (i = 0; i < bits; i++) {
-+        if ((rs2 >> i) & 1) {
-+            if ((rs1 >> i) & 1)
-+                r |= (target_ulong)1 << j;
-+            j++;
-+        }
-+    }
-+
-+    return r;
-+}
-+
-+static target_ulong do_bdecompress(target_ulong rs1,
-+                                   target_ulong rs2,
-+                                   int bits)
-+{
-+    target_ulong r = 0;
-+    int i, j = 0;
-+    for (i = 0; i < bits; i++) {
-+        if ((rs2 >> i) & 1) {
-+            if ((rs1 >> j) & 1)
-+                r |= (target_ulong)1 << i;
-+            j++;
-+        }
-+    }
-+
-+    return r;
-+}
-+
-+target_ulong HELPER(bcompress)(target_ulong rs1, target_ulong rs2)
-+{
-+    return do_bcompress(rs1, rs2, TARGET_LONG_BITS);
-+}
-+
-+target_ulong HELPER(bdecompress)(target_ulong rs1, target_ulong rs2)
-+{
-+    return do_bdecompress(rs1, rs2, TARGET_LONG_BITS);
-+}
-diff --git a/target/riscv/helper.h b/target/riscv/helper.h
-index 2ae199f399..577f9f6811 100644
---- a/target/riscv/helper.h
-+++ b/target/riscv/helper.h
-@@ -76,6 +76,8 @@ DEF_HELPER_FLAGS_2(xperm_h, TCG_CALL_NO_RWG_SE, tl, tl, tl)
- DEF_HELPER_FLAGS_2(xperm_w, TCG_CALL_NO_RWG_SE, tl, tl, tl)
- DEF_HELPER_FLAGS_2(bfp, TCG_CALL_NO_RWG_SE, tl, tl, tl)
- DEF_HELPER_FLAGS_2(bfpw, TCG_CALL_NO_RWG_SE, tl, tl, tl)
-+DEF_HELPER_FLAGS_2(bcompress, TCG_CALL_NO_RWG_SE, tl, tl, tl)
-+DEF_HELPER_FLAGS_2(bdecompress, TCG_CALL_NO_RWG_SE, tl, tl, tl)
- DEF_HELPER_FLAGS_3(cmov, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl)
- DEF_HELPER_FLAGS_3(fsl, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl)
- DEF_HELPER_FLAGS_3(fsr, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl)
-diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
-index 8187c42323..8aaca89ce9 100644
---- a/target/riscv/insn32.decode
-+++ b/target/riscv/insn32.decode
-@@ -709,6 +709,8 @@ cmix       .....11 .......... 001 ..... 0110011 @r3
- cmov       .....11 .......... 101 ..... 0110011 @r3
- fsl        .....10 .......... 001 ..... 0110011 @r3
- fsr        .....10 .......... 101 ..... 0110011 @r3
-+bcompress    0000100 .......... 110 ..... 0110011 @r
-+bdecompress  0100100 .......... 110 ..... 0110011 @r
- 
- fsri       .....1 ........... 101 ..... 0010011 @r3_imm
- bseti      001010 ........... 001 ..... 0010011 @sh6
-@@ -750,6 +752,8 @@ xperm_w    0010100 .......... 000 ..... 0110011 @r
- bfpw       0100100 .......... 111 ..... 0111011 @r
- fslw       .....10 .......... 001 ..... 0111011 @r3
- fsrw       .....10 .......... 101 ..... 0111011 @r3
-+bcompressw   0000100 .......... 110 ..... 0111011 @r
-+bdecompressw 0100100 .......... 110 ..... 0111011 @r
- 
- fsriw      .....10 .......... 101 ..... 0011011 @r3w_imm
- bsetiw     0010100 .......... 001 ..... 0011011 @sh5
-diff --git a/target/riscv/insn_trans/trans_rvb.c.inc b/target/riscv/insn_trans/trans_rvb.c.inc
-index 28a5b34bd4..d4374d174e 100644
---- a/target/riscv/insn_trans/trans_rvb.c.inc
-+++ b/target/riscv/insn_trans/trans_rvb.c.inc
-@@ -295,6 +295,18 @@ static bool trans_bfp(DisasContext *ctx, arg_bfp *a)
-     return gen_arith(ctx, a, gen_helper_bfp);
- }
- 
-+static bool trans_bcompress(DisasContext *ctx, arg_bcompress *a)
-+{
-+    REQUIRE_EXT(ctx, RVB);
-+    return gen_arith(ctx, a, gen_helper_bcompress);
-+}
-+
-+static bool trans_bdecompress(DisasContext *ctx, arg_bdecompress *a)
-+{
-+    REQUIRE_EXT(ctx, RVB);
-+    return gen_arith(ctx, a, gen_helper_bdecompress);
-+}
-+
- static bool trans_cmix(DisasContext *ctx, arg_cmix *a)
- {
-     REQUIRE_EXT(ctx, RVB);
-@@ -384,6 +396,18 @@ static bool trans_fsriw(DisasContext *ctx, arg_fsri *a)
-     return gen_quati(ctx, a, gen_fsrw);
- }
- 
-+static bool trans_bcompressw(DisasContext *ctx, arg_bcompressw *a)
-+{
-+    REQUIRE_EXT(ctx, RVB);
-+    return gen_arith(ctx, a, gen_bcompressw);
-+}
-+
-+static bool trans_bdecompressw(DisasContext *ctx, arg_bdecompressw *a)
-+{
-+    REQUIRE_EXT(ctx, RVB);
-+    return gen_arith(ctx, a, gen_bdecompressw);
-+}
-+
- static bool trans_clzw(DisasContext *ctx, arg_clzw *a)
- {
-     REQUIRE_64BIT(ctx);
-diff --git a/target/riscv/translate.c b/target/riscv/translate.c
-index 24bcf8c580..79d93feec3 100644
---- a/target/riscv/translate.c
-+++ b/target/riscv/translate.c
-@@ -792,6 +792,22 @@ GEN_RV64ONLY_INSN_2(shflw)
- GEN_RV64ONLY_INSN_2(unshflw)
- GEN_RV64ONLY_INSN_2(bfpw)
- 
-+static void gen_bcompressw(TCGv ret, TCGv arg1, TCGv arg2)
-+{
-+    tcg_gen_ext32u_tl(arg1, arg1);
-+    tcg_gen_ext32u_tl(arg2, arg2);
-+    gen_helper_bcompress(ret, arg1, arg2);
-+    tcg_gen_ext32s_tl(ret, ret);
-+}
-+
-+static void gen_bdecompressw(TCGv ret, TCGv arg1, TCGv arg2)
-+{
-+    tcg_gen_ext32u_tl(arg1, arg1);
-+    tcg_gen_ext32u_tl(arg2, arg2);
-+    gen_helper_bdecompress(ret, arg1, arg2);
-+    tcg_gen_ext32s_tl(ret, ret);
-+}
-+
- static bool gen_arith(DisasContext *ctx, arg_r *a,
-                       void(*func)(TCGv, TCGv, TCGv))
- {
--- 
-2.33.0
-
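For reference, the bcompress/bdecompress helpers deleted in this patch file (and carried over into the consolidated 0001-merge-riscv-bitmapip-b0p94-version.patch added below) are plain bit gather/scatter loops driven by the mask in rs2. The following is a minimal standalone C sketch of the same semantics; uint64_t stands in for QEMU's target_ulong, and the function names and main() harness are illustrative only, not QEMU code.

    /* Standalone sketch of the bcompress/bdecompress loops from the patch above. */
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t bcompress(uint64_t rs1, uint64_t rs2, int bits)
    {
        uint64_t r = 0;
        int j = 0;
        for (int i = 0; i < bits; i++) {
            if ((rs2 >> i) & 1) {            /* mask bit set: gather rs1[i] into bit j */
                if ((rs1 >> i) & 1)
                    r |= (uint64_t)1 << j;
                j++;
            }
        }
        return r;
    }

    static uint64_t bdecompress(uint64_t rs1, uint64_t rs2, int bits)
    {
        uint64_t r = 0;
        int j = 0;
        for (int i = 0; i < bits; i++) {
            if ((rs2 >> i) & 1) {            /* mask bit set: scatter rs1[j] to bit i */
                if ((rs1 >> j) & 1)
                    r |= (uint64_t)1 << i;
                j++;
            }
        }
        return r;
    }

    int main(void)
    {
        /* Gather the bits of 0xF selected by mask 0b1010, then scatter them back. */
        uint64_t c = bcompress(0xF, 0xA, 64);      /* -> 0x3 */
        uint64_t d = bdecompress(c, 0xA, 64);      /* -> 0xA */
        printf("%llx %llx\n", (unsigned long long)c, (unsigned long long)d);
        return 0;
    }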

+ 0 - 262
recipes-devtools/qemu/files/0009-target-riscv-rvb-add-CRC-bit-matrix-instructions.patch

@@ -1,262 +0,0 @@
-From c3c43d0d0a746b557ca2cf647c7181f8e8aa9b20 Mon Sep 17 00:00:00 2001
-From: "eric.tang" <eric.tang@starfivetech.com>
-Date: Fri, 9 Jul 2021 17:27:44 +0800
-Subject: [PATCH 09/11] target/riscv: rvb: add CRC & bit matrix instructions
-
-Signed-off-by: eric.tang <eric.tang@starfivetech.com>
----
- target/riscv/bitmanip_helper.c          | 137 ++++++++++++++++++++++++
- target/riscv/helper.h                   |  11 ++
- target/riscv/insn32.decode              |  11 ++
- target/riscv/insn_trans/trans_rvb.c.inc |  37 +++++++
- 4 files changed, 196 insertions(+)
-
-diff --git a/target/riscv/bitmanip_helper.c b/target/riscv/bitmanip_helper.c
-index 5e46b69159..7303f42ccb 100644
---- a/target/riscv/bitmanip_helper.c
-+++ b/target/riscv/bitmanip_helper.c
-@@ -440,3 +440,140 @@ target_ulong HELPER(bdecompress)(target_ulong rs1, target_ulong rs2)
- {
-     return do_bdecompress(rs1, rs2, TARGET_LONG_BITS);
- }
-+
-+#define DO_CRC(NAME, VALUE)                             \
-+static target_ulong do_##NAME(target_ulong rs1,         \
-+                             int nbits)                 \
-+{                                                       \
-+    int i;                                              \
-+    target_ulong x = rs1;                               \
-+    for (i = 0; i < nbits; i++)                         \
-+       x = (x >> 1) ^ ((VALUE) & ~((x&1)-1));           \
-+    return x;                                           \
-+}
-+
-+DO_CRC(crc32, 0xEDB88320)
-+DO_CRC(crc32c, 0x82F63B78)
-+
-+target_ulong HELPER(crc32_b)(target_ulong rs1)
-+{
-+    return do_crc32(rs1, 8);
-+}
-+
-+target_ulong HELPER(crc32_h)(target_ulong rs1)
-+{
-+    return do_crc32(rs1, 16);
-+}
-+
-+target_ulong HELPER(crc32_w)(target_ulong rs1)
-+{
-+    return do_crc32(rs1, 32);
-+}
-+
-+target_ulong HELPER(crc32_d)(target_ulong rs1)
-+{
-+    return do_crc32(rs1, 64);
-+}
-+
-+target_ulong HELPER(crc32c_b)(target_ulong rs1)
-+{
-+    return do_crc32c(rs1, 8);
-+}
-+
-+target_ulong HELPER(crc32c_h)(target_ulong rs1)
-+{
-+    return do_crc32c(rs1, 16);
-+}
-+
-+target_ulong HELPER(crc32c_w)(target_ulong rs1)
-+{
-+    return do_crc32c(rs1, 32);
-+}
-+
-+target_ulong HELPER(crc32c_d)(target_ulong rs1)
-+{
-+    return do_crc32c(rs1, 64);
-+}
-+
-+static inline uint64_t popcount(uint64_t val)
-+{
-+    val = (val & 0x5555555555555555U) + ((val >>  1) & 0x5555555555555555U);
-+    val = (val & 0x3333333333333333U) + ((val >>  2) & 0x3333333333333333U);
-+    val = (val & 0x0f0f0f0f0f0f0f0fU) + ((val >>  4) & 0x0f0f0f0f0f0f0f0fU);
-+    val = (val & 0x00ff00ff00ff00ffU) + ((val >>  8) & 0x00ff00ff00ff00ffU);
-+    val = (val & 0x0000ffff0000ffffU) + ((val >> 16) & 0x0000ffff0000ffffU);
-+    val = (val & 0x00000000ffffffffU) + ((val >> 32) & 0x00000000ffffffffU);
-+    return val;
-+}
-+
-+static target_ulong do_bmatflip(target_ulong rs1,
-+                                int bits)
-+{
-+    target_ulong x = rs1;
-+    for (int i = 0; i < 3; i++)
-+        x = do_shfl(x, 31, bits);
-+    return x;
-+}
-+
-+static target_ulong do_bmatxor(target_ulong rs1,
-+                               target_ulong rs2,
-+                               int bits)
-+{
-+    int i;
-+    uint8_t u[8];
-+    uint8_t v[8];
-+    uint64_t x = 0;
-+
-+    target_ulong rs2t = do_bmatflip(rs2, bits);
-+
-+    for (i = 0; i < 8; i++) {
-+        u[i] = rs1 >> (i * 8);
-+        v[i] = rs2t >> (i * 8);
-+    }
-+
-+    for (int i = 0; i < 64; i++) {
-+        if (popcount(u[i / 8] & v[i % 8]) & 1)
-+            x |= 1LL << i;
-+    }
-+
-+    return x;
-+}
-+
-+static target_ulong do_bmator(target_ulong rs1,
-+                              target_ulong rs2,
-+                              int bits)
-+{
-+    int i;
-+    uint8_t u[8];
-+    uint8_t v[8];
-+    uint64_t x = 0;
-+
-+    target_ulong rs2t = do_bmatflip(rs2, bits);
-+
-+    for (i = 0; i < 8; i++) {
-+        u[i] = rs1 >> (i * 8);
-+        v[i] = rs2t >> (i * 8);
-+    }
-+
-+    for (int i = 0; i < 64; i++) {
-+        if ((u[i / 8] & v[i % 8]) != 0)
-+            x |= 1LL << i;
-+    }
-+
-+    return x;
-+}
-+
-+target_ulong HELPER(bmatflip)(target_ulong rs1)
-+{
-+    return do_bmatflip(rs1, TARGET_LONG_BITS);
-+}
-+
-+target_ulong HELPER(bmatxor)(target_ulong rs1, target_ulong rs2)
-+{
-+    return do_bmatxor(rs1, rs2, TARGET_LONG_BITS);
-+}
-+
-+target_ulong HELPER(bmator)(target_ulong rs1, target_ulong rs2)
-+{
-+    return do_bmator(rs1, rs2, TARGET_LONG_BITS);
-+}
-diff --git a/target/riscv/helper.h b/target/riscv/helper.h
-index 577f9f6811..815609c084 100644
---- a/target/riscv/helper.h
-+++ b/target/riscv/helper.h
-@@ -78,6 +78,17 @@ DEF_HELPER_FLAGS_2(bfp, TCG_CALL_NO_RWG_SE, tl, tl, tl)
- DEF_HELPER_FLAGS_2(bfpw, TCG_CALL_NO_RWG_SE, tl, tl, tl)
- DEF_HELPER_FLAGS_2(bcompress, TCG_CALL_NO_RWG_SE, tl, tl, tl)
- DEF_HELPER_FLAGS_2(bdecompress, TCG_CALL_NO_RWG_SE, tl, tl, tl)
-+DEF_HELPER_FLAGS_2(bmatxor, TCG_CALL_NO_RWG_SE, tl, tl, tl)
-+DEF_HELPER_FLAGS_2(bmator, TCG_CALL_NO_RWG_SE, tl, tl, tl)
-+DEF_HELPER_FLAGS_1(bmatflip, TCG_CALL_NO_RWG_SE, tl, tl)
-+DEF_HELPER_FLAGS_1(crc32_b, TCG_CALL_NO_RWG_SE, tl, tl)
-+DEF_HELPER_FLAGS_1(crc32_h, TCG_CALL_NO_RWG_SE, tl, tl)
-+DEF_HELPER_FLAGS_1(crc32_w, TCG_CALL_NO_RWG_SE, tl, tl)
-+DEF_HELPER_FLAGS_1(crc32_d, TCG_CALL_NO_RWG_SE, tl, tl)
-+DEF_HELPER_FLAGS_1(crc32c_b, TCG_CALL_NO_RWG_SE, tl, tl)
-+DEF_HELPER_FLAGS_1(crc32c_h, TCG_CALL_NO_RWG_SE, tl, tl)
-+DEF_HELPER_FLAGS_1(crc32c_w, TCG_CALL_NO_RWG_SE, tl, tl)
-+DEF_HELPER_FLAGS_1(crc32c_d, TCG_CALL_NO_RWG_SE, tl, tl)
- DEF_HELPER_FLAGS_3(cmov, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl)
- DEF_HELPER_FLAGS_3(fsl, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl)
- DEF_HELPER_FLAGS_3(fsr, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl)
-diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
-index 8aaca89ce9..55dc56aa2a 100644
---- a/target/riscv/insn32.decode
-+++ b/target/riscv/insn32.decode
-@@ -670,8 +670,17 @@ vamomaxud_v     11100 . . ..... ..... 111 ..... 0101111 @r_wdvm
- clz        011000 000000 ..... 001 ..... 0010011 @r2
- ctz        011000 000001 ..... 001 ..... 0010011 @r2
- cpop       011000 000010 ..... 001 ..... 0010011 @r2
-+bmatflip   011000 000011 ..... 001 ..... 0010011 @r2
- sext_b     011000 000100 ..... 001 ..... 0010011 @r2
- sext_h     011000 000101 ..... 001 ..... 0010011 @r2
-+crc32_b    0110000 10000 ..... 001 ..... 0010011 @r2
-+crc32_h    0110000 10001 ..... 001 ..... 0010011 @r2
-+crc32_w    0110000 10010 ..... 001 ..... 0010011 @r2
-+crc32_d    0110000 10011 ..... 001 ..... 0010011 @r2
-+crc32c_b   0110000 11000 ..... 001 ..... 0010011 @r2
-+crc32c_h   0110000 11001 ..... 001 ..... 0010011 @r2
-+crc32c_w   0110000 11010 ..... 001 ..... 0010011 @r2
-+crc32c_d   0110000 11011 ..... 001 ..... 0010011 @r2
- 
- andn       0100000 .......... 111 ..... 0110011 @r
- orn        0100000 .......... 110 ..... 0110011 @r
-@@ -711,6 +720,8 @@ fsl        .....10 .......... 001 ..... 0110011 @r3
- fsr        .....10 .......... 101 ..... 0110011 @r3
- bcompress    0000100 .......... 110 ..... 0110011 @r
- bdecompress  0100100 .......... 110 ..... 0110011 @r
-+bmator       0000100 .......... 011 ..... 0110011 @r
-+bmatxor      0100100 .......... 011 ..... 0110011 @r
- 
- fsri       .....1 ........... 101 ..... 0010011 @r3_imm
- bseti      001010 ........... 001 ..... 0010011 @sh6
-diff --git a/target/riscv/insn_trans/trans_rvb.c.inc b/target/riscv/insn_trans/trans_rvb.c.inc
-index d4374d174e..fde91c1953 100644
---- a/target/riscv/insn_trans/trans_rvb.c.inc
-+++ b/target/riscv/insn_trans/trans_rvb.c.inc
-@@ -408,6 +408,43 @@ static bool trans_bdecompressw(DisasContext *ctx, arg_bdecompressw *a)
-     return gen_arith(ctx, a, gen_bdecompressw);
- }
- 
-+#define GEN_TRANS_CRC(NAME)                                     \
-+static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a)      \
-+{                                                               \
-+    REQUIRE_EXT(ctx, RVB);                                      \
-+    return gen_unary(ctx, a, gen_helper_##NAME);                \
-+}                                                               \
-+
-+GEN_TRANS_CRC(crc32_b)
-+GEN_TRANS_CRC(crc32_h)
-+GEN_TRANS_CRC(crc32_w)
-+GEN_TRANS_CRC(crc32_d)
-+GEN_TRANS_CRC(crc32c_b)
-+GEN_TRANS_CRC(crc32c_h)
-+GEN_TRANS_CRC(crc32c_w)
-+GEN_TRANS_CRC(crc32c_d)
-+
-+static bool trans_bmatflip(DisasContext *ctx, arg_bmatflip *a)
-+{
-+    REQUIRE_64BIT(ctx);
-+    REQUIRE_EXT(ctx, RVB);
-+    return gen_unary(ctx, a, gen_helper_bmatflip);
-+}
-+
-+static bool trans_bmatxor(DisasContext *ctx, arg_bmatxor *a)
-+{
-+    REQUIRE_64BIT(ctx);
-+    REQUIRE_EXT(ctx, RVB);
-+    return gen_arith(ctx, a, gen_helper_bmatxor);
-+}
-+
-+static bool trans_bmator(DisasContext *ctx, arg_bmatxor *a)
-+{
-+    REQUIRE_64BIT(ctx);
-+    REQUIRE_EXT(ctx, RVB);
-+    return gen_arith(ctx, a, gen_helper_bmator);
-+}
-+
- static bool trans_clzw(DisasContext *ctx, arg_clzw *a)
- {
-     REQUIRE_64BIT(ctx);
--- 
-2.33.0
-
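The crc32.* and crc32c.* helpers removed in this patch file (and re-added later in the 0001-merge patch) fold in the reflected polynomials 0xEDB88320 and 0x82F63B78 one bit at a time. Below is a standalone C sketch of that bit-serial step; the name crc_step and the main() harness are illustrative only, not QEMU code.

    /* Bit-serial CRC step used by the crc32.*/crc32c.* helpers in the patch above:
     * the reflected polynomial is XORed in whenever the LSB is set. */
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t crc_step(uint64_t x, uint64_t poly, int nbits)
    {
        for (int i = 0; i < nbits; i++)
            x = (x >> 1) ^ (poly & ~((x & 1) - 1));   /* poly if LSB set, else 0 */
        return x;
    }

    int main(void)
    {
        /* crc32.b / crc32c.b process 8 bits of rs1 per instruction. */
        printf("%llx\n", (unsigned long long)crc_step(0xAB, 0xEDB88320, 8));
        printf("%llx\n", (unsigned long long)crc_step(0xAB, 0x82F63B78, 8));
        return 0;
    }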

+ 0 - 69
recipes-devtools/qemu/files/0010-target-riscv-rvb-modified-srow-error.patch

@@ -1,69 +0,0 @@
-From 98a9115f2667b9d029fd83da50c42c15d5e40bd6 Mon Sep 17 00:00:00 2001
-From: "eric.tang" <eric.tang@starfivetech.com>
-Date: Tue, 13 Jul 2021 13:15:23 +0800
-Subject: [PATCH 10/11] target/riscv: rvb: modified srow error
-
-    srow rd, rs1, rs2
-    modified the error when rs1 >= 0xffffffff
-
-Signed-off-by: eric.tang <eric.tang@starfivetech.com>
----
- target/riscv/insn_trans/trans_rvb.c.inc |  4 ++--
- target/riscv/translate.c                | 20 ++++++++++++++++++++
- 2 files changed, 22 insertions(+), 2 deletions(-)
-
-diff --git a/target/riscv/insn_trans/trans_rvb.c.inc b/target/riscv/insn_trans/trans_rvb.c.inc
-index fde91c1953..307fb982e0 100644
---- a/target/riscv/insn_trans/trans_rvb.c.inc
-+++ b/target/riscv/insn_trans/trans_rvb.c.inc
-@@ -547,14 +547,14 @@ static bool trans_srow(DisasContext *ctx, arg_srow *a)
- {
-     REQUIRE_64BIT(ctx);
-     REQUIRE_EXT(ctx, RVB);
--    return gen_shiftw(ctx, a, gen_sro);
-+    return gen_shiftw(ctx, a, gen_srow);
- }
- 
- static bool trans_sroiw(DisasContext *ctx, arg_sroiw *a)
- {
-     REQUIRE_64BIT(ctx);
-     REQUIRE_EXT(ctx, RVB);
--    return gen_shiftiw(ctx, a, gen_sro);
-+    return gen_shiftiw(ctx, a, gen_srow);
- }
- 
- static bool trans_rorw(DisasContext *ctx, arg_rorw *a)
-diff --git a/target/riscv/translate.c b/target/riscv/translate.c
-index 79d93feec3..4d713b16e0 100644
---- a/target/riscv/translate.c
-+++ b/target/riscv/translate.c
-@@ -627,6 +627,26 @@ static void gen_sro(TCGv ret, TCGv arg1, TCGv arg2)
-     tcg_gen_not_tl(ret, ret);
- }
- 
-+static void gen_srow(TCGv ret, TCGv arg1, TCGv arg2)
-+{
-+    TCGv_i32 t1 = tcg_temp_new_i32();
-+    TCGv_i32 t2 = tcg_temp_new_i32();
-+
-+    /* truncate to 32-bits */
-+    tcg_gen_trunc_tl_i32(t1, arg1);
-+    tcg_gen_trunc_tl_i32(t2, arg2);
-+
-+    tcg_gen_not_i32(t1, t1);
-+    tcg_gen_shr_i32(t1, t1, t2);
-+    tcg_gen_not_i32(t1, t1);
-+
-+    /* sign-extend 64-bits */
-+    tcg_gen_ext_i32_tl(ret, t1);
-+
-+    tcg_temp_free_i32(t1);
-+    tcg_temp_free_i32(t2);
-+}
-+
- static bool gen_grevi(DisasContext *ctx, arg_grevi *a)
- {
-     TCGv source1 = tcg_temp_new();
--- 
-2.33.0
-
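The corrected gen_srow in this patch implements a 32-bit "shift right ones": the operands are truncated to 32 bits, ones are shifted in from the most-significant side, and the 32-bit result is sign-extended, whereas the previous trans_srow reused the full-width gen_sro and went wrong once rs1 had bits above bit 31 set. A standalone C sketch follows, under the assumption that the shift amount is already masked by the translator's gen_shiftw/gen_shiftiw path; the srow function name and harness are illustrative only.

    /* Sketch of the fixed srow semantics: 32-bit shift-right-ones, sign-extended. */
    #include <stdint.h>
    #include <stdio.h>

    static int64_t srow(uint64_t rs1, unsigned shamt)
    {
        uint32_t x = (uint32_t)rs1;           /* truncate to the low 32 bits */
        x = ~(~x >> (shamt & 31));            /* shift, filling with ones from the top */
        return (int64_t)(int32_t)x;           /* sign-extend the 32-bit result */
    }

    int main(void)
    {
        /* With rs1 >= 0xffffffff the old code operated on the full 64-bit value;
         * the fixed version only sees the low word. */
        printf("%llx\n", (unsigned long long)srow(0x1ffffffffULL, 4)); /* ffffffffffffffff */
        printf("%llx\n", (unsigned long long)srow(0x12345678ULL, 8));  /* ffffffffff123456 */
        return 0;
    }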

+ 0 - 29
recipes-devtools/qemu/files/0011-support-b-extention-on-default-config.patch

@@ -1,29 +0,0 @@
-From 324b218c168ea816bb477127a2fe78d8d17de5b7 Mon Sep 17 00:00:00 2001
-From: "eric.tang" <eric.tang@starfivetech.com>
-Date: Thu, 29 Jul 2021 09:26:34 +0800
-Subject: [PATCH 11/11] support b-extention on default config
-
-Signed-off-by: eric.tang <eric.tang@starfivetech.com>
----
- target/riscv/cpu.c | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
-diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
-index 991a6bb760..95032fcb9f 100644
---- a/target/riscv/cpu.c
-+++ b/target/riscv/cpu.c
-@@ -153,9 +153,9 @@ static void riscv_any_cpu_init(Object *obj)
- {
-     CPURISCVState *env = &RISCV_CPU(obj)->env;
- #if defined(TARGET_RISCV32)
--    set_misa(env, RV32 | RVI | RVM | RVA | RVF | RVD | RVC | RVU);
-+    set_misa(env, RV32 | RVI | RVM | RVA | RVF | RVD | RVC | RVB | RVU);
- #elif defined(TARGET_RISCV64)
--    set_misa(env, RV64 | RVI | RVM | RVA | RVF | RVD | RVC | RVU);
-+    set_misa(env, RV64 | RVI | RVM | RVA | RVF | RVD | RVC | RVB | RVU);
- #endif
-     set_priv_version(env, PRIV_VERSION_1_11_0);
- }
--- 
-2.33.0
-

+ 0 - 0
recipes-devtools/qemu/qemu-native_6.1.0.bb → recipes-devtools/qemu/qemu-native_6.0.0.bb


+ 1 - 4
recipes-devtools/qemu/qemu-system-native_6.1.0.bb → recipes-devtools/qemu/qemu-system-native_6.0.0.bb

@@ -25,10 +25,7 @@ do_install_append() {
     rm -f ${D}${datadir}/qemu/trace-events-all
     rm -rf ${D}${datadir}/qemu/keymaps
     rm -rf ${D}${datadir}/icons/
-    rm -rf ${D}${includedir}/qemu-plugin.h
 
     # Install qmp.py to be used with testimage
-    mkdir -p ${D}${PYTHON_SITEPACKAGES_DIR}/qmp
-
-    install -D ${S}/python/qemu/qmp/* ${D}${PYTHON_SITEPACKAGES_DIR}/qmp/
+    install -D ${S}/python/qemu/qmp.py ${D}${PYTHON_SITEPACKAGES_DIR}/qmp.py
 }

+ 133 - 17
recipes-devtools/qemu/qemu.inc

@@ -14,28 +14,144 @@ inherit pkgconfig ptest
 LIC_FILES_CHKSUM = "file://COPYING;md5=441c28d2cf86e15a37fa47e15a72fbac \
                     file://COPYING.LIB;endline=24;md5=8c5efda6cf1e1b03dcfd0e6c0d271c7f"
 
-S = "${WORKDIR}/git"
-SRCREV = "711c0418c8c1ce3a24346f058b001c4c5a2f0f81"
-SRC_URI = "gitsm://github.com/qemu/qemu.git; \
+SRC_URI = "https://download.qemu.org/${BPN}-${PV}.tar.xz \
            file://powerpc_rom.bin \
            file://run-ptest \
-           file://0001-target-riscv-rvb-Carry-less-multiply-instruction.patch \
-           file://0002-target-riscv-rvb-add-cmix-cmov-instruction.patch \
-           file://0003-target-riscv-rvb-add-funnel-shfit-instruction.patch \
-           file://0004-target-riscv-rvb-add-generalized-shuffle.patch \
-           file://0005-target-riscv-rvb-add-crossbar-permutation-instructio.patch \
-           file://0006-target-riscv-rvb-add-bfp-bfpw-instructions.patch \
-           file://0007-target-riscv-rvb-modified-some-errors-on-some-rv64-o.patch \
-           file://0008-target-riscv-rvb-add-bcompress-bdecompress-instructi.patch \
-           file://0009-target-riscv-rvb-add-CRC-bit-matrix-instructions.patch \
-           file://0010-target-riscv-rvb-modified-srow-error.patch \
-           file://0011-support-b-extention-on-default-config.patch \
-	   "
-
+           file://0001-qemu-Add-missing-wacom-HID-descriptor.patch \
+           file://0003-qemu-Add-addition-environment-space-to-boot-loader-q.patch \
+           file://0004-qemu-disable-Valgrind.patch \
+           file://0006-chardev-connect-socket-to-a-spawned-command.patch \
+           file://0007-apic-fixup-fallthrough-to-PIC.patch \
+           file://0010-configure-Add-pkg-config-handling-for-libgcrypt.patch \
+           file://0001-Add-enable-disable-udev.patch \
+           file://0001-qemu-Do-not-include-file-if-not-exists.patch \
+           file://mmap2.patch \
+           file://determinism.patch \
+           file://0001-tests-meson.build-use-relative-path-to-refer-to-file.patch \
+           file://0001-configure-fix-detection-of-gdbus-codegen.patch \
+           file://0001-vhost-user-gpu-fix-memory-disclosure-in-virgl_cmd_ge.patch \
+           file://0002-vhost-user-gpu-fix-resource-leak-in-vg_resource_crea.patch \
+           file://0003-vhost-user-gpu-fix-memory-leak-in-vg_resource_attach.patch \
+           file://0004-vhost-user-gpu-fix-memory-leak-while-calling-vg_reso.patch \
+           file://0005-vhost-user-gpu-fix-memory-leak-in-virgl_cmd_resource.patch \
+           file://0006-vhost-user-gpu-fix-memory-leak-in-virgl_resource_att.patch \
+           file://0007-vhost-user-gpu-fix-OOB-write-in-virgl_cmd_get_capset.patch \
+           file://0001-linux-user-Tag-vsx-with-ieee128-fpbits.patch \
+           file://CVE-2021-3527-1.patch \
+           file://CVE-2021-3527-2.patch \
+           file://0001-softfloat-add-APIs-to-handle-alternative-sNaN-propag.patch \
+           file://0002-target-riscv-change-the-api-for-single-double-fmin-f.patch \
+           file://0003-target-riscv-support-x-Zfh-in-cpu-option.patch \
+           file://0004-target-riscv-Implement-zfh-extension.patch \
+           file://0005-target-riscv-fix-TB_FLAGS-bits-overlapping-bug-for-r.patch \
+           file://0006-fpu-softfloat-set-invalid-excp-flag-for-RISC-V-mulad.patch \
+           file://0007-target-riscv-Fixup-saturate-subtract-function.patch \
+           file://0008-target-riscv-fix-vrgather-macro-index-variable-type-.patch \
+           file://0009-target-riscv-drop-vector-0.7.1-and-add-1.0-support.patch \
+           file://0010-target-riscv-Use-FIELD_EX32-to-extract-wd-field.patch \
+           file://0011-target-riscv-rvv-1.0-add-mstatus-VS-field.patch \
+           file://0012-target-riscv-rvv-1.0-add-sstatus-VS-field.patch \
+           file://0013-target-riscv-rvv-1.0-introduce-writable-misa.v-field.patch \
+           file://0014-target-riscv-rvv-1.0-add-translation-time-vector-con.patch \
+           file://0015-target-riscv-rvv-1.0-remove-rvv-related-codes-from-f.patch \
+           file://0016-target-riscv-rvv-1.0-add-vcsr-register.patch \
+           file://0017-target-riscv-rvv-1.0-add-vlenb-register.patch \
+           file://0018-target-riscv-rvv-1.0-check-MSTATUS_VS-when-accessing.patch \
+           file://0019-target-riscv-rvv-1.0-remove-MLEN-calculations.patch \
+           file://0020-target-riscv-rvv-1.0-add-fractional-LMUL.patch \
+           file://0021-target-riscv-rvv-1.0-add-VMA-and-VTA.patch \
+           file://0022-target-riscv-rvv-1.0-update-check-functions.patch \
+           file://0023-target-riscv-introduce-more-imm-value-modes-in-trans.patch \
+           file://0024-target-riscv-rvv-1.0-add-translation-time-nan-box-he.patch \
+           file://0025-target-riscv-rvv-1.0-configure-instructions.patch \
+           file://0026-target-riscv-rvv-1.0-stride-load-and-store-instructi.patch \
+           file://0027-target-riscv-rvv-1.0-index-load-and-store-instructio.patch \
+           file://0028-target-riscv-rvv-1.0-fix-address-index-overflow-bug-.patch \
+           file://0029-target-riscv-rvv-1.0-fault-only-first-unit-stride-lo.patch \
+           file://0030-target-riscv-rvv-1.0-amo-operations.patch \
+           file://0031-target-riscv-rvv-1.0-load-store-whole-register-instr.patch \
+           file://0032-target-riscv-rvv-1.0-update-vext_max_elems-for-load-.patch \
+           file://0033-target-riscv-rvv-1.0-take-fractional-LMUL-into-vecto.patch \
+           file://0034-target-riscv-rvv-1.0-floating-point-square-root-inst.patch \
+           file://0035-target-riscv-rvv-1.0-floating-point-classify-instruc.patch \
+           file://0036-target-riscv-rvv-1.0-mask-population-count-instructi.patch \
+           file://0037-target-riscv-rvv-1.0-find-first-set-mask-bit-instruc.patch \
+           file://0038-target-riscv-rvv-1.0-set-X-first-mask-bit-instructio.patch \
+           file://0039-target-riscv-rvv-1.0-iota-instruction.patch \
+           file://0040-target-riscv-rvv-1.0-element-index-instruction.patch \
+           file://0041-target-riscv-rvv-1.0-allow-load-element-with-sign-ex.patch \
+           file://0042-target-riscv-rvv-1.0-register-gather-instructions.patch \
+           file://0043-target-riscv-rvv-1.0-integer-scalar-move-instruction.patch \
+           file://0044-target-riscv-rvv-1.0-floating-point-move-instruction.patch \
+           file://0045-target-riscv-rvv-1.0-floating-point-scalar-move-inst.patch \
+           file://0046-target-riscv-rvv-1.0-whole-register-move-instruction.patch \
+           file://0047-target-riscv-rvv-1.0-integer-extension-instructions.patch \
+           file://0048-target-riscv-rvv-1.0-single-width-averaging-add-and-.patch \
+           file://0049-target-riscv-rvv-1.0-single-width-bit-shift-instruct.patch \
+           file://0050-target-riscv-rvv-1.0-integer-add-with-carry-subtract.patch \
+           file://0051-target-riscv-rvv-1.0-narrowing-integer-right-shift-i.patch \
+           file://0052-target-riscv-rvv-1.0-widening-integer-multiply-add-i.patch \
+           file://0053-target-riscv-rvv-1.0-single-width-saturating-add-and.patch \
+           file://0054-target-riscv-rvv-1.0-integer-comparison-instructions.patch \
+           file://0055-target-riscv-rvv-1.0-floating-point-compare-instruct.patch \
+           file://0056-target-riscv-rvv-1.0-mask-register-logical-instructi.patch \
+           file://0057-target-riscv-rvv-1.0-slide-instructions.patch \
+           file://0058-target-riscv-rvv-1.0-floating-point-slide-instructio.patch \
+           file://0059-target-riscv-rvv-1.0-narrowing-fixed-point-clip-inst.patch \
+           file://0060-target-riscv-rvv-1.0-single-width-floating-point-red.patch \
+           file://0061-target-riscv-rvv-1.0-widening-floating-point-reducti.patch \
+           file://0062-target-riscv-rvv-1.0-single-width-scaling-shift-inst.patch \
+           file://0063-target-riscv-rvv-1.0-remove-widening-saturating-scal.patch \
+           file://0064-target-riscv-rvv-1.0-remove-vmford.vv-and-vmford.vf.patch \
+           file://0065-target-riscv-rvv-1.0-remove-integer-extract-instruct.patch \
+           file://0066-target-riscv-rvv-1.0-floating-point-min-max-instruct.patch \
+           file://0067-target-riscv-introduce-floating-point-rounding-mode-.patch \
+           file://0068-target-riscv-rvv-1.0-floating-point-integer-type-con.patch \
+           file://0069-target-riscv-rvv-1.0-widening-floating-point-integer.patch \
+           file://0070-target-riscv-add-set-round-to-odd-rounding-mode-help.patch \
+           file://0071-target-riscv-rvv-1.0-narrowing-floating-point-intege.patch \
+           file://0072-target-riscv-rvv-1.0-relax-RV_VLEN_MAX-to-1024-bits.patch \
+           file://0073-target-riscv-rvv-1.0-implement-vstart-CSR.patch \
+           file://0074-target-riscv-rvv-1.0-trigger-illegal-instruction-exc.patch \
+           file://0075-target-riscv-rvv-1.0-set-mstatus.SD-bit-when-writing.patch \
+           file://0076-target-riscv-gdb-support-vector-registers-for-rv64-r.patch \
+           file://0077-target-riscv-rvv-1.0-floating-point-reciprocal-squar.patch \
+           file://0078-target-riscv-rvv-1.0-floating-point-reciprocal-estim.patch \
+           file://0079-target-riscv-set-mstatus.SD-bit-when-writing-fp-CSRs.patch \
+           file://0080-target-riscv-rvv-1.0-rename-r2_zimm-to-r2_zimm11.patch \
+           file://0081-target-riscv-rvv-1.0-add-vsetivli-instruction.patch \
+           file://0082-target-riscv-rvv-1.0-add-evl-parameter-to-vext_ldst_.patch \
+           file://0083-target-riscv-rvv-1.0-add-vector-unit-stride-mask-loa.patch \
+           file://0084-target-riscv-rvv-1.0-patch-floating-point-reduction-.patch \
+           file://0085-target-riscv-reformat-sh-format-encoding-for-B-exten.patch \
+           file://0086-target-riscv-rvb-count-leading-trailing-zeros.patch \
+           file://0087-target-riscv-rvb-count-bits-set.patch \
+           file://0088-target-riscv-rvb-logic-with-negate.patch \
+           file://0089-target-riscv-rvb-pack-two-words-into-one-register.patch \
+           file://0090-target-riscv-rvb-min-max-instructions.patch \
+           file://0091-target-riscv-rvb-sign-extend-instructions.patch \
+           file://0092-target-riscv-add-gen_shifti-and-gen_shiftiw-helper-f.patch \
+           file://0093-target-riscv-rvb-single-bit-instructions.patch \
+           file://0094-target-riscv-rvb-shift-ones.patch \
+           file://0095-target-riscv-rvb-rotate-left-right.patch \
+           file://0096-target-riscv-rvb-generalized-reverse.patch \
+           file://0097-target-riscv-rvb-generalized-or-combine.patch \
+           file://0098-target-riscv-rvb-address-calculation.patch \
+           file://0099-target-riscv-rvb-add-shift-with-prefix-zero-extend.patch \
+           file://0100-target-riscv-rvb-support-and-turn-on-B-extension-fro.patch \
+           file://0101-target-riscv-rvb-add-b-ext-version-cpu-option.patch \
+           file://0102-target-riscv-fix-REQUIRE_ZFH-macro-bug.patch \
+           file://0103-linux-user-elfload-Implement-ELF_HWCAP-for-RISC-V.patch \
+           file://0104-target-riscv-Pass-the-same-value-to-oprsz-and-maxsz.patch \
+           file://0105-target-riscv-Backup-restore-mstatus.SD-bit-when-virt.patch \
+           file://0106-target-riscv-Force-to-set-mstatus_hs.-SD-FS-bits-in-.patch \
+           file://0107-target-riscv-Force-to-set-mstatus_hs.-SD-VS-bits-in-.patch \
+           file://0001-merge-riscv-bitmapip-b0p94-version.patch \
+           file://0002-Add-four-cache-csr-instruction.patch \
+           "
 UPSTREAM_CHECK_REGEX = "qemu-(?P<pver>\d+(\.\d+)+)\.tar"
 
 SRC_URI[sha256sum] = "87bc1a471ca24b97e7005711066007d443423d19aacda3d442558ae032fa30b9"
-SRC_URI[sha256sum] = "610c6063177fd3cb9aaa9bd9863df02cf822fa305daefe9166df86b5d90c36d9"
 
 SRC_URI_append_class-target = " file://cross.patch"
 SRC_URI_append_class-nativesdk = " file://cross.patch"

+ 1177 - 0
recipes-devtools/qemu/qemu/0001-merge-riscv-bitmapip-b0p94-version.patch

@@ -0,0 +1,1177 @@
+From aff71be69f74519436b9c03ebe17da66934c9b4c Mon Sep 17 00:00:00 2001
+From: "eric.tang" <eric.tang@starfivetech.com>
+Date: Mon, 27 Sep 2021 16:15:42 +0800
+Subject: [PATCH 1/2] merge riscv bitmapip b0p94 version
+
+Signed-off-by: eric.tang <eric.tang@starfivetech.com>
+---
+ target/riscv/bitmanip_helper.c          | 492 +++++++++++++++++++++++-
+ target/riscv/cpu.c                      |   2 +-
+ target/riscv/helper.h                   |  31 ++
+ target/riscv/insn32-64.decode           |  21 +-
+ target/riscv/insn32.decode              |  63 ++-
+ target/riscv/insn_trans/trans_rvb.c.inc | 211 +++++++++-
+ target/riscv/translate.c                | 126 +++++-
+ 7 files changed, 913 insertions(+), 33 deletions(-)
+
+diff --git a/target/riscv/bitmanip_helper.c b/target/riscv/bitmanip_helper.c
+index 389b52eccd..f8de197a67 100644
+--- a/target/riscv/bitmanip_helper.c
++++ b/target/riscv/bitmanip_helper.c
+@@ -90,13 +90,499 @@ target_ulong HELPER(gorc)(target_ulong rs1, target_ulong rs2)
+ {
+     return do_gorc(rs1, rs2, TARGET_LONG_BITS);
+ }
+-
+ /* RV64-only instruction */
+ #ifdef TARGET_RISCV64
+-
+ target_ulong HELPER(gorcw)(target_ulong rs1, target_ulong rs2)
+ {
+     return do_gorc(rs1, rs2, 32);
+ }
+-
+ #endif
++
++#define DO_CLMULA(NAME, NUM, BODY)                          \
++static target_ulong do_##NAME(target_ulong rs1,             \
++                              target_ulong rs2,             \
++                              int bits)                     \
++{                                                           \
++    target_ulong x = 0;                                     \
++    int i;                                                  \
++                                                            \
++    for(i = NUM; i < bits; i++)                             \
++        if ((rs2 >> i) & 1)                                 \
++            x ^= BODY;                                      \
++                                                            \
++    return x;                                               \
++}
++
++DO_CLMULA(clmul, 0, (rs1 << i))
++DO_CLMULA(clmulh, 1, (rs1 >> (bits - i)))
++DO_CLMULA(clmulr, 0, (rs1 >> (bits - i - 1)))
++
++target_ulong HELPER(clmul)(target_ulong rs1, target_ulong rs2)
++{
++    return do_clmul(rs1, rs2, TARGET_LONG_BITS);
++}
++
++target_ulong HELPER(clmulh)(target_ulong rs1, target_ulong rs2)
++{
++    return do_clmulh(rs1, rs2, TARGET_LONG_BITS);
++}
++
++target_ulong HELPER(clmulr)(target_ulong rs1, target_ulong rs2)
++{
++    return do_clmulr(rs1, rs2, TARGET_LONG_BITS);
++}
++
++static target_ulong do_cmov(target_ulong rs1,
++                            target_ulong rs2,
++                            target_ulong rs3)
++{
++    return rs2 ? rs1 : rs3;
++}
++
++target_ulong HELPER(cmov)(target_ulong rs1, target_ulong rs2, target_ulong rs3)
++{
++    return do_cmov(rs1, rs2, rs3);
++}
++
++static target_ulong do_fsl(target_ulong rs1,
++                           target_ulong rs2,
++                           target_ulong rs3,
++                           int bits)
++{
++    int shamt = rs2 & (2*bits - 1);
++    target_ulong a = rs1, b = rs3;
++
++    if (shamt >= bits) {
++        shamt -= bits;
++        a = rs3;
++        b = rs1;
++    }
++
++    return shamt ? (a << shamt) | (b >> (bits - shamt)) : a;
++}
++
++target_ulong HELPER(fsl)(target_ulong rs1, target_ulong rs2, target_ulong rs3)
++{
++    return do_fsl(rs1, rs2, rs3, TARGET_LONG_BITS);
++}
++
++target_ulong HELPER(fsr)(target_ulong rs1, target_ulong rs2, target_ulong rs3)
++{
++    return do_fsl(rs1, -rs2, rs3, TARGET_LONG_BITS);
++}
++
++static target_ulong do_fslw(target_ulong rs1,
++                            target_ulong rs2,
++                            target_ulong rs3)
++{
++    int shamt = rs2 & 63;
++    target_ulong a = rs1, b = rs3;
++
++    if (shamt >= 32) {
++        shamt -= 32;
++        a = rs3;
++        b = rs1;
++    }
++
++    return shamt ? (a << shamt) | ((b & 0xffffffff) >> (32 - shamt)) : a;
++}
++
++target_ulong HELPER(fslw)(target_ulong rs1, target_ulong rs2, target_ulong rs3)
++{
++    return do_fslw(rs1, rs2, rs3);
++}
++
++target_ulong HELPER(fsrw)(target_ulong rs1, target_ulong rs2, target_ulong rs3)
++{
++    return do_fslw(rs1, -rs2, rs3);
++}
++
++static target_ulong do_shfl(target_ulong rs1,
++                            target_ulong rs2,
++                            int bits)
++{
++    target_ulong x = rs1;
++    int shamt = rs2 & ((bits-1) >> 1);
++    if (shamt & 16)
++        x = (x & 0xFFFF00000000FFFFLL) |
++            ((x & 0x0000FFFF00000000LL) >> 16) |
++            ((x & 0x00000000FFFF0000LL) << 16);
++    if (shamt &  8)
++        x = (x & 0xFF0000FFFF0000FFLL) |
++            ((x & 0x00FF000000FF0000LL) >>  8) |
++            ((x & 0x0000FF000000FF00LL) <<  8);
++    if (shamt &  4)
++        x = (x & 0xF00FF00FF00FF00FLL) |
++            ((x & 0x0F000F000F000F00LL) >>  4) |
++            ((x & 0x00F000F000F000F0LL) <<  4);
++    if (shamt &  2)
++        x = (x & 0xC3C3C3C3C3C3C3C3LL) |
++            ((x & 0x3030303030303030LL) >>  2) |
++            ((x & 0x0C0C0C0C0C0C0C0CLL) <<  2);
++    if (shamt &  1)
++        x = (x & 0x9999999999999999LL) |
++            ((x & 0x4444444444444444LL) >>  1) |
++            ((x & 0x2222222222222222LL) <<  1);
++
++    return x;
++}
++
++static target_ulong do_unshfl(target_ulong rs1,
++                              target_ulong rs2,
++                              int bits)
++{
++    target_ulong x = rs1;
++
++    int shamt = rs2 & ((bits-1) >> 1);
++    if (shamt &  1)
++        x = (x & 0x9999999999999999LL) |
++            ((x & 0x4444444444444444LL) >>  1) |
++            ((x & 0x2222222222222222LL) <<  1);
++    if (shamt &  2)
++        x = (x & 0xC3C3C3C3C3C3C3C3LL) |
++            ((x & 0x3030303030303030LL) >>  2) |
++            ((x & 0x0C0C0C0C0C0C0C0CLL) <<  2);
++    if (shamt &  4)
++        x = (x & 0xF00FF00FF00FF00FLL) |
++            ((x & 0x0F000F000F000F00LL) >>  4) |
++            ((x & 0x00F000F000F000F0LL) <<  4);
++    if (shamt &  8)
++        x = (x & 0xFF0000FFFF0000FFLL) |
++            ((x & 0x00FF000000FF0000LL) >>  8) |
++            ((x & 0x0000FF000000FF00LL) <<  8);
++    if (shamt & 16)
++        x = (x & 0xFFFF00000000FFFFLL) |
++            ((x & 0x0000FFFF00000000LL) >> 16) |
++            ((x & 0x00000000FFFF0000LL) << 16);
++
++    return x;
++}
++
++static target_ulong do_shflw(target_ulong rs1,
++                             target_ulong rs2)
++{
++    target_ulong x = rs1;
++    int shamt = rs2 & 15;
++    if (shamt & 8)
++        x = (x & 0xFF0000FFFF0000FFLL) |
++            ((x & 0x00FF000000FF0000LL) >>  8) |
++            ((x & 0x0000FF000000FF00LL) <<  8);
++    if (shamt & 4)
++        x = (x & 0xF00FF00FF00FF00FLL) |
++            ((x & 0x0F000F000F000F00LL) >>  4) |
++            ((x & 0x00F000F000F000F0LL) <<  4);
++    if (shamt & 2)
++        x = (x & 0xC3C3C3C3C3C3C3C3LL) |
++            ((x & 0x3030303030303030LL) >>  2) |
++            ((x & 0x0C0C0C0C0C0C0C0CLL) <<  2);
++    if (shamt & 1)
++        x = (x & 0x9999999999999999LL) |
++            ((x & 0x4444444444444444LL) >>  1) |
++            ((x & 0x2222222222222222LL) <<  1);
++
++    return x;
++}
++
++static target_ulong do_unshflw(target_ulong rs1,
++                               target_ulong rs2)
++{
++    target_ulong x = rs1;
++    int shamt = rs2 & 15;
++    if (shamt & 1)
++        x = (x & 0x9999999999999999LL) |
++            ((x & 0x4444444444444444LL) >>  1) |
++            ((x & 0x2222222222222222LL) <<  1);
++    if (shamt & 2)
++        x = (x & 0xC3C3C3C3C3C3C3C3LL) |
++            ((x & 0x3030303030303030LL) >>  2) |
++            ((x & 0x0C0C0C0C0C0C0C0CLL) <<  2);
++    if (shamt & 4)
++        x = (x & 0xF00FF00FF00FF00FLL) |
++            ((x & 0x0F000F000F000F00LL) >>  4) |
++            ((x & 0x00F000F000F000F0LL) <<  4);
++    if (shamt & 8)
++        x = (x & 0xFF0000FFFF0000FFLL) |
++            ((x & 0x00FF000000FF0000LL) >>  8) |
++            ((x & 0x0000FF000000FF00LL) <<  8);
++
++    return x;
++}
++
++target_ulong HELPER(shfl)(target_ulong rs1, target_ulong rs2)
++{
++    return do_shfl(rs1, rs2, TARGET_LONG_BITS);
++}
++
++target_ulong HELPER(unshfl)(target_ulong rs1, target_ulong rs2)
++{
++    return do_unshfl(rs1, rs2, TARGET_LONG_BITS);
++}
++
++target_ulong HELPER(shflw)(target_ulong rs1, target_ulong rs2)
++{
++    return do_shflw(rs1, rs2);
++}
++
++target_ulong HELPER(unshflw)(target_ulong rs1, target_ulong rs2)
++{
++    return do_unshflw(rs1, rs2);
++}
++
++static target_ulong do_xperm(target_ulong rs1,
++                             target_ulong rs2,
++                             int sz_log2,
++                             int bits)
++{
++    target_ulong pos = 0;
++    target_ulong r = 0;
++    target_ulong sz = 1LL << sz_log2;
++    target_ulong mask = (1LL << sz) - 1;
++    int i;
++    for (i = 0; i < bits; i += sz) {
++        pos = ((rs2 >> i) & mask) << sz_log2;
++        if (pos < bits)
++            r |= ((rs1 >> pos) & mask) << i;
++    }
++
++    return r;
++}
++
++target_ulong HELPER(xperm_n)(target_ulong rs1, target_ulong rs2)
++{
++    return do_xperm(rs1, rs2, 2, TARGET_LONG_BITS);
++}
++
++target_ulong HELPER(xperm_b)(target_ulong rs1, target_ulong rs2)
++{
++    return do_xperm(rs1, rs2, 3, TARGET_LONG_BITS);
++}
++
++target_ulong HELPER(xperm_h)(target_ulong rs1, target_ulong rs2)
++{
++    return do_xperm(rs1, rs2, 4, TARGET_LONG_BITS);
++}
++
++target_ulong HELPER(xperm_w)(target_ulong rs1, target_ulong rs2)
++{
++    return do_xperm(rs1, rs2, 5, TARGET_LONG_BITS);
++}
++
++static target_ulong do_bfp(target_ulong rs1,
++                           target_ulong rs2,
++                           int bits)
++{
++    target_ulong cfg = rs2 >> (bits/2);
++    if ((cfg >> 30) == 2)
++        cfg = cfg >> 16;
++    int len = (cfg >> 8) & (bits/2 - 1);
++    int off = cfg & (bits - 1);
++    len = len ? len : bits/2;
++    target_ulong mask = ~(~(target_ulong)0 << len) << off;
++    target_ulong data = rs2 << off;
++
++    return (data & mask) | (rs1 & ~mask);
++}
++
++static target_ulong do_bfpw(target_ulong rs1,
++                            target_ulong rs2)
++{
++    target_ulong cfg = rs2 >> 16;
++    int len = (cfg >> 8) & 15;
++    int off = cfg & 31;
++    len = len ? len : 16;
++    target_ulong mask = ~(~(target_ulong)(0) << len) << off;
++    target_ulong data = rs2 << off;
++
++    return (data & mask) | (rs1 & ~mask);
++}
++
++target_ulong HELPER(bfp)(target_ulong rs1, target_ulong rs2)
++{
++    return do_bfp(rs1, rs2, TARGET_LONG_BITS);
++}
++
++target_ulong HELPER(bfpw)(target_ulong rs1, target_ulong rs2)
++{
++    return do_bfpw(rs1, rs2);
++}
++
++static target_ulong do_bcompress(target_ulong rs1,
++                                 target_ulong rs2,
++                                 int bits)
++{
++    target_ulong r = 0;
++    int i, j = 0;
++    for (i = 0; i < bits; i++) {
++        if ((rs2 >> i) & 1) {
++            if ((rs1 >> i) & 1)
++                r |= (target_ulong)1 << j;
++            j++;
++        }
++    }
++
++    return r;
++}
++
++static target_ulong do_bdecompress(target_ulong rs1,
++                                   target_ulong rs2,
++                                   int bits)
++{
++    target_ulong r = 0;
++    int i, j = 0;
++    for (i = 0; i < bits; i++) {
++        if ((rs2 >> i) & 1) {
++            if ((rs1 >> j) & 1)
++                r |= (target_ulong)1 << i;
++            j++;
++        }
++    }
++
++    return r;
++}
++
++target_ulong HELPER(bcompress)(target_ulong rs1, target_ulong rs2)
++{
++    return do_bcompress(rs1, rs2, TARGET_LONG_BITS);
++}
++
++target_ulong HELPER(bdecompress)(target_ulong rs1, target_ulong rs2)
++{
++    return do_bdecompress(rs1, rs2, TARGET_LONG_BITS);
++}
++
++#define DO_CRC(NAME, VALUE)                             \
++static target_ulong do_##NAME(target_ulong rs1,         \
++                             int nbits)                 \
++{                                                       \
++    int i;                                              \
++    target_ulong x = rs1;                               \
++    for (i = 0; i < nbits; i++)                         \
++       x = (x >> 1) ^ ((VALUE) & ~((x&1)-1));           \
++    return x;                                           \
++}
++
++DO_CRC(crc32, 0xEDB88320)
++DO_CRC(crc32c, 0x82F63B78)
++
++target_ulong HELPER(crc32_b)(target_ulong rs1)
++{
++    return do_crc32(rs1, 8);
++}
++
++target_ulong HELPER(crc32_h)(target_ulong rs1)
++{
++    return do_crc32(rs1, 16);
++}
++
++target_ulong HELPER(crc32_w)(target_ulong rs1)
++{
++    return do_crc32(rs1, 32);
++}
++
++target_ulong HELPER(crc32_d)(target_ulong rs1)
++{
++    return do_crc32(rs1, 64);
++}
++
++target_ulong HELPER(crc32c_b)(target_ulong rs1)
++{
++    return do_crc32c(rs1, 8);
++}
++
++target_ulong HELPER(crc32c_h)(target_ulong rs1)
++{
++    return do_crc32c(rs1, 16);
++}
++
++target_ulong HELPER(crc32c_w)(target_ulong rs1)
++{
++    return do_crc32c(rs1, 32);
++}
++
++target_ulong HELPER(crc32c_d)(target_ulong rs1)
++{
++    return do_crc32c(rs1, 64);
++}
++
++static inline uint64_t popcount(uint64_t val)
++{
++    val = (val & 0x5555555555555555U) + ((val >>  1) & 0x5555555555555555U);
++    val = (val & 0x3333333333333333U) + ((val >>  2) & 0x3333333333333333U);
++    val = (val & 0x0f0f0f0f0f0f0f0fU) + ((val >>  4) & 0x0f0f0f0f0f0f0f0fU);
++    val = (val & 0x00ff00ff00ff00ffU) + ((val >>  8) & 0x00ff00ff00ff00ffU);
++    val = (val & 0x0000ffff0000ffffU) + ((val >> 16) & 0x0000ffff0000ffffU);
++    val = (val & 0x00000000ffffffffU) + ((val >> 32) & 0x00000000ffffffffU);
++    return val;
++}
++
++static target_ulong do_bmatflip(target_ulong rs1,
++                                int bits)
++{
++    target_ulong x = rs1;
++    for (int i = 0; i < 3; i++)
++        x = do_shfl(x, 31, bits);
++    return x;
++}
++
++static target_ulong do_bmatxor(target_ulong rs1,
++                               target_ulong rs2,
++                               int bits)
++{
++    int i;
++    uint8_t u[8];
++    uint8_t v[8];
++    uint64_t x = 0;
++
++    target_ulong rs2t = do_bmatflip(rs2, bits);
++
++    for (i = 0; i < 8; i++) {
++        u[i] = rs1 >> (i * 8);
++        v[i] = rs2t >> (i * 8);
++    }
++
++    for (int i = 0; i < 64; i++) {
++        if (popcount(u[i / 8] & v[i % 8]) & 1)
++            x |= 1LL << i;
++    }
++
++    return x;
++}
++
++static target_ulong do_bmator(target_ulong rs1,
++                              target_ulong rs2,
++                              int bits)
++{
++    int i;
++    uint8_t u[8];
++    uint8_t v[8];
++    uint64_t x = 0;
++
++    target_ulong rs2t = do_bmatflip(rs2, bits);
++
++    for (i = 0; i < 8; i++) {
++        u[i] = rs1 >> (i * 8);
++        v[i] = rs2t >> (i * 8);
++    }
++
++    for (int i = 0; i < 64; i++) {
++        if ((u[i / 8] & v[i % 8]) != 0)
++            x |= 1LL << i;
++    }
++
++    return x;
++}
++
++target_ulong HELPER(bmatflip)(target_ulong rs1)
++{
++    return do_bmatflip(rs1, TARGET_LONG_BITS);
++}
++
++target_ulong HELPER(bmatxor)(target_ulong rs1, target_ulong rs2)
++{
++    return do_bmatxor(rs1, rs2, TARGET_LONG_BITS);
++}
++
++target_ulong HELPER(bmator)(target_ulong rs1, target_ulong rs2)
++{
++    return do_bmator(rs1, rs2, TARGET_LONG_BITS);
++}
+diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
+index b6713855d6..1b22fb0f3a 100644
+--- a/target/riscv/cpu.c
++++ b/target/riscv/cpu.c
+@@ -152,7 +152,7 @@ static void set_resetvec(CPURISCVState *env, int resetvec)
+ static void riscv_any_cpu_init(Object *obj)
+ {
+     CPURISCVState *env = &RISCV_CPU(obj)->env;
+-    set_misa(env, RVXLEN | RVI | RVM | RVA | RVF | RVD | RVC | RVU);
++    set_misa(env, RVXLEN | RVI | RVM | RVA | RVF | RVD | RVC | RVB | RVU);
+     set_priv_version(env, PRIV_VERSION_1_11_0);
+ }
+ 
+diff --git a/target/riscv/helper.h b/target/riscv/helper.h
+index 9bbf93d47e..9f0abef25f 100644
+--- a/target/riscv/helper.h
++++ b/target/riscv/helper.h
+@@ -100,6 +100,37 @@ DEF_HELPER_FLAGS_2(gorc, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+ DEF_HELPER_FLAGS_2(grevw, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+ DEF_HELPER_FLAGS_2(gorcw, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+ #endif
++DEF_HELPER_FLAGS_2(clmul, TCG_CALL_NO_RWG_SE, tl, tl, tl)
++DEF_HELPER_FLAGS_2(clmulh, TCG_CALL_NO_RWG_SE, tl, tl, tl)
++DEF_HELPER_FLAGS_2(clmulr, TCG_CALL_NO_RWG_SE, tl, tl, tl)
++DEF_HELPER_FLAGS_2(shfl, TCG_CALL_NO_RWG_SE, tl, tl, tl)
++DEF_HELPER_FLAGS_2(unshfl, TCG_CALL_NO_RWG_SE, tl, tl, tl)
++DEF_HELPER_FLAGS_2(shflw, TCG_CALL_NO_RWG_SE, tl, tl, tl)
++DEF_HELPER_FLAGS_2(unshflw, TCG_CALL_NO_RWG_SE, tl, tl, tl)
++DEF_HELPER_FLAGS_2(xperm_n, TCG_CALL_NO_RWG_SE, tl, tl, tl)
++DEF_HELPER_FLAGS_2(xperm_b, TCG_CALL_NO_RWG_SE, tl, tl, tl)
++DEF_HELPER_FLAGS_2(xperm_h, TCG_CALL_NO_RWG_SE, tl, tl, tl)
++DEF_HELPER_FLAGS_2(xperm_w, TCG_CALL_NO_RWG_SE, tl, tl, tl)
++DEF_HELPER_FLAGS_2(bfp, TCG_CALL_NO_RWG_SE, tl, tl, tl)
++DEF_HELPER_FLAGS_2(bfpw, TCG_CALL_NO_RWG_SE, tl, tl, tl)
++DEF_HELPER_FLAGS_2(bcompress, TCG_CALL_NO_RWG_SE, tl, tl, tl)
++DEF_HELPER_FLAGS_2(bdecompress, TCG_CALL_NO_RWG_SE, tl, tl, tl)
++DEF_HELPER_FLAGS_2(bmatxor, TCG_CALL_NO_RWG_SE, tl, tl, tl)
++DEF_HELPER_FLAGS_2(bmator, TCG_CALL_NO_RWG_SE, tl, tl, tl)
++DEF_HELPER_FLAGS_1(bmatflip, TCG_CALL_NO_RWG_SE, tl, tl)
++DEF_HELPER_FLAGS_1(crc32_b, TCG_CALL_NO_RWG_SE, tl, tl)
++DEF_HELPER_FLAGS_1(crc32_h, TCG_CALL_NO_RWG_SE, tl, tl)
++DEF_HELPER_FLAGS_1(crc32_w, TCG_CALL_NO_RWG_SE, tl, tl)
++DEF_HELPER_FLAGS_1(crc32_d, TCG_CALL_NO_RWG_SE, tl, tl)
++DEF_HELPER_FLAGS_1(crc32c_b, TCG_CALL_NO_RWG_SE, tl, tl)
++DEF_HELPER_FLAGS_1(crc32c_h, TCG_CALL_NO_RWG_SE, tl, tl)
++DEF_HELPER_FLAGS_1(crc32c_w, TCG_CALL_NO_RWG_SE, tl, tl)
++DEF_HELPER_FLAGS_1(crc32c_d, TCG_CALL_NO_RWG_SE, tl, tl)
++DEF_HELPER_FLAGS_3(cmov, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl)
++DEF_HELPER_FLAGS_3(fsl, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl)
++DEF_HELPER_FLAGS_3(fsr, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl)
++DEF_HELPER_FLAGS_3(fslw, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl)
++DEF_HELPER_FLAGS_3(fsrw, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl)
+ 
+ /* Special functions */
+ DEF_HELPER_3(csrrw, tl, env, tl, tl)
+diff --git a/target/riscv/insn32-64.decode b/target/riscv/insn32-64.decode
+index 2c3313531f..b5ab197edc 100644
+--- a/target/riscv/insn32-64.decode
++++ b/target/riscv/insn32-64.decode
+@@ -21,7 +21,11 @@
+ 
+ %sh5    20:5
+ 
+-@sh5     .......  ..... .....  ... ..... ....... &shift  shamt=%sh5      %rs1 %rd
++
++# Formats 64:
++@sh      ......  ...... .....  ... ..... ....... &shift  shamt=%sh7     %rs1 %rd
++@sh5     .......  ..... .....  ... ..... ....... &shift  shamt=%sh5     %rs1 %rd
++@r3w_imm ..... .  ...... ..... ... ..... ....... &r3i %rs3 imm=%sh5 	%rs1 %rd
+ 
+ # *** RV64I Base Instruction Set (in addition to RV32I) ***
+ lwu      ............   ..... 110 ..... 0000011 @i
+@@ -93,11 +97,17 @@ fcvt_lu_h  1100010  00011 ..... ... ..... 1010011 @r2_rm
+ fcvt_h_l   1101010  00010 ..... ... ..... 1010011 @r2_rm
+ fcvt_h_lu  1101010  00011 ..... ... ..... 1010011 @r2_rm
+ 
++
+ # *** RV64B Standard Extension (in addition to RV32B) ***
+ clzw       0110000 00000 ..... 001 ..... 0011011 @r2
+ ctzw       0110000 00001 ..... 001 ..... 0011011 @r2
+ cpopw      0110000 00010 ..... 001 ..... 0011011 @r2
++crc32_d    0110000 10011 ..... 001 ..... 0010011 @r2
++crc32c_d   0110000 11011 ..... 001 ..... 0010011 @r2
++bmatflip   011000 000011 ..... 001 ..... 0010011 @r2
+ 
++bmator     0000100 .......... 011 ..... 0110011 @r
++bmatxor    0100100 .......... 011 ..... 0110011 @r
+ packw      0000100 .......... 100 ..... 0111011 @r
+ packuw     0100100 .......... 100 ..... 0111011 @r
+ bsetw      0010100 .......... 001 ..... 0111011 @r
+@@ -114,7 +124,16 @@ sh1add_uw  0010000 .......... 010 ..... 0111011 @r
+ sh2add_uw  0010000 .......... 100 ..... 0111011 @r
+ sh3add_uw  0010000 .......... 110 ..... 0111011 @r
+ add_uw     0000100 .......... 000 ..... 0111011 @r
++shflw      0000100 .......... 001 ..... 0111011 @r
++unshflw    0000100 .......... 101 ..... 0111011 @r
++xperm_w    0010100 .......... 000 ..... 0110011 @r
++bfpw       0100100 .......... 111 ..... 0111011 @r
++fslw       .....10 .......... 001 ..... 0111011 @r3
++fsrw       .....10 .......... 101 ..... 0111011 @r3
++bcompressw   0000100 .......... 110 ..... 0111011 @r
++bdecompressw 0100100 .......... 110 ..... 0111011 @r
+ 
++fsriw      .....10 .......... 101 ..... 0011011 @r3w_imm
+ bsetiw     0010100 .......... 001 ..... 0011011 @sh5
+ bclriw     0100100 .......... 001 ..... 0011011 @sh5
+ binviw     0110100 .......... 001 ..... 0011011 @sh5
+diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
+index 9a45f2265b..f4b030747c 100644
+--- a/target/riscv/insn32.decode
++++ b/target/riscv/insn32.decode
+@@ -23,6 +23,7 @@
+ %rd        7:5
+ 
+ %sh7    20:7
++%sh6    20:6
+ %csr    20:12
+ %rm     12:3
+ %nf     29:3                     !function=ex_plus_1
+@@ -41,6 +42,8 @@
+ &j    imm rd
+ &r    rd rs1 rs2
+ &r2   rd rs1
++&r3   rd rs1 rs2 rs3
++&r3i  rd rs1 imm rs3
+ &s    imm rs1 rs2
+ &u    imm rd
+ &shift     shamt rs1 rd
+@@ -59,12 +62,16 @@
+ @u       ....................      ..... ....... &u      imm=%imm_u          %rd
+ @j       ....................      ..... ....... &j      imm=%imm_j          %rd
+ 
+-@sh      ......  ...... .....  ... ..... ....... &shift  shamt=%sh7     %rs1 %rd
++
++@sh6     ......  ...... .....  ... ..... ....... &shift  shamt=%sh6     %rs1 %rd
+ @csr     ............   .....  ... ..... .......               %csr     %rs1 %rd
+ 
+ @atom_ld ..... aq:1 rl:1 ..... ........ ..... ....... &atomic rs2=0     %rs1 %rd
+ @atom_st ..... aq:1 rl:1 ..... ........ ..... ....... &atomic %rs2      %rs1 %rd
+ 
++@r3      ..... ..  ..... ..... ... ..... ....... &r3 %rs3 %rs2 %rs1 %rd
++@r3_imm  ..... .  ...... ..... ... ..... ....... &r3i %rs3 imm=%sh6 %rs1 %rd
++
+ @r4_rm   ..... ..  ..... ..... ... ..... ....... %rs3 %rs2 %rs1 %rm %rd
+ @r_rm    .......   ..... ..... ... ..... ....... %rs2 %rs1 %rm %rd
+ @r2_rm   .......   ..... ..... ... ..... ....... %rs1 %rm %rd
+@@ -124,9 +131,9 @@ sltiu    ............     ..... 011 ..... 0010011 @i
+ xori     ............     ..... 100 ..... 0010011 @i
+ ori      ............     ..... 110 ..... 0010011 @i
+ andi     ............     ..... 111 ..... 0010011 @i
+-slli     00000. ......    ..... 001 ..... 0010011 @sh
+-srli     00000. ......    ..... 101 ..... 0010011 @sh
+-srai     01000. ......    ..... 101 ..... 0010011 @sh
++slli     000000 ......    ..... 001 ..... 0010011 @sh6
++srli     000000 ......    ..... 101 ..... 0010011 @sh6
++srai     010000 ......    ..... 101 ..... 0010011 @sh6
+ add      0000000 .....    ..... 000 ..... 0110011 @r
+ sub      0100000 .....    ..... 000 ..... 0110011 @r
+ sll      0000000 .....    ..... 001 ..... 0110011 @r
+@@ -694,8 +701,17 @@ fmv_h_x    1111010  00000 ..... 000 ..... 1010011 @r2
+ clz        011000 000000 ..... 001 ..... 0010011 @r2
+ ctz        011000 000001 ..... 001 ..... 0010011 @r2
+ cpop       011000 000010 ..... 001 ..... 0010011 @r2
++
+ sext_b     011000 000100 ..... 001 ..... 0010011 @r2
+ sext_h     011000 000101 ..... 001 ..... 0010011 @r2
++crc32_b    0110000 10000 ..... 001 ..... 0010011 @r2
++crc32_h    0110000 10001 ..... 001 ..... 0010011 @r2
++crc32_w    0110000 10010 ..... 001 ..... 0010011 @r2
++
++crc32c_b   0110000 11000 ..... 001 ..... 0010011 @r2
++crc32c_h   0110000 11001 ..... 001 ..... 0010011 @r2
++crc32c_w   0110000 11010 ..... 001 ..... 0010011 @r2
++
+ 
+ andn       0100000 .......... 111 ..... 0110011 @r
+ orn        0100000 .......... 110 ..... 0110011 @r
+@@ -720,13 +736,34 @@ gorc       0010100 .......... 101 ..... 0110011 @r
+ sh1add     0010000 .......... 010 ..... 0110011 @r
+ sh2add     0010000 .......... 100 ..... 0110011 @r
+ sh3add     0010000 .......... 110 ..... 0110011 @r
++clmul      0000101 .......... 001 ..... 0110011 @r
++clmulh     0000101 .......... 011 ..... 0110011 @r
++clmulr     0000101 .......... 010 ..... 0110011 @r
++shfl       0000100 .......... 001 ..... 0110011 @r
++unshfl     0000100 .......... 101 ..... 0110011 @r
++xperm_n    0010100 .......... 010 ..... 0110011 @r
++xperm_b    0010100 .......... 100 ..... 0110011 @r
++xperm_h    0010100 .......... 110 ..... 0110011 @r
++bfp        0100100 .......... 111 ..... 0110011 @r
++cmix       .....11 .......... 001 ..... 0110011 @r3
++cmov       .....11 .......... 101 ..... 0110011 @r3
++fsl        .....10 .......... 001 ..... 0110011 @r3
++fsr        .....10 .......... 101 ..... 0110011 @r3
++bcompress    0000100 .......... 110 ..... 0110011 @r
++bdecompress  0100100 .......... 110 ..... 0110011 @r
++
++
++fsri       .....1 ........... 101 ..... 0010011 @r3_imm
++bseti      001010 ........... 001 ..... 0010011 @sh6
++bclri      010010 ........... 001 ..... 0010011 @sh6
++binvi      011010 ........... 001 ..... 0010011 @sh6
++bexti      010010 ........... 101 ..... 0010011 @sh6
++sloi       001000 ........... 001 ..... 0010011 @sh6
++sroi       001000 ........... 101 ..... 0010011 @sh6
++rori       011000 ........... 101 ..... 0010011 @sh6
++grevi      011010 ........... 101 ..... 0010011 @sh6
++gorci      001010 ........... 101 ..... 0010011 @sh6
++shfli      000010 ........... 001 ..... 0010011 @sh6
++unshfli    000010 ........... 101 ..... 0010011 @sh6
++
+ 
+-bseti      00101. ........... 001 ..... 0010011 @sh
+-bclri      01001. ........... 001 ..... 0010011 @sh
+-binvi      01101. ........... 001 ..... 0010011 @sh
+-bexti      01001. ........... 101 ..... 0010011 @sh
+-sloi       00100. ........... 001 ..... 0010011 @sh
+-sroi       00100. ........... 101 ..... 0010011 @sh
+-rori       01100. ........... 101 ..... 0010011 @sh
+-grevi      01101. ........... 101 ..... 0010011 @sh
+-gorci      00101. ........... 101 ..... 0010011 @sh
+diff --git a/target/riscv/insn_trans/trans_rvb.c.inc b/target/riscv/insn_trans/trans_rvb.c.inc
+index d69bda2f7b..394277b02f 100644
+--- a/target/riscv/insn_trans/trans_rvb.c.inc
++++ b/target/riscv/insn_trans/trans_rvb.c.inc
+@@ -237,8 +237,210 @@ GEN_TRANS_SHADD(1)
+ GEN_TRANS_SHADD(2)
+ GEN_TRANS_SHADD(3)
+ 
++
++#define GEN_TRANS_CLMUL(NAME)                                             \
++static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a)                \
++{                                                                         \
++    REQUIRE_EXT(ctx, RVB);                                                \
++    return gen_arith(ctx, a, gen_helper_##NAME);                          \
++}
++
++GEN_TRANS_CLMUL(clmul)
++GEN_TRANS_CLMUL(clmulh)
++GEN_TRANS_CLMUL(clmulr)
++
++static bool trans_shfl(DisasContext *ctx, arg_shfl *a)
++{
++    REQUIRE_EXT(ctx, RVB);
++    return gen_arith(ctx, a, gen_helper_shfl);
++}
++
++static bool trans_unshfl(DisasContext *ctx, arg_unshfl *a)
++{
++    REQUIRE_EXT(ctx, RVB);
++    return gen_arith(ctx, a, gen_helper_unshfl);
++}
++
++static bool trans_shfli(DisasContext *ctx, arg_shfli *a)
++{
++    REQUIRE_EXT(ctx, RVB);
++    if (a->shamt >= (TARGET_LONG_BITS / 2)) {
++        return false;
++    }
++    return gen_shifti(ctx, a, gen_helper_shfl);
++}
++
++static bool trans_unshfli(DisasContext *ctx, arg_unshfli *a)
++{
++    REQUIRE_EXT(ctx, RVB);
++    if (a->shamt >= (TARGET_LONG_BITS / 2)) {
++        return false;
++    }
++    return gen_shifti(ctx, a, gen_helper_unshfl);
++}
++
++#define GEN_TRANS_XPERM(NAME)                                   \
++static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a)      \
++{                                                               \
++    REQUIRE_EXT(ctx, RVB);                                      \
++    return gen_arith(ctx, a, gen_helper_##NAME);                \
++}
++
++GEN_TRANS_XPERM(xperm_n)
++GEN_TRANS_XPERM(xperm_b)
++GEN_TRANS_XPERM(xperm_h)
++
++static bool trans_bfp(DisasContext *ctx, arg_bfp *a)
++{
++    REQUIRE_EXT(ctx, RVB);
++    return gen_arith(ctx, a, gen_helper_bfp);
++}
++
++static bool trans_bcompress(DisasContext *ctx, arg_bcompress *a)
++{
++    REQUIRE_EXT(ctx, RVB);
++    return gen_arith(ctx, a, gen_helper_bcompress);
++}
++
++static bool trans_bdecompress(DisasContext *ctx, arg_bdecompress *a)
++{
++    REQUIRE_EXT(ctx, RVB);
++    return gen_arith(ctx, a, gen_helper_bdecompress);
++}
++
++static bool trans_cmix(DisasContext *ctx, arg_cmix *a)
++{
++    REQUIRE_EXT(ctx, RVB);
++    return gen_quat(ctx, a, gen_cmix);
++}
++
++static bool trans_cmov(DisasContext *ctx, arg_cmov *a)
++{
++    REQUIRE_EXT(ctx, RVB);
++    return gen_quat(ctx, a, gen_helper_cmov);
++}
++
++static bool trans_fsl(DisasContext *ctx, arg_fsl *a)
++{
++    REQUIRE_EXT(ctx, RVB);
++    return gen_quat(ctx, a, gen_helper_fsl);
++}
++
++static bool trans_fsr(DisasContext *ctx, arg_fsr *a)
++{
++    REQUIRE_EXT(ctx, RVB);
++    return gen_quat(ctx, a, gen_helper_fsr);
++}
++
++static bool trans_fsri(DisasContext *ctx, arg_fsri *a)
++{
++    REQUIRE_EXT(ctx, RVB);
++
++    if (a->imm >= 64) {
++        return false;
++    }
++
++    return gen_quati(ctx, a, gen_helper_fsr);
++}
++
++/* RV64-only instructions */
++#ifdef TARGET_RISCV64
++static bool trans_shflw(DisasContext *ctx, arg_shflw *a)
++{
++    REQUIRE_EXT(ctx, RVB);
++    return gen_arith(ctx, a, gen_shflw);
++}
++
++static bool trans_unshflw(DisasContext *ctx, arg_unshflw *a)
++{
++    REQUIRE_EXT(ctx, RVB);
++    return gen_arith(ctx, a, gen_unshflw);
++}
++
++static bool trans_xperm_w(DisasContext *ctx, arg_xperm_w *a)
++{
++    REQUIRE_EXT(ctx, RVB);
++    return gen_arith(ctx, a, gen_helper_xperm_w);
++}
++
++static bool trans_bfpw(DisasContext *ctx, arg_bfpw *a)
++{
++    REQUIRE_EXT(ctx, RVB);
++    return gen_arith(ctx, a, gen_bfpw);
++}
++
++static bool trans_fslw(DisasContext *ctx, arg_fslw *a)
++{
++    REQUIRE_EXT(ctx, RVB);
++    return gen_quat(ctx, a, gen_fslw);
++}
++
++static bool trans_fsrw(DisasContext *ctx, arg_fsrw *a)
++{
++    REQUIRE_EXT(ctx, RVB);
++    return gen_quat(ctx, a, gen_fsrw);
++}
++
++static bool trans_fsriw(DisasContext *ctx, arg_fsri *a)
++{
++    REQUIRE_EXT(ctx, RVB);
++
++    if (a->imm >= 32) {
++        return false;
++    }
++
++    return gen_quati(ctx, a, gen_fsrw);
++}
++
++static bool trans_bcompressw(DisasContext *ctx, arg_bcompressw *a)
++{
++    REQUIRE_EXT(ctx, RVB);
++    return gen_arith(ctx, a, gen_bcompressw);
++}
++
++static bool trans_bdecompressw(DisasContext *ctx, arg_bdecompressw *a)
++{
++    REQUIRE_EXT(ctx, RVB);
++    return gen_arith(ctx, a, gen_bdecompressw);
++}
++#endif
++
++#define GEN_TRANS_CRC(NAME)                                     \
++static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a)      \
++{                                                               \
++    REQUIRE_EXT(ctx, RVB);                                      \
++    return gen_unary(ctx, a, gen_helper_##NAME);                \
++}                                                               \
++
++GEN_TRANS_CRC(crc32_b)
++GEN_TRANS_CRC(crc32_h)
++GEN_TRANS_CRC(crc32_w)
++
++GEN_TRANS_CRC(crc32c_b)
++GEN_TRANS_CRC(crc32c_h)
++GEN_TRANS_CRC(crc32c_w)
+ /* RV64-only instructions */
+ #ifdef TARGET_RISCV64
++GEN_TRANS_CRC(crc32c_d)
++GEN_TRANS_CRC(crc32_d)
++
++static bool trans_bmatflip(DisasContext *ctx, arg_bmatflip *a)
++{
++    REQUIRE_EXT(ctx, RVB);
++    return gen_unary(ctx, a, gen_helper_bmatflip);
++}
++
++static bool trans_bmatxor(DisasContext *ctx, arg_bmatxor *a)
++{
++    REQUIRE_EXT(ctx, RVB);
++    return gen_arith(ctx, a, gen_helper_bmatxor);
++}
++
++static bool trans_bmator(DisasContext *ctx, arg_bmatxor *a)
++{
++    REQUIRE_EXT(ctx, RVB);
++    return gen_arith(ctx, a, gen_helper_bmator);
++}
+ 
+ static bool trans_clzw(DisasContext *ctx, arg_clzw *a)
+ {
+@@ -327,13 +529,13 @@ static bool trans_sloiw(DisasContext *ctx, arg_sloiw *a)
+ static bool trans_srow(DisasContext *ctx, arg_srow *a)
+ {
+     REQUIRE_EXT(ctx, RVB);
+-    return gen_shiftw(ctx, a, gen_sro);
++    return gen_shiftw(ctx, a, gen_srow);
+ }
+ 
+ static bool trans_sroiw(DisasContext *ctx, arg_sroiw *a)
+ {
+     REQUIRE_EXT(ctx, RVB);
+-    return gen_shiftiw(ctx, a, gen_sro);
++    return gen_shiftiw(ctx, a, gen_srow);
+ }
+ 
+ static bool trans_rorw(DisasContext *ctx, arg_rorw *a)
+@@ -404,14 +606,13 @@ static bool trans_slli_uw(DisasContext *ctx, arg_slli_uw *a)
+     gen_get_gpr(source1, a->rs1);
+ 
+     if (a->shamt < 32) {
+-        tcg_gen_deposit_z_i64(source1, source1, a->shamt, 32);
++        tcg_gen_deposit_z_tl(source1, source1, a->shamt, 32);
+     } else {
+-        tcg_gen_shli_i64(source1, source1, a->shamt);
++        tcg_gen_shli_tl(source1, source1, a->shamt);
+     }
+ 
+     gen_set_gpr(a->rd, source1);
+     tcg_temp_free(source1);
+     return true;
+ }
+-
+ #endif
+diff --git a/target/riscv/translate.c b/target/riscv/translate.c
+index 744b4ffaa7..8c581d1ccf 100644
+--- a/target/riscv/translate.c
++++ b/target/riscv/translate.c
+@@ -731,15 +731,15 @@ GEN_SHADD(3)
+ 
+ static void gen_ctzw(TCGv ret, TCGv arg1)
+ {
+-    tcg_gen_ori_i64(ret, arg1, MAKE_64BIT_MASK(32, 32));
+-    tcg_gen_ctzi_i64(ret, ret, 64);
++    tcg_gen_ori_tl(ret, arg1, (target_ulong)MAKE_64BIT_MASK(32, 32));
++    tcg_gen_ctzi_tl(ret, ret, 64);
+ }
+ 
+ static void gen_clzw(TCGv ret, TCGv arg1)
+ {
+-    tcg_gen_ext32u_i64(ret, arg1);
+-    tcg_gen_clzi_i64(ret, ret, 64);
+-    tcg_gen_subi_i64(ret, ret, 32);
++    tcg_gen_ext32u_tl(ret, arg1);
++    tcg_gen_clzi_tl(ret, ret, 64);
++    tcg_gen_subi_tl(ret, ret, 32);
+ }
+ 
+ static void gen_cpopw(TCGv ret, TCGv arg1)
+@@ -751,17 +751,17 @@ static void gen_cpopw(TCGv ret, TCGv arg1)
+ static void gen_packw(TCGv ret, TCGv arg1, TCGv arg2)
+ {
+     TCGv t = tcg_temp_new();
+-    tcg_gen_ext16s_i64(t, arg2);
+-    tcg_gen_deposit_i64(ret, arg1, t, 16, 48);
++    tcg_gen_ext16s_tl(t, arg2);
++    tcg_gen_deposit_tl(ret, arg1, t, 16, 48);
+     tcg_temp_free(t);
+ }
+ 
+ static void gen_packuw(TCGv ret, TCGv arg1, TCGv arg2)
+ {
+     TCGv t = tcg_temp_new();
+-    tcg_gen_shri_i64(t, arg1, 16);
+-    tcg_gen_deposit_i64(ret, arg2, t, 0, 16);
+-    tcg_gen_ext32s_i64(ret, ret);
++    tcg_gen_shri_tl(t, arg1, 16);
++    tcg_gen_deposit_tl(ret, arg2, t, 0, 16);
++    tcg_gen_ext32s_tl(ret, ret);
+     tcg_temp_free(t);
+ }
+ 
+@@ -801,6 +801,26 @@ static void gen_rolw(TCGv ret, TCGv arg1, TCGv arg2)
+     tcg_temp_free_i32(t2);
+ }
+ 
++static void gen_srow(TCGv ret, TCGv arg1, TCGv arg2)
++{
++    TCGv_i32 t1 = tcg_temp_new_i32();
++    TCGv_i32 t2 = tcg_temp_new_i32();
++
++    /* truncate to 32-bits */
++    tcg_gen_trunc_tl_i32(t1, arg1);
++    tcg_gen_trunc_tl_i32(t2, arg2);
++
++    tcg_gen_not_i32(t1, t1);
++    tcg_gen_shr_i32(t1, t1, t2);
++    tcg_gen_not_i32(t1, t1);
++
++    /* sign-extend 64-bits */
++    tcg_gen_ext_i32_tl(ret, t1);
++
++    tcg_temp_free_i32(t1);
++    tcg_temp_free_i32(t2);
++}
++
+ static void gen_grevw(TCGv ret, TCGv arg1, TCGv arg2)
+ {
+     tcg_gen_ext32u_tl(arg1, arg1);
+@@ -836,6 +856,42 @@ static void gen_add_uw(TCGv ret, TCGv arg1, TCGv arg2)
+     tcg_gen_add_tl(ret, arg1, arg2);
+ }
+ 
++#define GEN_RV64ONLY_INSN_3(NAME)                                   \
++static void gen_##NAME(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3)   \
++{                                                                   \
++    gen_helper_##NAME(ret, arg1, arg2, arg3);                       \
++    tcg_gen_ext32s_tl(ret, ret);                                    \
++}                                                                   \
++
++GEN_RV64ONLY_INSN_3(fslw)
++GEN_RV64ONLY_INSN_3(fsrw)
++
++#define GEN_RV64ONLY_INSN_2(NAME)                                   \
++static void gen_##NAME(TCGv ret, TCGv arg1, TCGv arg2)              \
++{                                                                   \
++    gen_helper_##NAME(ret, arg1, arg2);                             \
++    tcg_gen_ext32s_tl(ret, ret);                                    \
++}                                                                   \
++
++GEN_RV64ONLY_INSN_2(shflw)
++GEN_RV64ONLY_INSN_2(unshflw)
++GEN_RV64ONLY_INSN_2(bfpw)
++
++static void gen_bcompressw(TCGv ret, TCGv arg1, TCGv arg2)
++{
++    tcg_gen_ext32u_tl(arg1, arg1);
++    tcg_gen_ext32u_tl(arg2, arg2);
++    gen_helper_bcompress(ret, arg1, arg2);
++    tcg_gen_ext32s_tl(ret, ret);
++}
++
++static void gen_bdecompressw(TCGv ret, TCGv arg1, TCGv arg2)
++{
++    tcg_gen_ext32u_tl(arg1, arg1);
++    tcg_gen_ext32u_tl(arg2, arg2);
++    gen_helper_bdecompress(ret, arg1, arg2);
++    tcg_gen_ext32s_tl(ret, ret);
++}
+ #endif
+ 
+ static bool gen_arith(DisasContext *ctx, arg_r *a,
+@@ -856,6 +912,35 @@ static bool gen_arith(DisasContext *ctx, arg_r *a,
+     return true;
+ }
+ 
++static void gen_cmix(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3)
++{
++    tcg_gen_and_tl(arg1, arg1, arg2);
++    tcg_gen_not_tl(arg2, arg2);
++    tcg_gen_and_tl(arg3, arg3, arg2);
++    tcg_gen_or_tl(ret, arg1, arg3);
++}
++
++static bool gen_quat(DisasContext *ctx, arg_r3 *a,
++                     void(*func)(TCGv, TCGv, TCGv, TCGv))
++{
++    TCGv source1, source2, source3;
++    source1 = tcg_temp_new();
++    source2 = tcg_temp_new();
++    source3 = tcg_temp_new();
++
++    gen_get_gpr(source1, a->rs1);
++    gen_get_gpr(source2, a->rs2);
++    gen_get_gpr(source3, a->rs3);
++
++    (*func)(source1, source1, source2, source3);
++
++    gen_set_gpr(a->rd, source1);
++    tcg_temp_free(source1);
++    tcg_temp_free(source2);
++    tcg_temp_free(source3);
++    return true;
++}
++
+ static bool gen_shift(DisasContext *ctx, arg_r *a,
+                         void(*func)(TCGv, TCGv, TCGv))
+ {
+@@ -874,6 +959,27 @@ static bool gen_shift(DisasContext *ctx, arg_r *a,
+     return true;
+ }
+ 
++static bool gen_quati(DisasContext *ctx, arg_r3i *a,
++                      void(*func)(TCGv, TCGv, TCGv, TCGv))
++{
++    TCGv source1, source2, source3;
++    source1 = tcg_temp_new();
++    source2 = tcg_temp_new();
++    source3 = tcg_temp_new();
++
++    gen_get_gpr(source1, a->rs1);
++    tcg_gen_movi_tl(source2, a->imm);
++    gen_get_gpr(source3, a->rs3);
++
++    (*func)(source1, source1, source2, source3);
++
++    gen_set_gpr(a->rd, source1);
++    tcg_temp_free(source1);
++    tcg_temp_free(source2);
++    tcg_temp_free(source3);
++    return true;
++}
++
+ static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc)
+ {
+     DisasContext *ctx = container_of(dcbase, DisasContext, base);
+-- 
+2.33.1
+

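Note on the bit-manipulation helpers above: gen_cmix is the ternary bit-select routed through gen_quat, i.e. rd = (rs1 & rs2) | (rs3 & ~rs2), with rs2 acting as the per-bit selector. A minimal stand-alone C model of that semantics (illustrative only, not QEMU code):

    #include <stdint.h>
    #include <stdio.h>

    /* Reference model of the operation gen_cmix translates:
     * each result bit comes from rs1 where rs2 is 1, from rs3 where rs2 is 0. */
    static uint64_t cmix_ref(uint64_t rs1, uint64_t rs2, uint64_t rs3)
    {
        return (rs1 & rs2) | (rs3 & ~rs2);
    }

    int main(void)
    {
        /* Low 32 bits selected from rs1, high 32 bits from rs3. */
        uint64_t r = cmix_ref(0xAAAAAAAAAAAAAAAAull,
                              0x00000000FFFFFFFFull,
                              0x5555555555555555ull);
        printf("%016llx\n", (unsigned long long)r);   /* 55555555aaaaaaaa */
        return 0;
    }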
+ 214 - 0
recipes-devtools/qemu/qemu/0001-softfloat-add-APIs-to-handle-alternative-sNaN-propag.patch

@@ -0,0 +1,214 @@
+From 7616e27c73841ac1e8fa031d843bde1325307bab Mon Sep 17 00:00:00 2001
+From: Chih-Min Chao <chihmin.chao@sifive.com>
+Date: Thu, 30 Jul 2020 02:52:23 -0700
+Subject: [PATCH 001/107] softfloat: add APIs to handle alternative sNaN
+ propagation
+
+For "fmax/fmin ft0, ft1, ft2" and if one of the inputs is sNaN,
+  The original logic
+    return NaN and set invalid flag if ft1 == sNaN || ft2 == sNan
+
+  The alternative path
+    set invalid flag if ft1 == sNaN || ft2 == sNaN
+    return NaN if ft1 == sNaN && ft2 == sNaN
+
+   The IEEE 754 spec allows both implementations, and some architectures
+   such as RISC-V choose different definitions between the two spec versions
+   (riscv-spec-v2.2 uses the original behaviour, riscv-spec-20191213 changes
+    to the alternative).
+
+Signed-off-by: Chih-Min Chao <chihmin.chao@sifive.com>
+Message-Id: <1596102747-20226-3-git-send-email-chihmin.chao@sifive.com>
+---
+ fpu/softfloat.c         | 92 +++++++++++++++++++++++++----------------
+ include/fpu/softfloat.h |  6 +++
+ 2 files changed, 63 insertions(+), 35 deletions(-)
+
+diff --git a/fpu/softfloat.c b/fpu/softfloat.c
+index 67cfa0fd82..edaa760310 100644
+--- a/fpu/softfloat.c
++++ b/fpu/softfloat.c
+@@ -894,11 +894,16 @@ static FloatParts return_nan(FloatParts a, float_status *s)
+     return a;
+ }
+ 
+-static FloatParts pick_nan(FloatParts a, FloatParts b, float_status *s)
++static void set_snan_flag(FloatParts a, FloatParts b, float_status *s)
+ {
+     if (is_snan(a.cls) || is_snan(b.cls)) {
+         s->float_exception_flags |= float_flag_invalid;
+     }
++}
++
++static FloatParts pick_nan(FloatParts a, FloatParts b, float_status *s)
++{
++    set_snan_flag(a, b, s);
+ 
+     if (s->default_nan_mode) {
+         return parts_default_nan(s);
+@@ -3091,23 +3096,32 @@ bfloat16 uint16_to_bfloat16(uint16_t a, float_status *status)
+  * and minNumMag() from the IEEE-754 2008.
+  */
+ static FloatParts minmax_floats(FloatParts a, FloatParts b, bool ismin,
+-                                bool ieee, bool ismag, float_status *s)
++                                bool ieee, bool ismag, bool issnan_prop,
++                                float_status *s)
+ {
+     if (unlikely(is_nan(a.cls) || is_nan(b.cls))) {
+         if (ieee) {
+             /* Takes two floating-point values `a' and `b', one of
+              * which is a NaN, and returns the appropriate NaN
+              * result. If either `a' or `b' is a signaling NaN,
+-             * the invalid exception is raised.
++             * the invalid exception is raised but the NaN
++             * propagation is 'shall'.
+              */
+             if (is_snan(a.cls) || is_snan(b.cls)) {
+-                return pick_nan(a, b, s);
+-            } else if (is_nan(a.cls) && !is_nan(b.cls)) {
++                if (issnan_prop) {
++                    return pick_nan(a, b, s);
++                } else {
++                    set_snan_flag(a, b, s);
++                }
++            }
++
++            if (is_nan(a.cls) && !is_nan(b.cls)) {
+                 return b;
+             } else if (is_nan(b.cls) && !is_nan(a.cls)) {
+                 return a;
+             }
+         }
++
+         return pick_nan(a, b, s);
+     } else {
+         int a_exp, b_exp;
+@@ -3161,56 +3175,64 @@ static FloatParts minmax_floats(FloatParts a, FloatParts b, bool ismin,
+     }
+ }
+ 
+-#define MINMAX(sz, name, ismin, isiee, ismag)                           \
++#define MINMAX(sz, name, ismin, isiee, ismag, issnan_prop)              \
+ float ## sz float ## sz ## _ ## name(float ## sz a, float ## sz b,      \
+                                      float_status *s)                   \
+ {                                                                       \
+     FloatParts pa = float ## sz ## _unpack_canonical(a, s);             \
+     FloatParts pb = float ## sz ## _unpack_canonical(b, s);             \
+-    FloatParts pr = minmax_floats(pa, pb, ismin, isiee, ismag, s);      \
++    FloatParts pr = minmax_floats(pa, pb, ismin, isiee, ismag,          \
++                                  issnan_prop, s);                      \
+                                                                         \
+     return float ## sz ## _round_pack_canonical(pr, s);                 \
+ }
+ 
+-MINMAX(16, min, true, false, false)
+-MINMAX(16, minnum, true, true, false)
+-MINMAX(16, minnummag, true, true, true)
+-MINMAX(16, max, false, false, false)
+-MINMAX(16, maxnum, false, true, false)
+-MINMAX(16, maxnummag, false, true, true)
+-
+-MINMAX(32, min, true, false, false)
+-MINMAX(32, minnum, true, true, false)
+-MINMAX(32, minnummag, true, true, true)
+-MINMAX(32, max, false, false, false)
+-MINMAX(32, maxnum, false, true, false)
+-MINMAX(32, maxnummag, false, true, true)
+-
+-MINMAX(64, min, true, false, false)
+-MINMAX(64, minnum, true, true, false)
+-MINMAX(64, minnummag, true, true, true)
+-MINMAX(64, max, false, false, false)
+-MINMAX(64, maxnum, false, true, false)
+-MINMAX(64, maxnummag, false, true, true)
++MINMAX(16, min, true, false, false, true)
++MINMAX(16, minnum, true, true, false, true)
++MINMAX(16, minnum_noprop, true, true, false, false)
++MINMAX(16, minnummag, true, true, true, true)
++MINMAX(16, max, false, false, false, true)
++MINMAX(16, maxnum, false, true, false, true)
++MINMAX(16, maxnum_noprop, false, true, false, false)
++MINMAX(16, maxnummag, false, true, true, true)
++
++MINMAX(32, min, true, false, false, true)
++MINMAX(32, minnum, true, true, false, true)
++MINMAX(32, minnum_noprop, true, true, false, false)
++MINMAX(32, minnummag, true, true, true, true)
++MINMAX(32, max, false, false, false, true)
++MINMAX(32, maxnum, false, true, false, true)
++MINMAX(32, maxnum_noprop, false, true, false, false)
++MINMAX(32, maxnummag, false, true, true, true)
++
++MINMAX(64, min, true, false, false, true)
++MINMAX(64, minnum, true, true, false, true)
++MINMAX(64, minnum_noprop, true, true, false, false)
++MINMAX(64, minnummag, true, true, true, true)
++MINMAX(64, max, false, false, false, true)
++MINMAX(64, maxnum, false, true, false, true)
++MINMAX(64, maxnum_noprop, false, true, false, false)
++MINMAX(64, maxnummag, false, true, true, true)
+ 
+ #undef MINMAX
+ 
+-#define BF16_MINMAX(name, ismin, isiee, ismag)                          \
++#define BF16_MINMAX(name, ismin, isiee, ismag, issnan_prop)             \
+ bfloat16 bfloat16_ ## name(bfloat16 a, bfloat16 b, float_status *s)     \
+ {                                                                       \
+     FloatParts pa = bfloat16_unpack_canonical(a, s);                    \
+     FloatParts pb = bfloat16_unpack_canonical(b, s);                    \
+-    FloatParts pr = minmax_floats(pa, pb, ismin, isiee, ismag, s);      \
++    FloatParts pr = minmax_floats(pa, pb, ismin, isiee, ismag,          \
++                                  issnan_prop, s);                      \
+                                                                         \
+     return bfloat16_round_pack_canonical(pr, s);                        \
+ }
+ 
+-BF16_MINMAX(min, true, false, false)
+-BF16_MINMAX(minnum, true, true, false)
+-BF16_MINMAX(minnummag, true, true, true)
+-BF16_MINMAX(max, false, false, false)
+-BF16_MINMAX(maxnum, false, true, false)
+-BF16_MINMAX(maxnummag, false, true, true)
++BF16_MINMAX(min, true, false, false, true)
++BF16_MINMAX(minnum, true, true, false, true)
++BF16_MINMAX(minnummag, true, true, true, true)
++BF16_MINMAX(max, false, false, false, true)
++BF16_MINMAX(maxnum, false, true, false, true)
++BF16_MINMAX(maxnummag, false, true, true, true)
+ 
+ #undef BF16_MINMAX
+ 
+diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
+index 78ad5ca738..5cd7d44bc9 100644
+--- a/include/fpu/softfloat.h
++++ b/include/fpu/softfloat.h
+@@ -240,6 +240,8 @@ float16 float16_minnum(float16, float16, float_status *status);
+ float16 float16_maxnum(float16, float16, float_status *status);
+ float16 float16_minnummag(float16, float16, float_status *status);
+ float16 float16_maxnummag(float16, float16, float_status *status);
++float16 float16_minnum_noprop(float16, float16, float_status *status);
++float16 float16_maxnum_noprop(float16, float16, float_status *status);
+ float16 float16_sqrt(float16, float_status *status);
+ FloatRelation float16_compare(float16, float16, float_status *status);
+ FloatRelation float16_compare_quiet(float16, float16, float_status *status);
+@@ -586,6 +588,8 @@ float32 float32_minnum(float32, float32, float_status *status);
+ float32 float32_maxnum(float32, float32, float_status *status);
+ float32 float32_minnummag(float32, float32, float_status *status);
+ float32 float32_maxnummag(float32, float32, float_status *status);
++float32 float32_minnum_noprop(float32, float32, float_status *status);
++float32 float32_maxnum_noprop(float32, float32, float_status *status);
+ bool float32_is_quiet_nan(float32, float_status *status);
+ bool float32_is_signaling_nan(float32, float_status *status);
+ float32 float32_silence_nan(float32, float_status *status);
+@@ -775,6 +779,8 @@ float64 float64_minnum(float64, float64, float_status *status);
+ float64 float64_maxnum(float64, float64, float_status *status);
+ float64 float64_minnummag(float64, float64, float_status *status);
+ float64 float64_maxnummag(float64, float64, float_status *status);
++float64 float64_minnum_noprop(float64, float64, float_status *status);
++float64 float64_maxnum_noprop(float64, float64, float_status *status);
+ bool float64_is_quiet_nan(float64 a, float_status *status);
+ bool float64_is_signaling_nan(float64, float_status *status);
+ float64 float64_silence_nan(float64, float_status *status);
+-- 
+2.33.1
+

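The min/max behaviour change described in the patch above can be summarised with a small stand-alone model (illustrative C, not QEMU code): the original path returns a NaN and raises the invalid flag whenever either input is an sNaN, while the *_noprop variants still raise the flag but only return a NaN when both inputs are NaN.

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy model of the two fmin sNaN paths.  cls: 0 = number, 1 = qNaN, 2 = sNaN. */
    static const char *pick_min(int a, int b, bool snan_prop, bool *invalid)
    {
        *invalid = (a == 2 || b == 2);
        if (snan_prop && *invalid)
            return "NaN";             /* original: an sNaN always propagates */
        if (a != 0 && b != 0)
            return "NaN";             /* both inputs are NaN                 */
        if (a != 0)
            return "operand b";       /* one NaN: the numeric operand wins   */
        if (b != 0)
            return "operand a";
        return "min(a, b)";
    }

    int main(void)
    {
        bool inv;
        const char *r;

        r = pick_min(2, 0, true, &inv);   /* fmin(sNaN, 1.0), original path    */
        printf("original:    %s, invalid=%d\n", r, inv);
        r = pick_min(2, 0, false, &inv);  /* fmin(sNaN, 1.0), alternative path */
        printf("alternative: %s, invalid=%d\n", r, inv);
        return 0;
    }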
+ 110 - 0
recipes-devtools/qemu/qemu/0002-Add-four-cache-csr-instruction.patch

@@ -0,0 +1,110 @@
+From a32d71b0a78a30fe9a72db6b3bfaed8c5caab2e5 Mon Sep 17 00:00:00 2001
+From: "yilun.xie" <yilun.xie@starfivetech.com>
+Date: Wed, 29 Sep 2021 16:08:51 +0800
+Subject: [PATCH 2/2] Add four cache csr instruction
+
+---
+ target/riscv/helper.h                   |  7 +++++++
+ target/riscv/insn32.decode              |  9 +++++++++
+ target/riscv/insn_trans/trans_rvi.c.inc | 24 ++++++++++++++++++++++++
+ target/riscv/op_helper.c                |  6 ++++++
+ 4 files changed, 46 insertions(+)
+
+diff --git a/target/riscv/helper.h b/target/riscv/helper.h
+index 9f0abef25f..b14a656877 100644
+--- a/target/riscv/helper.h
++++ b/target/riscv/helper.h
+@@ -136,6 +136,13 @@ DEF_HELPER_FLAGS_3(fsrw, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl)
+ DEF_HELPER_3(csrrw, tl, env, tl, tl)
+ DEF_HELPER_4(csrrs, tl, env, tl, tl, tl)
+ DEF_HELPER_4(csrrc, tl, env, tl, tl, tl)
++
++/* Custom Cache CSR */
++DEF_HELPER_1(cflush_d_l1, void, tl)
++DEF_HELPER_1(cdiscard_d_l1, void, tl)
++DEF_HELPER_1(cflush_d_l2, void, tl)
++DEF_HELPER_1(cdiscard_d_l2, void, tl)
++
+ #ifndef CONFIG_USER_ONLY
+ DEF_HELPER_2(sret, tl, env, tl)
+ DEF_HELPER_2(mret, tl, env, tl)
+diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
+index f4b030747c..b784946c8c 100644
+--- a/target/riscv/insn32.decode
++++ b/target/riscv/insn32.decode
+@@ -96,6 +96,9 @@
+ @sfence_vm  ....... ..... .....   ... ..... ....... %rs1
+ 
+ 
++# Custom Cache CSR
++@csr_cache  ............ ..... ... ..... .......    %rs1
++
+ # *** Privileged Instructions ***
+ ecall       000000000000     00000 000 00000 1110011
+ ebreak      000000000001     00000 000 00000 1110011
+@@ -106,6 +109,12 @@ wfi         0001000    00101 00000 000 00000 1110011
+ sfence_vma  0001001    ..... ..... 000 00000 1110011 @sfence_vma
+ sfence_vm   0001000    00100 ..... 000 00000 1110011 @sfence_vm
+ 
++# *** Custom cache CSR ***
++cflush_d_l1     111111000000 ..... 000 00000 1110011 @csr_cache
++cdiscard_d_l1   111111000010 ..... 000 00000 1110011 @csr_cache
++cflush_d_l2     111111000100 ..... 000 00000 1110011 @csr_cache
++cdiscard_d_l2   111111000110 ..... 000 00000 1110011 @csr_cache
++
+ # *** RV32I Base Instruction Set ***
+ lui      ....................       ..... 0110111 @u
+ auipc    ....................       ..... 0010111 @u
+diff --git a/target/riscv/insn_trans/trans_rvi.c.inc b/target/riscv/insn_trans/trans_rvi.c.inc
+index 7b89420184..bdabeaf376 100644
+--- a/target/riscv/insn_trans/trans_rvi.c.inc
++++ b/target/riscv/insn_trans/trans_rvi.c.inc
+@@ -529,3 +529,27 @@ static bool trans_csrrci(DisasContext *ctx, arg_csrrci *a)
+     RISCV_OP_CSR_POST;
+     return true;
+ }
++
++static bool trans_cflush_d_l1(DisasContext *ctx, arg_cflush_d_l1 *a)
++{
++    gen_helper_cflush_d_l1(cpu_gpr[a->rs1]);
++    return true;
++}
++
++static bool trans_cdiscard_d_l1(DisasContext *ctx, arg_cflush_d_l1 *a)
++{
++    gen_helper_cdiscard_d_l1(cpu_gpr[a->rs1]);
++    return true;
++}
++
++static bool trans_cflush_d_l2(DisasContext *ctx, arg_cflush_d_l1 *a)
++{
++    gen_helper_cflush_d_l2(cpu_gpr[a->rs1]);
++    return true;
++}
++
++static bool trans_cdiscard_d_l2(DisasContext *ctx, arg_cflush_d_l1 *a)
++{
++    gen_helper_cdiscard_d_l2(cpu_gpr[a->rs1]);
++    return true;
++}
+\ No newline at end of file
+diff --git a/target/riscv/op_helper.c b/target/riscv/op_helper.c
+index 1eddcb94de..8d034236ee 100644
+--- a/target/riscv/op_helper.c
++++ b/target/riscv/op_helper.c
+@@ -74,6 +74,12 @@ target_ulong helper_csrrc(CPURISCVState *env, target_ulong src,
+     return val;
+ }
+ 
++void helper_cflush_d_l1(target_ulong rs1) {}
++void helper_cdiscard_d_l1(target_ulong rs1) {}
++void helper_cflush_d_l2(target_ulong rs1) {}
++void helper_cdiscard_d_l2(target_ulong rs1) {}
++
++
+ #ifndef CONFIG_USER_ONLY
+ 
+ target_ulong helper_sret(CPURISCVState *env, target_ulong cpu_pc_deb)
+-- 
+2.33.1
+

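The four decode patterns above all sit in the SYSTEM major opcode (1110011) with funct3 000 and rd fixed to x0, taking the target address in rs1; only funct12 differs. A small encoder sketch using the standard 32-bit instruction field layout (illustrative, funct12 values taken from the decode lines above):

    #include <stdint.h>
    #include <stdio.h>

    /* funct12[31:20] | rs1[19:15] | funct3=000[14:12] | rd=x0[11:7] | opcode 0x73 */
    static uint32_t cache_op(uint32_t funct12, uint32_t rs1)
    {
        return (funct12 << 20) | (rs1 << 15) | 0x73;
    }

    int main(void)
    {
        uint32_t a0 = 10;   /* address register x10 */
        printf("cflush_d_l1   a0: %08x\n", cache_op(0xFC0, a0));   /* fc050073 */
        printf("cdiscard_d_l1 a0: %08x\n", cache_op(0xFC2, a0));   /* fc250073 */
        printf("cflush_d_l2   a0: %08x\n", cache_op(0xFC4, a0));   /* fc450073 */
        printf("cdiscard_d_l2 a0: %08x\n", cache_op(0xFC6, a0));   /* fc650073 */
        return 0;
    }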
+ 53 - 0
recipes-devtools/qemu/qemu/0002-target-riscv-change-the-api-for-single-double-fmin-f.patch

@@ -0,0 +1,53 @@
+From 97a90c01422b8bc1b685d15d6923a72221bb7275 Mon Sep 17 00:00:00 2001
+From: Chih-Min Chao <chihmin.chao@sifive.com>
+Date: Wed, 8 Jul 2020 00:32:02 -0700
+Subject: [PATCH 002/107] target/riscv: change the api for single/double
+ fmin/fmax
+
+The sNaN propagation behavior has been changed since
+cd20cee7 in https://github.com/riscv/riscv-isa-manual
+
+Signed-off-by: Chih-Min Chao <chihmin.chao@sifive.com>
+---
+ target/riscv/fpu_helper.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/target/riscv/fpu_helper.c b/target/riscv/fpu_helper.c
+index 7c4ab92ecb..5d2cec3de3 100644
+--- a/target/riscv/fpu_helper.c
++++ b/target/riscv/fpu_helper.c
+@@ -174,14 +174,14 @@ uint64_t helper_fmin_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
+ {
+     float32 frs1 = check_nanbox_s(rs1);
+     float32 frs2 = check_nanbox_s(rs2);
+-    return nanbox_s(float32_minnum(frs1, frs2, &env->fp_status));
++    return nanbox_s(float32_minnum_noprop(frs1, frs2, &env->fp_status));
+ }
+ 
+ uint64_t helper_fmax_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
+ {
+     float32 frs1 = check_nanbox_s(rs1);
+     float32 frs2 = check_nanbox_s(rs2);
+-    return nanbox_s(float32_maxnum(frs1, frs2, &env->fp_status));
++    return nanbox_s(float32_maxnum_noprop(frs1, frs2, &env->fp_status));
+ }
+ 
+ uint64_t helper_fsqrt_s(CPURISCVState *env, uint64_t rs1)
+@@ -283,12 +283,12 @@ uint64_t helper_fdiv_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+ 
+ uint64_t helper_fmin_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+ {
+-    return float64_minnum(frs1, frs2, &env->fp_status);
++    return float64_minnum_noprop(frs1, frs2, &env->fp_status);
+ }
+ 
+ uint64_t helper_fmax_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+ {
+-    return float64_maxnum(frs1, frs2, &env->fp_status);
++    return float64_maxnum_noprop(frs1, frs2, &env->fp_status);
+ }
+ 
+ uint64_t helper_fcvt_s_d(CPURISCVState *env, uint64_t rs1)
+-- 
+2.33.1
+

+ 38 - 0
recipes-devtools/qemu/qemu/0003-target-riscv-support-x-Zfh-in-cpu-option.patch

@@ -0,0 +1,38 @@
+From e14c75b57170fb7f4bfae09a3bd291bc024c3d3c Mon Sep 17 00:00:00 2001
+From: Chih-Min Chao <chihmin.chao@sifive.com>
+Date: Wed, 1 Jul 2020 21:05:41 -0700
+Subject: [PATCH 003/107] target/riscv: support 'x-Zfh' in cpu option
+
+Signed-off-by: Chih-Min Chao <chihmin.chao@sifive.com>
+---
+ target/riscv/cpu.c | 1 +
+ target/riscv/cpu.h | 1 +
+ 2 files changed, 2 insertions(+)
+
+diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
+index 7d6ed80f6b..636d006f80 100644
+--- a/target/riscv/cpu.c
++++ b/target/riscv/cpu.c
+@@ -545,6 +545,7 @@ static Property riscv_cpu_properties[] = {
+     /* This is experimental so mark with 'x-' */
+     DEFINE_PROP_BOOL("x-h", RISCVCPU, cfg.ext_h, false),
+     DEFINE_PROP_BOOL("x-v", RISCVCPU, cfg.ext_v, false),
++    DEFINE_PROP_BOOL("x-Zfh", RISCVCPU, cfg.ext_zfh, false),
+     DEFINE_PROP_BOOL("Counters", RISCVCPU, cfg.ext_counters, true),
+     DEFINE_PROP_BOOL("Zifencei", RISCVCPU, cfg.ext_ifencei, true),
+     DEFINE_PROP_BOOL("Zicsr", RISCVCPU, cfg.ext_icsr, true),
+diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
+index 0a33d387ba..6362394204 100644
+--- a/target/riscv/cpu.h
++++ b/target/riscv/cpu.h
+@@ -295,6 +295,7 @@ struct RISCVCPU {
+         bool ext_counters;
+         bool ext_ifencei;
+         bool ext_icsr;
++        bool ext_zfh;
+ 
+         char *priv_spec;
+         char *user_spec;
+-- 
+2.33.1
+

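With the 'x-Zfh' property registered above, the half-precision extension can be toggled from the QEMU command line. A typical invocation might look like the following (machine and kernel are placeholders; only the -cpu property comes from the patch):

    qemu-system-riscv64 -M virt -cpu rv64,x-Zfh=true -nographic -kernel <Image>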
+ 979 - 0
recipes-devtools/qemu/qemu/0004-target-riscv-Implement-zfh-extension.patch

@@ -0,0 +1,979 @@
+From 9db41d30248bce990151f2c06c65abb845a844a8 Mon Sep 17 00:00:00 2001
+From: Kito Cheng <kito.cheng@sifive.com>
+Date: Thu, 26 Mar 2020 21:55:37 +0800
+Subject: [PATCH 004/107] target/riscv: Implement zfh extension.
+
+Signed-off-by: Kito Cheng <kito.cheng@sifive.com>
+Signed-off-by: Chih-Min Chao <chihmin.chao@sifive.com>
+---
+ target/riscv/fpu_helper.c                 | 180 ++++++++
+ target/riscv/helper.h                     |  34 ++
+ target/riscv/insn32-64.decode             |   6 +
+ target/riscv/insn32.decode                |  32 ++
+ target/riscv/insn_trans/trans_rvzfh.c.inc | 535 ++++++++++++++++++++++
+ target/riscv/internals.h                  |  16 +
+ target/riscv/translate.c                  |  18 +
+ 7 files changed, 821 insertions(+)
+ create mode 100644 target/riscv/insn_trans/trans_rvzfh.c.inc
+
+diff --git a/target/riscv/fpu_helper.c b/target/riscv/fpu_helper.c
+index 5d2cec3de3..09d37c1384 100644
+--- a/target/riscv/fpu_helper.c
++++ b/target/riscv/fpu_helper.c
+@@ -81,6 +81,15 @@ void helper_set_rounding_mode(CPURISCVState *env, uint32_t rm)
+     set_float_rounding_mode(softrm, &env->fp_status);
+ }
+ 
++static uint64_t do_fmadd_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2,
++                           uint64_t rs3, int flags)
++{
++    float16 frs1 = check_nanbox_h(rs1);
++    float16 frs2 = check_nanbox_h(rs2);
++    float16 frs3 = check_nanbox_h(rs3);
++    return nanbox_h(float16_muladd(frs1, frs2, frs3, flags, &env->fp_status));
++}
++
+ static uint64_t do_fmadd_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2,
+                            uint64_t rs3, int flags)
+ {
+@@ -102,6 +111,12 @@ uint64_t helper_fmadd_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
+     return float64_muladd(frs1, frs2, frs3, 0, &env->fp_status);
+ }
+ 
++uint64_t helper_fmadd_h(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
++                        uint64_t frs3)
++{
++    return do_fmadd_h(env, frs1, frs2, frs3, 0);
++}
++
+ uint64_t helper_fmsub_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
+                         uint64_t frs3)
+ {
+@@ -115,6 +130,12 @@ uint64_t helper_fmsub_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
+                           &env->fp_status);
+ }
+ 
++uint64_t helper_fmsub_h(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
++                        uint64_t frs3)
++{
++    return do_fmadd_h(env, frs1, frs2, frs3, float_muladd_negate_c);
++}
++
+ uint64_t helper_fnmsub_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
+                          uint64_t frs3)
+ {
+@@ -128,6 +149,12 @@ uint64_t helper_fnmsub_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
+                           &env->fp_status);
+ }
+ 
++uint64_t helper_fnmsub_h(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
++                         uint64_t frs3)
++{
++    return do_fmadd_h(env, frs1, frs2, frs3, float_muladd_negate_product);
++}
++
+ uint64_t helper_fnmadd_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
+                          uint64_t frs3)
+ {
+@@ -142,6 +169,13 @@ uint64_t helper_fnmadd_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
+                           float_muladd_negate_product, &env->fp_status);
+ }
+ 
++uint64_t helper_fnmadd_h(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
++                         uint64_t frs3)
++{
++    return do_fmadd_h(env, frs1, frs2, frs3,
++                      float_muladd_negate_c | float_muladd_negate_product);
++}
++
+ uint64_t helper_fadd_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
+ {
+     float32 frs1 = check_nanbox_s(rs1);
+@@ -366,3 +400,149 @@ target_ulong helper_fclass_d(uint64_t frs1)
+ {
+     return fclass_d(frs1);
+ }
++
++uint64_t helper_fadd_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
++{
++    float16 frs1 = check_nanbox_h(rs1);
++    float16 frs2 = check_nanbox_h(rs2);
++    return nanbox_h(float16_add(frs1, frs2, &env->fp_status));
++}
++
++uint64_t helper_fsub_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
++{
++    float16 frs1 = check_nanbox_h(rs1);
++    float16 frs2 = check_nanbox_h(rs2);
++    return nanbox_h(float16_sub(frs1, frs2, &env->fp_status));
++}
++
++uint64_t helper_fmul_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
++{
++    float16 frs1 = check_nanbox_h(rs1);
++    float16 frs2 = check_nanbox_h(rs2);
++    return nanbox_h(float16_mul(frs1, frs2, &env->fp_status));
++}
++
++uint64_t helper_fdiv_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
++{
++    float16 frs1 = check_nanbox_h(rs1);
++    float16 frs2 = check_nanbox_h(rs2);
++    return nanbox_h(float16_div(frs1, frs2, &env->fp_status));
++}
++
++uint64_t helper_fmin_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
++{
++    float16 frs1 = check_nanbox_h(rs1);
++    float16 frs2 = check_nanbox_h(rs2);
++    return nanbox_h(float16_minnum_noprop(frs1, frs2, &env->fp_status));
++}
++
++uint64_t helper_fmax_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
++{
++    float16 frs1 = check_nanbox_h(rs1);
++    float16 frs2 = check_nanbox_h(rs2);
++    return nanbox_h(float16_maxnum_noprop(frs1, frs2, &env->fp_status));
++}
++
++uint64_t helper_fsqrt_h(CPURISCVState *env, uint64_t rs1)
++{
++    float16 frs1 = check_nanbox_h(rs1);
++    return nanbox_h(float16_sqrt(frs1, &env->fp_status));
++}
++
++target_ulong helper_fle_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
++{
++    float16 frs1 = check_nanbox_h(rs1);
++    float16 frs2 = check_nanbox_h(rs2);
++    return float16_le(frs1, frs2, &env->fp_status);
++}
++
++target_ulong helper_flt_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
++{
++    float16 frs1 = check_nanbox_h(rs1);
++    float16 frs2 = check_nanbox_h(rs2);
++    return float16_lt(frs1, frs2, &env->fp_status);
++}
++
++target_ulong helper_feq_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
++{
++    float16 frs1 = check_nanbox_h(rs1);
++    float16 frs2 = check_nanbox_h(rs2);
++    return float16_eq_quiet(frs1, frs2, &env->fp_status);
++}
++
++target_ulong helper_fclass_h(uint64_t rs1)
++{
++    float16 frs1 = check_nanbox_h(rs1);
++    return fclass_h(frs1);
++}
++
++target_ulong helper_fcvt_w_h(CPURISCVState *env, uint64_t rs1)
++{
++    float16 frs1 = check_nanbox_h(rs1);
++    return float16_to_int32(frs1, &env->fp_status);
++}
++
++target_ulong helper_fcvt_wu_h(CPURISCVState *env, uint64_t rs1)
++{
++    float16 frs1 = check_nanbox_h(rs1);
++    return (int32_t)float16_to_uint32(frs1, &env->fp_status);
++}
++
++#if defined(TARGET_RISCV64)
++uint64_t helper_fcvt_l_h(CPURISCVState *env, uint64_t rs1)
++{
++    float16 frs1 = check_nanbox_h(rs1);
++    return float16_to_int64(frs1, &env->fp_status);
++}
++
++uint64_t helper_fcvt_lu_h(CPURISCVState *env, uint64_t rs1)
++{
++    float16 frs1 = check_nanbox_h(rs1);
++    return float16_to_uint64(frs1, &env->fp_status);
++}
++#endif
++
++uint64_t helper_fcvt_h_w(CPURISCVState *env, target_ulong rs1)
++{
++    return nanbox_h(int32_to_float16((int32_t)rs1, &env->fp_status));
++}
++
++uint64_t helper_fcvt_h_wu(CPURISCVState *env, target_ulong rs1)
++{
++    return nanbox_h(uint32_to_float16((uint32_t)rs1, &env->fp_status));
++}
++
++#if defined(TARGET_RISCV64)
++uint64_t helper_fcvt_h_l(CPURISCVState *env, uint64_t rs1)
++{
++    return nanbox_h(int64_to_float16(rs1, &env->fp_status));
++}
++
++uint64_t helper_fcvt_h_lu(CPURISCVState *env, uint64_t rs1)
++{
++    return nanbox_h(uint64_to_float16(rs1, &env->fp_status));
++}
++#endif
++
++uint64_t helper_fcvt_h_s(CPURISCVState *env, uint64_t rs1)
++{
++    float32 frs1 = check_nanbox_s(rs1);
++    return nanbox_h(float32_to_float16(frs1, true, &env->fp_status));
++}
++
++uint64_t helper_fcvt_s_h(CPURISCVState *env, uint64_t rs1)
++{
++    float16 frs1 = check_nanbox_h(rs1);
++    return nanbox_s(float16_to_float32(frs1, true, &env->fp_status));
++}
++
++uint64_t helper_fcvt_h_d(CPURISCVState *env, uint64_t rs1)
++{
++    return nanbox_h(float64_to_float16(rs1, true, &env->fp_status));
++}
++
++uint64_t helper_fcvt_d_h(CPURISCVState *env, uint64_t rs1)
++{
++    float16 frs1 = check_nanbox_h(rs1);
++    return float16_to_float64(frs1, true, &env->fp_status);
++}
+diff --git a/target/riscv/helper.h b/target/riscv/helper.h
+index e3f3f41e89..1104a3540a 100644
+--- a/target/riscv/helper.h
++++ b/target/riscv/helper.h
+@@ -7,12 +7,16 @@ DEF_HELPER_FLAGS_2(set_rounding_mode, TCG_CALL_NO_WG, void, env, i32)
+ /* Floating Point - fused */
+ DEF_HELPER_FLAGS_4(fmadd_s, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
+ DEF_HELPER_FLAGS_4(fmadd_d, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
++DEF_HELPER_FLAGS_4(fmadd_h, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
+ DEF_HELPER_FLAGS_4(fmsub_s, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
+ DEF_HELPER_FLAGS_4(fmsub_d, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
++DEF_HELPER_FLAGS_4(fmsub_h, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
+ DEF_HELPER_FLAGS_4(fnmsub_s, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
+ DEF_HELPER_FLAGS_4(fnmsub_d, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
++DEF_HELPER_FLAGS_4(fnmsub_h, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
+ DEF_HELPER_FLAGS_4(fnmadd_s, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
+ DEF_HELPER_FLAGS_4(fnmadd_d, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
++DEF_HELPER_FLAGS_4(fnmadd_h, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
+ 
+ /* Floating Point - Single Precision */
+ DEF_HELPER_FLAGS_3(fadd_s, TCG_CALL_NO_RWG, i64, env, i64, i64)
+@@ -58,6 +62,36 @@ DEF_HELPER_FLAGS_2(fcvt_d_l, TCG_CALL_NO_RWG, i64, env, i64)
+ DEF_HELPER_FLAGS_2(fcvt_d_lu, TCG_CALL_NO_RWG, i64, env, i64)
+ DEF_HELPER_FLAGS_1(fclass_d, TCG_CALL_NO_RWG_SE, tl, i64)
+ 
++/* Floating Point - Half Precision */
++DEF_HELPER_FLAGS_3(fadd_h, TCG_CALL_NO_RWG, i64, env, i64, i64)
++DEF_HELPER_FLAGS_3(fsub_h, TCG_CALL_NO_RWG, i64, env, i64, i64)
++DEF_HELPER_FLAGS_3(fmul_h, TCG_CALL_NO_RWG, i64, env, i64, i64)
++DEF_HELPER_FLAGS_3(fdiv_h, TCG_CALL_NO_RWG, i64, env, i64, i64)
++DEF_HELPER_FLAGS_3(fmin_h, TCG_CALL_NO_RWG, i64, env, i64, i64)
++DEF_HELPER_FLAGS_3(fmax_h, TCG_CALL_NO_RWG, i64, env, i64, i64)
++DEF_HELPER_FLAGS_2(fsqrt_h, TCG_CALL_NO_RWG, i64, env, i64)
++DEF_HELPER_FLAGS_3(fle_h, TCG_CALL_NO_RWG, tl, env, i64, i64)
++DEF_HELPER_FLAGS_3(flt_h, TCG_CALL_NO_RWG, tl, env, i64, i64)
++DEF_HELPER_FLAGS_3(feq_h, TCG_CALL_NO_RWG, tl, env, i64, i64)
++DEF_HELPER_FLAGS_2(fcvt_s_h, TCG_CALL_NO_RWG, i64, env, i64)
++DEF_HELPER_FLAGS_2(fcvt_h_s, TCG_CALL_NO_RWG, i64, env, i64)
++DEF_HELPER_FLAGS_2(fcvt_d_h, TCG_CALL_NO_RWG, i64, env, i64)
++DEF_HELPER_FLAGS_2(fcvt_h_d, TCG_CALL_NO_RWG, i64, env, i64)
++DEF_HELPER_FLAGS_2(fcvt_w_h, TCG_CALL_NO_RWG, tl, env, i64)
++DEF_HELPER_FLAGS_2(fcvt_wu_h, TCG_CALL_NO_RWG, tl, env, i64)
++#if defined(TARGET_RISCV64)
++DEF_HELPER_FLAGS_2(fcvt_l_h, TCG_CALL_NO_RWG, tl, env, i64)
++DEF_HELPER_FLAGS_2(fcvt_lu_h, TCG_CALL_NO_RWG, tl, env, i64)
++#endif
++DEF_HELPER_FLAGS_2(fcvt_h_w, TCG_CALL_NO_RWG, i64, env, tl)
++DEF_HELPER_FLAGS_2(fcvt_h_wu, TCG_CALL_NO_RWG, i64, env, tl)
++#if defined(TARGET_RISCV64)
++DEF_HELPER_FLAGS_2(fcvt_h_l, TCG_CALL_NO_RWG, i64, env, tl)
++DEF_HELPER_FLAGS_2(fcvt_h_lu, TCG_CALL_NO_RWG, i64, env, tl)
++#endif
++DEF_HELPER_FLAGS_1(fclass_h, TCG_CALL_NO_RWG_SE, tl, i64)
++
++
+ /* Special functions */
+ DEF_HELPER_3(csrrw, tl, env, tl, tl)
+ DEF_HELPER_4(csrrs, tl, env, tl, tl, tl)
+diff --git a/target/riscv/insn32-64.decode b/target/riscv/insn32-64.decode
+index 8157dee8b7..1f5d0b7a5c 100644
+--- a/target/riscv/insn32-64.decode
++++ b/target/riscv/insn32-64.decode
+@@ -86,3 +86,9 @@ fmv_d_x    1111001  00000 ..... 000 ..... 1010011 @r2
+ hlv_wu    0110100  00001   ..... 100 ..... 1110011 @r2
+ hlv_d     0110110  00000   ..... 100 ..... 1110011 @r2
+ hsv_d     0110111  .....   ..... 100 00000 1110011 @r2_s
++
++# *** RV64Zfh Standard Extension (in addition to RV32Zfh) ***
++fcvt_l_h   1100010  00010 ..... ... ..... 1010011 @r2_rm
++fcvt_lu_h  1100010  00011 ..... ... ..... 1010011 @r2_rm
++fcvt_h_l   1101010  00010 ..... ... ..... 1010011 @r2_rm
++fcvt_h_lu  1101010  00011 ..... ... ..... 1010011 @r2_rm
+diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
+index 84080dd18c..8d9064a7a0 100644
+--- a/target/riscv/insn32.decode
++++ b/target/riscv/insn32.decode
+@@ -592,3 +592,35 @@ vcompress_vm    010111 - ..... ..... 010 ..... 1010111 @r
+ 
+ vsetvli         0 ........... ..... 111 ..... 1010111  @r2_zimm
+ vsetvl          1000000 ..... ..... 111 ..... 1010111  @r
++
++# *** RV32Zfh Extension ***
++flh        ............   ..... 001 ..... 0000111 @i
++fsh        .......  ..... ..... 001 ..... 0100111 @s
++fmadd_h    ..... 10 ..... ..... ... ..... 1000011 @r4_rm
++fmsub_h    ..... 10 ..... ..... ... ..... 1000111 @r4_rm
++fnmsub_h   ..... 10 ..... ..... ... ..... 1001011 @r4_rm
++fnmadd_h   ..... 10 ..... ..... ... ..... 1001111 @r4_rm
++fadd_h     0000010  ..... ..... ... ..... 1010011 @r_rm
++fsub_h     0000110  ..... ..... ... ..... 1010011 @r_rm
++fmul_h     0001010  ..... ..... ... ..... 1010011 @r_rm
++fdiv_h     0001110  ..... ..... ... ..... 1010011 @r_rm
++fsqrt_h    0101110  00000 ..... ... ..... 1010011 @r2_rm
++fsgnj_h    0010010  ..... ..... 000 ..... 1010011 @r
++fsgnjn_h   0010010  ..... ..... 001 ..... 1010011 @r
++fsgnjx_h   0010010  ..... ..... 010 ..... 1010011 @r
++fmin_h     0010110  ..... ..... 000 ..... 1010011 @r
++fmax_h     0010110  ..... ..... 001 ..... 1010011 @r
++fcvt_h_s   0100010  00000 ..... ... ..... 1010011 @r2_rm
++fcvt_s_h   0100000  00010 ..... ... ..... 1010011 @r2_rm
++fcvt_h_d   0100010  00001 ..... ... ..... 1010011 @r2_rm
++fcvt_d_h   0100001  00010 ..... ... ..... 1010011 @r2_rm
++fcvt_w_h   1100010  00000 ..... ... ..... 1010011 @r2_rm
++fcvt_wu_h  1100010  00001 ..... ... ..... 1010011 @r2_rm
++fmv_x_h    1110010  00000 ..... 000 ..... 1010011 @r2
++feq_h      1010010  ..... ..... 010 ..... 1010011 @r
++flt_h      1010010  ..... ..... 001 ..... 1010011 @r
++fle_h      1010010  ..... ..... 000 ..... 1010011 @r
++fclass_h   1110010  00000 ..... 001 ..... 1010011 @r2
++fcvt_h_w   1101010  00000 ..... ... ..... 1010011 @r2_rm
++fcvt_h_wu  1101010  00001 ..... ... ..... 1010011 @r2_rm
++fmv_h_x    1111010  00000 ..... 000 ..... 1010011 @r2
+diff --git a/target/riscv/insn_trans/trans_rvzfh.c.inc b/target/riscv/insn_trans/trans_rvzfh.c.inc
+new file mode 100644
+index 0000000000..4c483f6372
+--- /dev/null
++++ b/target/riscv/insn_trans/trans_rvzfh.c.inc
+@@ -0,0 +1,535 @@
++/*
++ * RISC-V translation routines for the RV64Zfh Standard Extension.
++ *
++ * Copyright (c) 2020 Chih-Min Chao, chihmin.chao@sifive.com
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2 or later, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program.  If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#define REQUIRE_ZFH(ctx) do { \
++    return ctx->ext_zfh;      \
++} while (0)
++
++static bool trans_flh(DisasContext *ctx, arg_flh *a)
++{
++    REQUIRE_FPU;
++    REQUIRE_ZFH(ctx);
++
++    TCGv t0 = tcg_temp_new();
++    gen_get_gpr(t0, a->rs1);
++
++    tcg_gen_addi_tl(t0, t0, a->imm);
++
++    tcg_gen_qemu_ld_i64(cpu_fpr[a->rd], t0, ctx->mem_idx, MO_TEUW);
++
++    gen_nanbox_h(cpu_fpr[a->rd], cpu_fpr[a->rd]);
++
++    mark_fs_dirty(ctx);
++    tcg_temp_free(t0);
++    return true;
++}
++
++static bool trans_fsh(DisasContext *ctx, arg_fsh *a)
++{
++    REQUIRE_FPU;
++    REQUIRE_ZFH(ctx);
++
++    TCGv t0 = tcg_temp_new();
++    gen_get_gpr(t0, a->rs1);
++    tcg_gen_addi_tl(t0, t0, a->imm);
++
++    tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], t0, ctx->mem_idx, MO_TEUW);
++
++    mark_fs_dirty(ctx);
++    tcg_temp_free(t0);
++    return true;
++}
++
++static bool trans_fmadd_h(DisasContext *ctx, arg_fmadd_h *a)
++{
++    REQUIRE_FPU;
++    REQUIRE_ZFH(ctx);
++    gen_set_rm(ctx, a->rm);
++    gen_helper_fmadd_h(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
++                       cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
++    mark_fs_dirty(ctx);
++    return true;
++}
++
++static bool trans_fmsub_h(DisasContext *ctx, arg_fmsub_h *a)
++{
++    REQUIRE_FPU;
++    REQUIRE_ZFH(ctx);
++    gen_set_rm(ctx, a->rm);
++    gen_helper_fmsub_h(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
++                       cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
++    mark_fs_dirty(ctx);
++    return true;
++}
++
++static bool trans_fnmsub_h(DisasContext *ctx, arg_fnmsub_h *a)
++{
++    REQUIRE_FPU;
++    REQUIRE_ZFH(ctx);
++    gen_set_rm(ctx, a->rm);
++    gen_helper_fnmsub_h(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
++                        cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
++    mark_fs_dirty(ctx);
++    return true;
++}
++
++static bool trans_fnmadd_h(DisasContext *ctx, arg_fnmadd_h *a)
++{
++    REQUIRE_FPU;
++    REQUIRE_ZFH(ctx);
++    gen_set_rm(ctx, a->rm);
++    gen_helper_fnmadd_h(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
++                        cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
++    mark_fs_dirty(ctx);
++    return true;
++}
++
++static bool trans_fadd_h(DisasContext *ctx, arg_fadd_h *a)
++{
++    REQUIRE_FPU;
++    REQUIRE_ZFH(ctx);
++
++    gen_set_rm(ctx, a->rm);
++    gen_helper_fadd_h(cpu_fpr[a->rd], cpu_env,
++                      cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
++    mark_fs_dirty(ctx);
++    return true;
++}
++
++static bool trans_fsub_h(DisasContext *ctx, arg_fsub_h *a)
++{
++    REQUIRE_FPU;
++    REQUIRE_ZFH(ctx);
++
++    gen_set_rm(ctx, a->rm);
++    gen_helper_fsub_h(cpu_fpr[a->rd], cpu_env,
++                      cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
++    mark_fs_dirty(ctx);
++    return true;
++}
++
++static bool trans_fmul_h(DisasContext *ctx, arg_fmul_h *a)
++{
++    REQUIRE_FPU;
++    REQUIRE_ZFH(ctx);
++
++    gen_set_rm(ctx, a->rm);
++    gen_helper_fmul_h(cpu_fpr[a->rd], cpu_env,
++                      cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
++    mark_fs_dirty(ctx);
++    return true;
++}
++
++static bool trans_fdiv_h(DisasContext *ctx, arg_fdiv_h *a)
++{
++    REQUIRE_FPU;
++    REQUIRE_ZFH(ctx);
++
++    gen_set_rm(ctx, a->rm);
++    gen_helper_fdiv_h(cpu_fpr[a->rd], cpu_env,
++                      cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
++    mark_fs_dirty(ctx);
++    return true;
++}
++
++static bool trans_fsqrt_h(DisasContext *ctx, arg_fsqrt_h *a)
++{
++    REQUIRE_FPU;
++    REQUIRE_ZFH(ctx);
++
++    gen_set_rm(ctx, a->rm);
++    gen_helper_fsqrt_h(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1]);
++    mark_fs_dirty(ctx);
++    return true;
++}
++
++static bool trans_fsgnj_h(DisasContext *ctx, arg_fsgnj_h *a)
++{
++    REQUIRE_FPU;
++    REQUIRE_ZFH(ctx);
++
++    if (a->rs1 == a->rs2) { /* FMOV */
++        gen_check_nanbox_h(cpu_fpr[a->rd], cpu_fpr[a->rs1]);
++    } else {
++        TCGv_i64 rs1 = tcg_temp_new_i64();
++        TCGv_i64 rs2 = tcg_temp_new_i64();
++
++        gen_check_nanbox_h(rs1, cpu_fpr[a->rs1]);
++        gen_check_nanbox_h(rs2, cpu_fpr[a->rs2]);
++
++        /* This formulation retains the nanboxing of rs2. */
++        tcg_gen_deposit_i64(cpu_fpr[a->rd], rs2, rs1, 0, 15);
++        tcg_temp_free_i64(rs1);
++        tcg_temp_free_i64(rs2);
++    }
++
++    mark_fs_dirty(ctx);
++    return true;
++}
++
++static bool trans_fsgnjn_h(DisasContext *ctx, arg_fsgnjn_h *a)
++{
++    TCGv_i64 rs1, rs2, mask;
++
++    REQUIRE_FPU;
++    REQUIRE_ZFH(ctx);
++
++    rs1 = tcg_temp_new_i64();
++    gen_check_nanbox_h(rs1, cpu_fpr[a->rs1]);
++
++    if (a->rs1 == a->rs2) { /* FNEG */
++        tcg_gen_xori_i64(cpu_fpr[a->rd], rs1, MAKE_64BIT_MASK(15, 1));
++    } else {
++        rs2 = tcg_temp_new_i64();
++        gen_check_nanbox_h(rs2, cpu_fpr[a->rs2]);
++
++        /*
++         * Replace bit 15 in rs1 with inverse in rs2.
++         * This formulation retains the nanboxing of rs1.
++         */
++        mask = tcg_const_i64(~MAKE_64BIT_MASK(15, 1));
++        tcg_gen_not_i64(rs2, rs2);
++        tcg_gen_andc_i64(rs2, rs2, mask);
++        tcg_gen_and_i64(rs1, mask, rs1);
++        tcg_gen_or_i64(cpu_fpr[a->rd], rs1, rs2);
++
++        tcg_temp_free_i64(mask);
++        tcg_temp_free_i64(rs2);
++    }
++    mark_fs_dirty(ctx);
++    return true;
++}
++
++static bool trans_fsgnjx_h(DisasContext *ctx, arg_fsgnjx_h *a)
++{
++    TCGv_i64 rs1, rs2;
++
++    REQUIRE_FPU;
++    REQUIRE_ZFH(ctx);
++
++    rs1 = tcg_temp_new_i64();
++    gen_check_nanbox_s(rs1, cpu_fpr[a->rs1]);
++
++    if (a->rs1 == a->rs2) { /* FABS */
++        tcg_gen_andi_i64(cpu_fpr[a->rd], rs1, ~MAKE_64BIT_MASK(15, 1));
++    } else {
++        rs2 = tcg_temp_new_i64();
++        gen_check_nanbox_s(rs2, cpu_fpr[a->rs2]);
++
++        /*
++         * Xor bit 15 in rs1 with that in rs2.
++         * This formulation retains the nanboxing of rs1.
++         */
++        tcg_gen_andi_i64(rs2, rs2, MAKE_64BIT_MASK(15, 1));
++        tcg_gen_xor_i64(cpu_fpr[a->rd], rs1, rs2);
++
++        tcg_temp_free_i64(rs2);
++    }
++
++    mark_fs_dirty(ctx);
++    return true;
++}
++
++static bool trans_fmin_h(DisasContext *ctx, arg_fmin_h *a)
++{
++    REQUIRE_FPU;
++    REQUIRE_ZFH(ctx);
++
++    gen_helper_fmin_h(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
++                      cpu_fpr[a->rs2]);
++    mark_fs_dirty(ctx);
++    return true;
++}
++
++static bool trans_fmax_h(DisasContext *ctx, arg_fmax_h *a)
++{
++    REQUIRE_FPU;
++    REQUIRE_ZFH(ctx);
++
++    gen_helper_fmax_h(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
++                      cpu_fpr[a->rs2]);
++    mark_fs_dirty(ctx);
++    return true;
++}
++
++static bool trans_fcvt_s_h(DisasContext *ctx, arg_fcvt_s_h *a)
++{
++    REQUIRE_FPU;
++    REQUIRE_ZFH(ctx);
++
++    gen_set_rm(ctx, a->rm);
++    gen_helper_fcvt_s_h(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1]);
++
++    mark_fs_dirty(ctx);
++
++    return true;
++}
++
++static bool trans_fcvt_d_h(DisasContext *ctx, arg_fcvt_d_h *a)
++{
++    REQUIRE_FPU;
++    REQUIRE_ZFH(ctx);
++    REQUIRE_EXT(ctx, RVD);
++
++    gen_set_rm(ctx, a->rm);
++    gen_helper_fcvt_d_h(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1]);
++
++    mark_fs_dirty(ctx);
++
++
++    return true;
++}
++
++static bool trans_fcvt_h_s(DisasContext *ctx, arg_fcvt_h_s *a)
++{
++    REQUIRE_FPU;
++    REQUIRE_ZFH(ctx);
++
++    gen_set_rm(ctx, a->rm);
++    gen_helper_fcvt_h_s(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1]);
++
++    mark_fs_dirty(ctx);
++
++    return true;
++}
++
++static bool trans_fcvt_h_d(DisasContext *ctx, arg_fcvt_h_d *a)
++{
++    REQUIRE_FPU;
++    REQUIRE_ZFH(ctx);
++    REQUIRE_EXT(ctx, RVD);
++
++    gen_set_rm(ctx, a->rm);
++    gen_helper_fcvt_h_d(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1]);
++
++    mark_fs_dirty(ctx);
++
++    return true;
++}
++
++static bool trans_feq_h(DisasContext *ctx, arg_feq_h *a)
++{
++    REQUIRE_FPU;
++    REQUIRE_ZFH(ctx);
++    TCGv t0 = tcg_temp_new();
++    gen_helper_feq_h(t0, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
++    gen_set_gpr(a->rd, t0);
++    tcg_temp_free(t0);
++    return true;
++}
++
++static bool trans_flt_h(DisasContext *ctx, arg_flt_h *a)
++{
++    REQUIRE_FPU;
++    REQUIRE_ZFH(ctx);
++    TCGv t0 = tcg_temp_new();
++    gen_helper_flt_h(t0, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
++    gen_set_gpr(a->rd, t0);
++    tcg_temp_free(t0);
++    return true;
++}
++
++static bool trans_fle_h(DisasContext *ctx, arg_fle_h *a)
++{
++    REQUIRE_FPU;
++    REQUIRE_ZFH(ctx);
++    TCGv t0 = tcg_temp_new();
++    gen_helper_fle_h(t0, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
++    gen_set_gpr(a->rd, t0);
++    tcg_temp_free(t0);
++    return true;
++}
++
++static bool trans_fclass_h(DisasContext *ctx, arg_fclass_h *a)
++{
++    REQUIRE_FPU;
++    REQUIRE_ZFH(ctx);
++
++    TCGv t0 = tcg_temp_new();
++
++    gen_helper_fclass_h(t0, cpu_fpr[a->rs1]);
++
++    gen_set_gpr(a->rd, t0);
++    tcg_temp_free(t0);
++
++    return true;
++}
++
++static bool trans_fcvt_w_h(DisasContext *ctx, arg_fcvt_w_h *a)
++{
++    REQUIRE_FPU;
++    REQUIRE_ZFH(ctx);
++
++    TCGv t0 = tcg_temp_new();
++    gen_set_rm(ctx, a->rm);
++    gen_helper_fcvt_w_h(t0, cpu_env, cpu_fpr[a->rs1]);
++    gen_set_gpr(a->rd, t0);
++    tcg_temp_free(t0);
++
++    return true;
++}
++
++static bool trans_fcvt_wu_h(DisasContext *ctx, arg_fcvt_wu_h *a)
++{
++    REQUIRE_FPU;
++    REQUIRE_ZFH(ctx);
++
++    TCGv t0 = tcg_temp_new();
++    gen_set_rm(ctx, a->rm);
++    gen_helper_fcvt_wu_h(t0, cpu_env, cpu_fpr[a->rs1]);
++    gen_set_gpr(a->rd, t0);
++    tcg_temp_free(t0);
++
++    return true;
++}
++
++static bool trans_fcvt_h_w(DisasContext *ctx, arg_fcvt_h_w *a)
++{
++    REQUIRE_FPU;
++    REQUIRE_ZFH(ctx);
++
++    TCGv t0 = tcg_temp_new();
++    gen_get_gpr(t0, a->rs1);
++
++    gen_set_rm(ctx, a->rm);
++    gen_helper_fcvt_h_w(cpu_fpr[a->rd], cpu_env, t0);
++
++    mark_fs_dirty(ctx);
++    tcg_temp_free(t0);
++
++    return true;
++}
++
++static bool trans_fcvt_h_wu(DisasContext *ctx, arg_fcvt_h_wu *a)
++{
++    REQUIRE_FPU;
++    REQUIRE_ZFH(ctx);
++
++    TCGv t0 = tcg_temp_new();
++    gen_get_gpr(t0, a->rs1);
++
++    gen_set_rm(ctx, a->rm);
++    gen_helper_fcvt_h_wu(cpu_fpr[a->rd], cpu_env, t0);
++
++    mark_fs_dirty(ctx);
++    tcg_temp_free(t0);
++
++    return true;
++}
++
++static bool trans_fmv_x_h(DisasContext *ctx, arg_fmv_x_h *a)
++{
++    REQUIRE_FPU;
++    REQUIRE_ZFH(ctx);
++
++    TCGv t0 = tcg_temp_new();
++
++#if defined(TARGET_RISCV64)
++    tcg_gen_ext16s_tl(t0, cpu_fpr[a->rs1]); // 16 bits->64 bits
++#else
++    tcg_gen_extrl_i64_i32(t0, cpu_fpr[a->rs1]); //16 bits->32 bits
++    tcg_gen_ext16s_tl(t0, t0);
++#endif
++
++    gen_set_gpr(a->rd, t0);
++    tcg_temp_free(t0);
++
++    return true;
++}
++
++static bool trans_fmv_h_x(DisasContext *ctx, arg_fmv_h_x *a)
++{
++    REQUIRE_FPU;
++    REQUIRE_ZFH(ctx);
++
++    TCGv t0 = tcg_temp_new();
++    gen_get_gpr(t0, a->rs1);
++
++    tcg_gen_extu_tl_i64(cpu_fpr[a->rd], t0);
++    gen_nanbox_h(cpu_fpr[a->rd], cpu_fpr[a->rd]);
++
++    mark_fs_dirty(ctx);
++    tcg_temp_free(t0);
++
++    return true;
++}
++
++#ifdef TARGET_RISCV64
++
++static bool trans_fcvt_l_h(DisasContext *ctx, arg_fcvt_l_h *a)
++{
++    REQUIRE_FPU;
++    REQUIRE_ZFH(ctx);
++
++    TCGv t0 = tcg_temp_new();
++    gen_set_rm(ctx, a->rm);
++    gen_helper_fcvt_l_h(t0, cpu_env, cpu_fpr[a->rs1]);
++    gen_set_gpr(a->rd, t0);
++    tcg_temp_free(t0);
++
++    return true;
++}
++
++static bool trans_fcvt_lu_h(DisasContext *ctx, arg_fcvt_lu_h *a)
++{
++    REQUIRE_FPU;
++    REQUIRE_ZFH(ctx);
++
++    TCGv t0 = tcg_temp_new();
++    gen_set_rm(ctx, a->rm);
++    gen_helper_fcvt_lu_h(t0, cpu_env, cpu_fpr[a->rs1]);
++    gen_set_gpr(a->rd, t0);
++    tcg_temp_free(t0);
++
++    return true;
++}
++
++static bool trans_fcvt_h_l(DisasContext *ctx, arg_fcvt_h_l *a)
++{
++    REQUIRE_FPU;
++    REQUIRE_ZFH(ctx);
++
++    TCGv t0 = tcg_temp_new();
++    gen_get_gpr(t0, a->rs1);
++
++    gen_set_rm(ctx, a->rm);
++    gen_helper_fcvt_h_l(cpu_fpr[a->rd], cpu_env, t0);
++
++    mark_fs_dirty(ctx);
++    tcg_temp_free(t0);
++
++    return true;
++}
++
++static bool trans_fcvt_h_lu(DisasContext *ctx, arg_fcvt_h_lu *a)
++{
++    REQUIRE_FPU;
++    REQUIRE_ZFH(ctx);
++
++    TCGv t0 = tcg_temp_new();
++    gen_get_gpr(t0, a->rs1);
++
++    gen_set_rm(ctx, a->rm);
++    gen_helper_fcvt_h_lu(cpu_fpr[a->rd], cpu_env, t0);
++
++    mark_fs_dirty(ctx);
++    tcg_temp_free(t0);
++
++    return true;
++}
++#endif
+diff --git a/target/riscv/internals.h b/target/riscv/internals.h
+index b15ad394bb..bce91da11a 100644
+--- a/target/riscv/internals.h
++++ b/target/riscv/internals.h
+@@ -58,4 +58,20 @@ static inline float32 check_nanbox_s(uint64_t f)
+     }
+ }
+ 
++static inline uint64_t nanbox_h(float16 f)
++{
++    return f | MAKE_64BIT_MASK(16, 48);
++}
++
++static inline float16 check_nanbox_h(uint64_t f)
++{
++    uint64_t mask = MAKE_64BIT_MASK(16, 48);
++
++    if (likely((f & mask) == mask)) {
++        return (uint16_t)f;
++    } else {
++        return 0x7E00u; /* default qnan */
++    }
++}
++
+ #endif
+diff --git a/target/riscv/translate.c b/target/riscv/translate.c
+index 2f9f5ccc62..045475a63b 100644
+--- a/target/riscv/translate.c
++++ b/target/riscv/translate.c
+@@ -56,6 +56,7 @@ typedef struct DisasContext {
+        to reset this known value.  */
+     int frm;
+     bool ext_ifencei;
++    bool ext_zfh;
+     bool hlsx;
+     /* vector extension */
+     bool vill;
+@@ -89,6 +90,11 @@ static void gen_nanbox_s(TCGv_i64 out, TCGv_i64 in)
+     tcg_gen_ori_i64(out, in, MAKE_64BIT_MASK(32, 32));
+ }
+ 
++static void gen_nanbox_h(TCGv_i64 out, TCGv_i64 in)
++{
++    tcg_gen_ori_i64(out, in, MAKE_64BIT_MASK(16, 48));
++}
++
+ /*
+  * A narrow n-bit operation, where n < FLEN, checks that input operands
+  * are correctly Nan-boxed, i.e., all upper FLEN - n bits are 1.
+@@ -97,6 +103,16 @@ static void gen_nanbox_s(TCGv_i64 out, TCGv_i64 in)
+  *
+  * Here, the result is always nan-boxed, even the canonical nan.
+  */
++static void gen_check_nanbox_h(TCGv_i64 out, TCGv_i64 in)
++{
++    TCGv_i64 t_max = tcg_const_i64(0xffffffffffff0000ull);
++    TCGv_i64 t_nan = tcg_const_i64(0xffffffffffff7e00ull);
++
++    tcg_gen_movcond_i64(TCG_COND_GEU, out, in, t_max, in, t_nan);
++    tcg_temp_free_i64(t_max);
++    tcg_temp_free_i64(t_nan);
++}
++
+ static void gen_check_nanbox_s(TCGv_i64 out, TCGv_i64 in)
+ {
+     TCGv_i64 t_max = tcg_const_i64(0xffffffff00000000ull);
+@@ -589,6 +605,7 @@ static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc)
+ #include "insn_trans/trans_rvd.c.inc"
+ #include "insn_trans/trans_rvh.c.inc"
+ #include "insn_trans/trans_rvv.c.inc"
++#include "insn_trans/trans_rvzfh.c.inc"
+ #include "insn_trans/trans_privileged.c.inc"
+ 
+ /* Include the auto-generated decoder for 16 bit insn */
+@@ -640,6 +657,7 @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
+     ctx->misa = env->misa;
+     ctx->frm = -1;  /* unknown rounding mode */
+     ctx->ext_ifencei = cpu->cfg.ext_ifencei;
++    ctx->ext_zfh = cpu->cfg.ext_zfh;
+     ctx->vlen = cpu->cfg.vlen;
+     ctx->hlsx = FIELD_EX32(tb_flags, TB_FLAGS, HLSX);
+     ctx->vill = FIELD_EX32(tb_flags, TB_FLAGS, VILL);
+-- 
+2.33.1
+
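
The Zfh helpers above depend on NaN-boxing: a 16-bit half-precision value lives in a 64-bit FP register with all upper 48 bits set, and anything not boxed that way reads back as the canonical qNaN 0x7E00. The following stand-alone C sketch mirrors the nanbox_h()/check_nanbox_h() logic added to internals.h; the main() demo and its constants are illustrative only, not part of the patch.

    #include <stdint.h>
    #include <stdio.h>

    /* Box a half-precision value into a 64-bit FP register image:
     * the upper 48 bits must all be 1 for the value to count as valid. */
    static uint64_t nanbox_h(uint16_t h)
    {
        return (uint64_t)h | 0xffffffffffff0000ull;
    }

    /* Unbox: if the upper 48 bits are not all 1, return the canonical qNaN. */
    static uint16_t check_nanbox_h(uint64_t reg)
    {
        const uint64_t mask = 0xffffffffffff0000ull;
        return ((reg & mask) == mask) ? (uint16_t)reg : 0x7e00;
    }

    int main(void)
    {
        uint64_t boxed = nanbox_h(0x3c00);        /* fp16 1.0, properly boxed */
        uint64_t stale = 0x000000003f800000ull;   /* an unboxed fp32 pattern  */

        printf("boxed   -> 0x%04x\n", check_nanbox_h(boxed));  /* 0x3c00 */
        printf("unboxed -> 0x%04x\n", check_nanbox_h(stale));  /* 0x7e00 */
        return 0;
    }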

+ 64 - 0
recipes-devtools/qemu/qemu/0005-target-riscv-fix-TB_FLAGS-bits-overlapping-bug-for-r.patch

@@ -0,0 +1,64 @@
+From e408ddf0f9da8e643c61d956c7a694192299f539 Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Fri, 19 Feb 2021 16:48:29 +0800
+Subject: [PATCH 005/107] target/riscv: fix TB_FLAGS bits overlapping bug for
+ rvv/rvh
+
+The TB_FLAGS mem_idx bits were extended from 2 bits to 3 bits in
+commit c445593, but the other TB_FLAGS bits for rvv and rvh were
+not shifted as well, so these bits may overlap with each other when
+rvv is enabled.
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+---
+ target/riscv/cpu.h       | 12 ++++++------
+ target/riscv/translate.c |  2 +-
+ 2 files changed, 7 insertions(+), 7 deletions(-)
+
+diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
+index 6362394204..9b599f083d 100644
+--- a/target/riscv/cpu.h
++++ b/target/riscv/cpu.h
+@@ -379,7 +379,6 @@ void QEMU_NORETURN riscv_raise_exception(CPURISCVState *env,
+ target_ulong riscv_cpu_get_fflags(CPURISCVState *env);
+ void riscv_cpu_set_fflags(CPURISCVState *env, target_ulong);
+ 
+-#define TB_FLAGS_MMU_MASK   7
+ #define TB_FLAGS_PRIV_MMU_MASK                3
+ #define TB_FLAGS_PRIV_HYP_ACCESS_MASK   (1 << 2)
+ #define TB_FLAGS_MSTATUS_FS MSTATUS_FS
+@@ -388,12 +387,13 @@ typedef CPURISCVState CPUArchState;
+ typedef RISCVCPU ArchCPU;
+ #include "exec/cpu-all.h"
+ 
+-FIELD(TB_FLAGS, VL_EQ_VLMAX, 2, 1)
+-FIELD(TB_FLAGS, LMUL, 3, 2)
+-FIELD(TB_FLAGS, SEW, 5, 3)
+-FIELD(TB_FLAGS, VILL, 8, 1)
++FIELD(TB_FLAGS, MEM_IDX, 0, 3)
++FIELD(TB_FLAGS, VL_EQ_VLMAX, 3, 1)
++FIELD(TB_FLAGS, LMUL, 4, 2)
++FIELD(TB_FLAGS, SEW, 6, 3)
++FIELD(TB_FLAGS, VILL, 9, 1)
+ /* Is a Hypervisor instruction load/store allowed? */
+-FIELD(TB_FLAGS, HLSX, 9, 1)
++FIELD(TB_FLAGS, HLSX, 10, 1)
+ 
+ bool riscv_cpu_is_32bit(CPURISCVState *env);
+ 
+diff --git a/target/riscv/translate.c b/target/riscv/translate.c
+index 045475a63b..b199bec4b4 100644
+--- a/target/riscv/translate.c
++++ b/target/riscv/translate.c
+@@ -642,7 +642,7 @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
+     uint32_t tb_flags = ctx->base.tb->flags;
+ 
+     ctx->pc_succ_insn = ctx->base.pc_first;
+-    ctx->mem_idx = tb_flags & TB_FLAGS_MMU_MASK;
++    ctx->mem_idx = FIELD_EX32(tb_flags, TB_FLAGS, MEM_IDX);
+     ctx->mstatus_fs = tb_flags & TB_FLAGS_MSTATUS_FS;
+     ctx->priv_ver = env->priv_ver;
+ #if !defined(CONFIG_USER_ONLY)
+-- 
+2.33.1
+
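
The overlap this patch fixes is easiest to see with the bit positions written out: after mem_idx grew to bits [2:0], the old VL_EQ_VLMAX flag still sat at bit 2. A minimal stand-alone sketch of the collision (editorial, not part of the patch; it assumes a mem_idx value with bit 2 set, as can occur for hypervisor load/store accesses):

    #include <stdint.h>
    #include <stdio.h>

    /* Old layout: mem_idx used bits [2:0] while VL_EQ_VLMAX stayed at bit 2. */
    #define OLD_MMU_MASK      0x7u        /* bits 2:0                       */
    #define OLD_VL_EQ_VLMAX   (1u << 2)   /* bit 2 -- overlaps mem_idx      */

    /* New layout from the patch: vector/hypervisor flags shifted up by one. */
    #define NEW_MEM_IDX_MASK  0x7u        /* bits 2:0                       */
    #define NEW_VL_EQ_VLMAX   (1u << 3)   /* bit 3 -- independent           */

    int main(void)
    {
        uint32_t flags = 4u & OLD_MMU_MASK;          /* mem_idx = 4 (0b100) */
        printf("old layout: VL_EQ_VLMAX reads back as %u\n",
               !!(flags & OLD_VL_EQ_VLMAX));         /* 1: spuriously set   */

        flags = 4u & NEW_MEM_IDX_MASK;
        printf("new layout: VL_EQ_VLMAX reads back as %u\n",
               !!(flags & NEW_VL_EQ_VLMAX));         /* 0: untouched        */
        return 0;
    }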

+ 46 - 0
recipes-devtools/qemu/qemu/0006-fpu-softfloat-set-invalid-excp-flag-for-RISC-V-mulad.patch

@@ -0,0 +1,46 @@
+From 410eaceb105782c98f8d9899c1b316f709a6be5e Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Mon, 19 Apr 2021 12:27:56 +0800
+Subject: [PATCH 006/107] fpu/softfloat: set invalid excp flag for RISC-V
+ muladd instructions
+
+In IEEE 754-2008 spec:
+  Invalid operation exception is signaled when doing:
+  fusedMultiplyAdd(0, Inf, c) or fusedMultiplyAdd(Inf, 0, c)
+  unless c is a quiet NaN; if c is a quiet NaN then it is
+  implementation defined whether the invalid operation exception
+  is signaled.
+
+In RISC-V Unprivileged ISA spec:
+  The fused multiply-add instructions must set the invalid
+  operation exception flag when the multiplicands are Inf and
+  zero, even when the addend is a quiet NaN.
+
+This commit sets the invalid operation exception flag for RISC-V when
+the multiplicands of muladd instructions are Inf and zero.
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+---
+ fpu/softfloat-specialize.c.inc | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/fpu/softfloat-specialize.c.inc b/fpu/softfloat-specialize.c.inc
+index c2f87addb2..12f29fbfc5 100644
+--- a/fpu/softfloat-specialize.c.inc
++++ b/fpu/softfloat-specialize.c.inc
+@@ -624,6 +624,12 @@ static int pickNaNMulAdd(FloatClass a_cls, FloatClass b_cls, FloatClass c_cls,
+     } else {
+         return 1;
+     }
++#elif defined(TARGET_RISCV)
++    /* For RISC-V, InvalidOp is set when multiplicands are Inf and zero */
++    if (infzero) {
++        float_raise(float_flag_invalid, status);
++    }
++    return 3; /* default NaN */
+ #elif defined(TARGET_XTENSA)
+     /*
+      * For Xtensa, the (inf,zero,nan) case sets InvalidOp and returns
+-- 
+2.33.1
+
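
The behavioural difference described above reduces to a small decision rule: for RISC-V, the invalid flag depends only on whether the multiplicands are Inf and zero, not on the addend being a quiet NaN. A simplified stand-alone sketch of that rule (editorial; the class enum and flag handling are deliberately much simpler than softfloat's pickNaNMulAdd()):

    #include <stdbool.h>
    #include <stdio.h>

    /* Sketch of the RISC-V rule encoded by the patch: for fused multiply-add,
     * (0 x Inf) + c must signal the invalid-operation exception and produce
     * the default NaN, even when c is a quiet NaN. */

    enum { CLS_NORMAL, CLS_ZERO, CLS_INF, CLS_QNAN };

    static bool fma_raises_invalid_riscv(int a_cls, int b_cls, int c_cls)
    {
        bool infzero = (a_cls == CLS_INF && b_cls == CLS_ZERO) ||
                       (a_cls == CLS_ZERO && b_cls == CLS_INF);
        (void)c_cls;    /* RISC-V: the addend's class does not matter */
        return infzero;
    }

    int main(void)
    {
        /* (0 x Inf) + qNaN: IEEE 754-2008 leaves this implementation defined,
         * RISC-V requires the invalid flag to be raised. */
        printf("invalid raised: %d\n",
               fma_raises_invalid_riscv(CLS_ZERO, CLS_INF, CLS_QNAN));  /* 1 */
        return 0;
    }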

+ 60 - 0
recipes-devtools/qemu/qemu/0007-target-riscv-Fixup-saturate-subtract-function.patch

@@ -0,0 +1,60 @@
+From cf90aa93ab52ea954d20734e7d8df906433636e5 Mon Sep 17 00:00:00 2001
+From: LIU Zhiwei <zhiwei_liu@c-sky.com>
+Date: Fri, 12 Feb 2021 23:02:21 +0800
+Subject: [PATCH 007/107] target/riscv: Fixup saturate subtract function
+
+The overflow predicate ((a - b) ^ a) & (a ^ b) & INT64_MIN is right.
+However, when the predicate is true and a is 0, it should return the maximum.
+
+Signed-off-by: LIU Zhiwei <zhiwei_liu@c-sky.com>
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
+Message-id: 20210212150256.885-4-zhiwei_liu@c-sky.com
+Message-Id: <20210212150256.885-4-zhiwei_liu@c-sky.com>
+---
+ target/riscv/vector_helper.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
+index a156573d28..356cef8a09 100644
+--- a/target/riscv/vector_helper.c
++++ b/target/riscv/vector_helper.c
+@@ -2451,7 +2451,7 @@ static inline int8_t ssub8(CPURISCVState *env, int vxrm, int8_t a, int8_t b)
+ {
+     int8_t res = a - b;
+     if ((res ^ a) & (a ^ b) & INT8_MIN) {
+-        res = a > 0 ? INT8_MAX : INT8_MIN;
++        res = a >= 0 ? INT8_MAX : INT8_MIN;
+         env->vxsat = 0x1;
+     }
+     return res;
+@@ -2461,7 +2461,7 @@ static inline int16_t ssub16(CPURISCVState *env, int vxrm, int16_t a, int16_t b)
+ {
+     int16_t res = a - b;
+     if ((res ^ a) & (a ^ b) & INT16_MIN) {
+-        res = a > 0 ? INT16_MAX : INT16_MIN;
++        res = a >= 0 ? INT16_MAX : INT16_MIN;
+         env->vxsat = 0x1;
+     }
+     return res;
+@@ -2471,7 +2471,7 @@ static inline int32_t ssub32(CPURISCVState *env, int vxrm, int32_t a, int32_t b)
+ {
+     int32_t res = a - b;
+     if ((res ^ a) & (a ^ b) & INT32_MIN) {
+-        res = a > 0 ? INT32_MAX : INT32_MIN;
++        res = a >= 0 ? INT32_MAX : INT32_MIN;
+         env->vxsat = 0x1;
+     }
+     return res;
+@@ -2481,7 +2481,7 @@ static inline int64_t ssub64(CPURISCVState *env, int vxrm, int64_t a, int64_t b)
+ {
+     int64_t res = a - b;
+     if ((res ^ a) & (a ^ b) & INT64_MIN) {
+-        res = a > 0 ? INT64_MAX : INT64_MIN;
++        res = a >= 0 ? INT64_MAX : INT64_MIN;
+         env->vxsat = 0x1;
+     }
+     return res;
+-- 
+2.33.1
+
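
The one-character change above only matters when a == 0: the subtraction can still overflow, and the result must then saturate to the positive maximum. A stand-alone sketch of the 8-bit case (editorial; it mirrors the fixed ssub8() logic from the patch):

    #include <stdint.h>
    #include <stdio.h>

    /* 8-bit saturating subtract, as fixed by the patch: on signed overflow,
     * saturate according to the sign of a, treating a == 0 as non-negative.
     * The narrowing conversion wraps on common two's-complement targets. */
    static int8_t ssub8_sat(int8_t a, int8_t b)
    {
        int8_t res = (int8_t)(a - b);
        if ((res ^ a) & (a ^ b) & INT8_MIN) {     /* signed overflow        */
            res = a >= 0 ? INT8_MAX : INT8_MIN;   /* old code used a > 0    */
        }
        return res;
    }

    int main(void)
    {
        /* 0 - (-128) overflows; the saturated result must be +127.
         * With the old "a > 0" test it would wrongly become -128. */
        printf("%d\n", ssub8_sat(0, INT8_MIN));   /* prints 127 */
        return 0;
    }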

+ 42 - 0
recipes-devtools/qemu/qemu/0008-target-riscv-fix-vrgather-macro-index-variable-type-.patch

@@ -0,0 +1,42 @@
+From 9c254f0ff496098e3fbc2bad0cca89b031467280 Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Fri, 16 Apr 2021 17:34:56 +0800
+Subject: [PATCH 008/107] target/riscv: fix vrgather macro index variable type
+ bug
+
+ETYPE may be of type uint64_t, so the index variable has to be declared
+as uint64_t as well. Otherwise the value read from the vs1 register may be
+truncated to uint32_t.
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+---
+ target/riscv/vector_helper.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
+index 356cef8a09..4651a1e224 100644
+--- a/target/riscv/vector_helper.c
++++ b/target/riscv/vector_helper.c
+@@ -4796,7 +4796,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2,               \
+     uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen;                   \
+     uint32_t vm = vext_vm(desc);                                          \
+     uint32_t vl = env->vl;                                                \
+-    uint32_t index, i;                                                    \
++    uint64_t index;                                                       \
++    uint32_t i;                                                           \
+                                                                           \
+     for (i = 0; i < vl; i++) {                                            \
+         if (!vm && !vext_elem_mask(v0, mlen, i)) {                        \
+@@ -4826,7 +4827,8 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2,         \
+     uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen;                   \
+     uint32_t vm = vext_vm(desc);                                          \
+     uint32_t vl = env->vl;                                                \
+-    uint32_t index = s1, i;                                               \
++    uint64_t index = s1;                                                  \
++    uint32_t i;                                                           \
+                                                                           \
+     for (i = 0; i < vl; i++) {                                            \
+         if (!vm && !vext_elem_mask(v0, mlen, i)) {                        \
+-- 
+2.33.1
+
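
A quick illustration of the truncation fixed above: with SEW=64, an index element read from vs1 can exceed 32 bits, and storing it in a uint32_t silently wraps it back into range, so vrgather picks a wrong source element instead of writing 0 for an out-of-range index. A stand-alone sketch (editorial; the values are chosen only to show the wrap):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* An SEW=64 index element with bits set above bit 31. */
        uint64_t etype_val = 0x100000002ull;

        uint32_t idx32 = (uint32_t)etype_val;   /* old code: wraps to 2     */
        uint64_t idx64 = etype_val;             /* fixed code: kept intact  */

        /* With vlmax = 16, the truncated index (2) looks in range and picks
         * the wrong source element; the full value (>= vlmax) correctly makes
         * vrgather write 0 to the destination element. */
        printf("truncated: %u, full: 0x%llx\n",
               idx32, (unsigned long long)idx64);
        return 0;
    }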

+ 62 - 0
recipes-devtools/qemu/qemu/0009-target-riscv-drop-vector-0.7.1-and-add-1.0-support.patch

@@ -0,0 +1,62 @@
+From b73aa71fc7af86158c95c3410146e7bb60f5d612 Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Mon, 3 May 2021 17:41:19 +0800
+Subject: [PATCH 009/107] target/riscv: drop vector 0.7.1 and add 1.0 support
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
+---
+ target/riscv/cpu.c | 8 ++++----
+ target/riscv/cpu.h | 2 +-
+ 2 files changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
+index 636d006f80..4a29175e20 100644
+--- a/target/riscv/cpu.c
++++ b/target/riscv/cpu.c
+@@ -380,7 +380,7 @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp)
+     CPURISCVState *env = &cpu->env;
+     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
+     int priv_version = PRIV_VERSION_1_11_0;
+-    int vext_version = VEXT_VERSION_0_07_1;
++    int vext_version = VEXT_VERSION_1_00_0;
+     target_ulong target_misa = env->misa;
+     Error *local_err = NULL;
+ 
+@@ -498,8 +498,8 @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp)
+                 return;
+             }
+             if (cpu->cfg.vext_spec) {
+-                if (!g_strcmp0(cpu->cfg.vext_spec, "v0.7.1")) {
+-                    vext_version = VEXT_VERSION_0_07_1;
++                if (!g_strcmp0(cpu->cfg.vext_spec, "v1.0")) {
++                    vext_version = VEXT_VERSION_1_00_0;
+                 } else {
+                     error_setg(errp,
+                            "Unsupported vector spec version '%s'",
+@@ -508,7 +508,7 @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp)
+                 }
+             } else {
+                 qemu_log("vector version is not specified, "
+-                        "use the default value v0.7.1\n");
++                         "use the default value v1.0\n");
+             }
+             set_vext_version(env, vext_version);
+         }
+diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
+index 9b599f083d..dc4271be2a 100644
+--- a/target/riscv/cpu.h
++++ b/target/riscv/cpu.h
+@@ -86,7 +86,7 @@ enum {
+ #define PRIV_VERSION_1_10_0 0x00011000
+ #define PRIV_VERSION_1_11_0 0x00011100
+ 
+-#define VEXT_VERSION_0_07_1 0x00000701
++#define VEXT_VERSION_1_00_0 0x00010000
+ 
+ enum {
+     TRANSLATE_SUCCESS,
+-- 
+2.33.1
+

+ 28 - 0
recipes-devtools/qemu/qemu/0010-target-riscv-Use-FIELD_EX32-to-extract-wd-field.patch

@@ -0,0 +1,28 @@
+From b427b1659dabe8c0d00c7ac17ff1f5bf41edb0a8 Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Thu, 16 Jul 2020 01:55:44 +0800
+Subject: [PATCH 010/107] target/riscv: Use FIELD_EX32() to extract wd field
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
+---
+ target/riscv/vector_helper.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
+index 4651a1e224..3f1ee31146 100644
+--- a/target/riscv/vector_helper.c
++++ b/target/riscv/vector_helper.c
+@@ -98,7 +98,7 @@ static inline uint32_t vext_lmul(uint32_t desc)
+ 
+ static uint32_t vext_wd(uint32_t desc)
+ {
+-    return (simd_data(desc) >> 11) & 0x1;
++    return FIELD_EX32(simd_data(desc), VDATA, WD);
+ }
+ 
+ /*
+-- 
+2.33.1
+

+ 171 - 0
recipes-devtools/qemu/qemu/0011-target-riscv-rvv-1.0-add-mstatus-VS-field.patch

@@ -0,0 +1,171 @@
+From 7d7508b495c38f2f1c592ea79183910a31836e30 Mon Sep 17 00:00:00 2001
+From: LIU Zhiwei <zhiwei_liu@c-sky.com>
+Date: Thu, 16 Jul 2020 02:06:19 +0800
+Subject: [PATCH 011/107] target/riscv: rvv-1.0: add mstatus VS field
+
+Signed-off-by: LIU Zhiwei <zhiwei_liu@c-sky.com>
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
+---
+ target/riscv/cpu.h        |  7 +++++++
+ target/riscv/cpu_bits.h   |  1 +
+ target/riscv/cpu_helper.c | 15 ++++++++++++++-
+ target/riscv/csr.c        | 25 ++++++++++++++++++++++++-
+ 4 files changed, 46 insertions(+), 2 deletions(-)
+
+diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
+index dc4271be2a..8fd7c01567 100644
+--- a/target/riscv/cpu.h
++++ b/target/riscv/cpu.h
+@@ -336,6 +336,7 @@ int riscv_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
+ int riscv_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+ bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request);
+ bool riscv_cpu_fp_enabled(CPURISCVState *env);
++bool riscv_cpu_vector_enabled(CPURISCVState *env);
+ bool riscv_cpu_virt_enabled(CPURISCVState *env);
+ void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable);
+ bool riscv_cpu_force_hs_excep_enabled(CPURISCVState *env);
+@@ -382,6 +383,7 @@ void riscv_cpu_set_fflags(CPURISCVState *env, target_ulong);
+ #define TB_FLAGS_PRIV_MMU_MASK                3
+ #define TB_FLAGS_PRIV_HYP_ACCESS_MASK   (1 << 2)
+ #define TB_FLAGS_MSTATUS_FS MSTATUS_FS
++#define TB_FLAGS_MSTATUS_VS MSTATUS_VS
+ 
+ typedef CPURISCVState CPUArchState;
+ typedef RISCVCPU ArchCPU;
+@@ -437,6 +439,7 @@ static inline void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc,
+ 
+ #ifdef CONFIG_USER_ONLY
+     flags |= TB_FLAGS_MSTATUS_FS;
++    flags |= TB_FLAGS_MSTATUS_VS;
+ #else
+     flags |= cpu_mmu_index(env, 0);
+     if (riscv_cpu_fp_enabled(env)) {
+@@ -451,6 +454,10 @@ static inline void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc,
+             flags = FIELD_DP32(flags, TB_FLAGS, HLSX, 1);
+         }
+     }
++
++    if (riscv_cpu_vector_enabled(env)) {
++        flags |= env->mstatus & MSTATUS_VS;
++    }
+ #endif
+ 
+     *pflags = flags;
+diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h
+index caf4599207..f88aae628f 100644
+--- a/target/riscv/cpu_bits.h
++++ b/target/riscv/cpu_bits.h
+@@ -371,6 +371,7 @@
+ #define MSTATUS_UBE         0x00000040
+ #define MSTATUS_MPIE        0x00000080
+ #define MSTATUS_SPP         0x00000100
++#define MSTATUS_VS          0x00000600
+ #define MSTATUS_MPP         0x00001800
+ #define MSTATUS_FS          0x00006000
+ #define MSTATUS_XS          0x00018000
+diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
+index 21c54ef561..8605e23a7b 100644
+--- a/target/riscv/cpu_helper.c
++++ b/target/riscv/cpu_helper.c
+@@ -109,11 +109,24 @@ bool riscv_cpu_fp_enabled(CPURISCVState *env)
+     return false;
+ }
+ 
++/* Return true if vector support is currently enabled */
++bool riscv_cpu_vector_enabled(CPURISCVState *env)
++{
++    if (env->mstatus & MSTATUS_VS) {
++        if (riscv_cpu_virt_enabled(env) && !(env->mstatus_hs & MSTATUS_VS)) {
++            return false;
++        }
++        return true;
++    }
++
++    return false;
++}
++
+ void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env)
+ {
+     uint64_t mstatus_mask = MSTATUS_MXR | MSTATUS_SUM | MSTATUS_FS |
+                             MSTATUS_SPP | MSTATUS_SPIE | MSTATUS_SIE |
+-                            MSTATUS64_UXL;
++                            MSTATUS64_UXL | MSTATUS_VS;
+     bool current_virt = riscv_cpu_virt_enabled(env);
+ 
+     g_assert(riscv_has_ext(env, RVH));
+diff --git a/target/riscv/csr.c b/target/riscv/csr.c
+index d2585395bf..f99fc60bd3 100644
+--- a/target/riscv/csr.c
++++ b/target/riscv/csr.c
+@@ -260,6 +260,7 @@ static int write_fcsr(CPURISCVState *env, int csrno, target_ulong val)
+         return -RISCV_EXCP_ILLEGAL_INST;
+     }
+     env->mstatus |= MSTATUS_FS;
++    env->mstatus |= MSTATUS_VS;
+ #endif
+     env->frm = (val & FSR_RD) >> FSR_RD_SHIFT;
+     if (vs(env, csrno) >= 0) {
+@@ -290,6 +291,13 @@ static int read_vxrm(CPURISCVState *env, int csrno, target_ulong *val)
+ 
+ static int write_vxrm(CPURISCVState *env, int csrno, target_ulong val)
+ {
++#if !defined(CONFIG_USER_ONLY)
++    if (!env->debugger && !riscv_cpu_vector_enabled(env)) {
++        return -RISCV_EXCP_ILLEGAL_INST;
++    }
++    env->mstatus |= MSTATUS_VS;
++#endif
++
+     env->vxrm = val;
+     return 0;
+ }
+@@ -302,6 +310,13 @@ static int read_vxsat(CPURISCVState *env, int csrno, target_ulong *val)
+ 
+ static int write_vxsat(CPURISCVState *env, int csrno, target_ulong val)
+ {
++#if !defined(CONFIG_USER_ONLY)
++    if (!env->debugger && !riscv_cpu_vector_enabled(env)) {
++        return -RISCV_EXCP_ILLEGAL_INST;
++    }
++    env->mstatus |= MSTATUS_VS;
++#endif
++
+     env->vxsat = val;
+     return 0;
+ }
+@@ -314,6 +329,13 @@ static int read_vstart(CPURISCVState *env, int csrno, target_ulong *val)
+ 
+ static int write_vstart(CPURISCVState *env, int csrno, target_ulong val)
+ {
++#if !defined(CONFIG_USER_ONLY)
++    if (!env->debugger && !riscv_cpu_vector_enabled(env)) {
++        return -RISCV_EXCP_ILLEGAL_INST;
++    }
++    env->mstatus |= MSTATUS_VS;
++#endif
++
+     env->vstart = val;
+     return 0;
+ }
+@@ -478,7 +500,7 @@ static int write_mstatus(CPURISCVState *env, int csrno, target_ulong val)
+     mask = MSTATUS_SIE | MSTATUS_SPIE | MSTATUS_MIE | MSTATUS_MPIE |
+         MSTATUS_SPP | MSTATUS_FS | MSTATUS_MPRV | MSTATUS_SUM |
+         MSTATUS_MPP | MSTATUS_MXR | MSTATUS_TVM | MSTATUS_TSR |
+-        MSTATUS_TW;
++        MSTATUS_TW | MSTATUS_VS;
+ 
+     if (!riscv_cpu_is_32bit(env)) {
+         /*
+@@ -491,6 +513,7 @@ static int write_mstatus(CPURISCVState *env, int csrno, target_ulong val)
+     mstatus = (mstatus & ~mask) | (val & mask);
+ 
+     dirty = ((mstatus & MSTATUS_FS) == MSTATUS_FS) |
++            ((mstatus & MSTATUS_VS) == MSTATUS_VS) |
+             ((mstatus & MSTATUS_XS) == MSTATUS_XS);
+     mstatus = set_field(mstatus, MSTATUS_SD, dirty);
+     env->mstatus = mstatus;
+-- 
+2.33.1
+

+ 42 - 0
recipes-devtools/qemu/qemu/0012-target-riscv-rvv-1.0-add-sstatus-VS-field.patch

@@ -0,0 +1,42 @@
+From 15d97d2faf70ecb7ec89331cbb4cf651d45824e5 Mon Sep 17 00:00:00 2001
+From: LIU Zhiwei <zhiwei_liu@c-sky.com>
+Date: Thu, 30 Apr 2020 16:36:09 +0800
+Subject: [PATCH 012/107] target/riscv: rvv-1.0: add sstatus VS field
+
+Signed-off-by: LIU Zhiwei <zhiwei_liu@c-sky.com>
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
+---
+ target/riscv/cpu_bits.h | 1 +
+ target/riscv/csr.c      | 2 +-
+ 2 files changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h
+index f88aae628f..2073abfabf 100644
+--- a/target/riscv/cpu_bits.h
++++ b/target/riscv/cpu_bits.h
+@@ -415,6 +415,7 @@
+ #define SSTATUS_UPIE        0x00000010
+ #define SSTATUS_SPIE        0x00000020
+ #define SSTATUS_SPP         0x00000100
++#define SSTATUS_VS          0x00000600
+ #define SSTATUS_FS          0x00006000
+ #define SSTATUS_XS          0x00018000
+ #define SSTATUS_PUM         0x00040000 /* until: priv-1.9.1 */
+diff --git a/target/riscv/csr.c b/target/riscv/csr.c
+index f99fc60bd3..2131c12f7f 100644
+--- a/target/riscv/csr.c
++++ b/target/riscv/csr.c
+@@ -440,7 +440,7 @@ static const target_ulong delegable_excps =
+     (1ULL << (RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT));
+ static const target_ulong sstatus_v1_10_mask = SSTATUS_SIE | SSTATUS_SPIE |
+     SSTATUS_UIE | SSTATUS_UPIE | SSTATUS_SPP | SSTATUS_FS | SSTATUS_XS |
+-    SSTATUS_SUM | SSTATUS_MXR | SSTATUS_SD;
++    SSTATUS_SUM | SSTATUS_MXR | SSTATUS_SD | SSTATUS_VS;
+ static const target_ulong sip_writable_mask = SIP_SSIP | MIP_USIP | MIP_UEIP;
+ static const target_ulong hip_writable_mask = MIP_VSSIP;
+ static const target_ulong hvip_writable_mask = MIP_VSSIP | MIP_VSTIP | MIP_VSEIP;
+-- 
+2.33.1
+

+ 33 - 0
recipes-devtools/qemu/qemu/0013-target-riscv-rvv-1.0-introduce-writable-misa.v-field.patch

@@ -0,0 +1,33 @@
+From 16c565b6660801c6d9b5e83516d61293e957fc9a Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Thu, 30 Jul 2020 16:42:55 +0800
+Subject: [PATCH 013/107] target/riscv: rvv-1.0: introduce writable misa.v
+ field
+
+Implementations may have a writable misa.v field. Analogous to the way
+in which the floating-point unit is handled, the mstatus.vs field may
+exist even if misa.v is clear.
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
+---
+ target/riscv/csr.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/target/riscv/csr.c b/target/riscv/csr.c
+index 2131c12f7f..3a77d2cb86 100644
+--- a/target/riscv/csr.c
++++ b/target/riscv/csr.c
+@@ -572,7 +572,7 @@ static int write_misa(CPURISCVState *env, int csrno, target_ulong val)
+     val &= env->misa_mask;
+ 
+     /* Mask extensions that are not supported by QEMU */
+-    val &= (RVI | RVE | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
++    val &= (RVI | RVE | RVM | RVA | RVF | RVD | RVC | RVS | RVU | RVV);
+ 
+     /* 'D' depends on 'F', so clear 'D' if 'F' is not present */
+     if ((val & RVD) && !(val & RVF)) {
+-- 
+2.33.1
+

+ 486 - 0
recipes-devtools/qemu/qemu/0014-target-riscv-rvv-1.0-add-translation-time-vector-con.patch

@@ -0,0 +1,486 @@
+From d2115a910cd95567bbd0526d7c5766e35d14c4bf Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Mon, 5 Oct 2020 15:21:15 +0800
+Subject: [PATCH 014/107] target/riscv: rvv-1.0: add translation-time vector
+ context status
+
+Signed-off-by: LIU Zhiwei <zhiwei_liu@c-sky.com>
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
+---
+ target/riscv/insn_trans/trans_rvv.c.inc | 69 ++++++++++++++++++++-----
+ target/riscv/translate.c                | 33 ++++++++++++
+ 2 files changed, 90 insertions(+), 12 deletions(-)
+
+diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
+index 887c6b8883..56ce39e769 100644
+--- a/target/riscv/insn_trans/trans_rvv.c.inc
++++ b/target/riscv/insn_trans/trans_rvv.c.inc
+@@ -41,6 +41,7 @@ static bool trans_vsetvl(DisasContext *ctx, arg_vsetvl *a)
+     gen_get_gpr(s2, a->rs2);
+     gen_helper_vsetvl(dst, cpu_env, s1, s2);
+     gen_set_gpr(a->rd, dst);
++    mark_vs_dirty(ctx);
+     tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn);
+     lookup_and_goto_ptr(ctx);
+     ctx->base.is_jmp = DISAS_NORETURN;
+@@ -72,6 +73,7 @@ static bool trans_vsetvli(DisasContext *ctx, arg_vsetvli *a)
+     }
+     gen_helper_vsetvl(dst, cpu_env, s1, s2);
+     gen_set_gpr(a->rd, dst);
++    mark_vs_dirty(ctx);
+     gen_goto_tb(ctx, 0, ctx->pc_succ_insn);
+     ctx->base.is_jmp = DISAS_NORETURN;
+ 
+@@ -163,7 +165,8 @@ typedef void gen_helper_ldst_us(TCGv_ptr, TCGv_ptr, TCGv,
+                                 TCGv_env, TCGv_i32);
+ 
+ static bool ldst_us_trans(uint32_t vd, uint32_t rs1, uint32_t data,
+-                          gen_helper_ldst_us *fn, DisasContext *s)
++                          gen_helper_ldst_us *fn, DisasContext *s,
++                          bool is_store)
+ {
+     TCGv_ptr dest, mask;
+     TCGv base;
+@@ -195,6 +198,9 @@ static bool ldst_us_trans(uint32_t vd, uint32_t rs1, uint32_t data,
+     tcg_temp_free_ptr(mask);
+     tcg_temp_free(base);
+     tcg_temp_free_i32(desc);
++    if (!is_store) {
++        mark_vs_dirty(s);
++    }
+     gen_set_label(over);
+     return true;
+ }
+@@ -245,7 +251,7 @@ static bool ld_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t seq)
+     data = FIELD_DP32(data, VDATA, VM, a->vm);
+     data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+     data = FIELD_DP32(data, VDATA, NF, a->nf);
+-    return ldst_us_trans(a->rd, a->rs1, data, fn, s);
++    return ldst_us_trans(a->rd, a->rs1, data, fn, s, false);
+ }
+ 
+ static bool ld_us_check(DisasContext *s, arg_r2nfvm* a)
+@@ -298,7 +304,7 @@ static bool st_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t seq)
+     data = FIELD_DP32(data, VDATA, VM, a->vm);
+     data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+     data = FIELD_DP32(data, VDATA, NF, a->nf);
+-    return ldst_us_trans(a->rd, a->rs1, data, fn, s);
++    return ldst_us_trans(a->rd, a->rs1, data, fn, s, true);
+ }
+ 
+ static bool st_us_check(DisasContext *s, arg_r2nfvm* a)
+@@ -321,7 +327,7 @@ typedef void gen_helper_ldst_stride(TCGv_ptr, TCGv_ptr, TCGv,
+ 
+ static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2,
+                               uint32_t data, gen_helper_ldst_stride *fn,
+-                              DisasContext *s)
++                              DisasContext *s, bool is_store)
+ {
+     TCGv_ptr dest, mask;
+     TCGv base, stride;
+@@ -348,6 +354,9 @@ static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2,
+     tcg_temp_free(base);
+     tcg_temp_free(stride);
+     tcg_temp_free_i32(desc);
++    if (!is_store) {
++        mark_vs_dirty(s);
++    }
+     gen_set_label(over);
+     return true;
+ }
+@@ -382,7 +391,7 @@ static bool ld_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t seq)
+     data = FIELD_DP32(data, VDATA, VM, a->vm);
+     data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+     data = FIELD_DP32(data, VDATA, NF, a->nf);
+-    return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s);
++    return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s, false);
+ }
+ 
+ static bool ld_stride_check(DisasContext *s, arg_rnfvm* a)
+@@ -426,7 +435,7 @@ static bool st_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t seq)
+         return false;
+     }
+ 
+-    return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s);
++    return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s, true);
+ }
+ 
+ static bool st_stride_check(DisasContext *s, arg_rnfvm* a)
+@@ -449,7 +458,7 @@ typedef void gen_helper_ldst_index(TCGv_ptr, TCGv_ptr, TCGv,
+ 
+ static bool ldst_index_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
+                              uint32_t data, gen_helper_ldst_index *fn,
+-                             DisasContext *s)
++                             DisasContext *s, bool is_store)
+ {
+     TCGv_ptr dest, mask, index;
+     TCGv base;
+@@ -476,6 +485,9 @@ static bool ldst_index_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
+     tcg_temp_free_ptr(index);
+     tcg_temp_free(base);
+     tcg_temp_free_i32(desc);
++    if (!is_store) {
++        mark_vs_dirty(s);
++    }
+     gen_set_label(over);
+     return true;
+ }
+@@ -510,7 +522,7 @@ static bool ld_index_op(DisasContext *s, arg_rnfvm *a, uint8_t seq)
+     data = FIELD_DP32(data, VDATA, VM, a->vm);
+     data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+     data = FIELD_DP32(data, VDATA, NF, a->nf);
+-    return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s);
++    return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s, false);
+ }
+ 
+ /*
+@@ -562,7 +574,7 @@ static bool st_index_op(DisasContext *s, arg_rnfvm *a, uint8_t seq)
+     data = FIELD_DP32(data, VDATA, VM, a->vm);
+     data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+     data = FIELD_DP32(data, VDATA, NF, a->nf);
+-    return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s);
++    return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s, true);
+ }
+ 
+ static bool st_index_check(DisasContext *s, arg_rnfvm* a)
+@@ -606,6 +618,7 @@ static bool ldff_trans(uint32_t vd, uint32_t rs1, uint32_t data,
+     tcg_temp_free_ptr(mask);
+     tcg_temp_free(base);
+     tcg_temp_free_i32(desc);
++    mark_vs_dirty(s);
+     gen_set_label(over);
+     return true;
+ }
+@@ -685,6 +698,7 @@ static bool amo_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
+     tcg_temp_free_ptr(index);
+     tcg_temp_free(base);
+     tcg_temp_free_i32(desc);
++    mark_vs_dirty(s);
+     gen_set_label(over);
+     return true;
+ }
+@@ -832,6 +846,7 @@ do_opivv_gvec(DisasContext *s, arg_rmrr *a, GVecGen3Fn *gvec_fn,
+                            vreg_ofs(s, a->rs1), vreg_ofs(s, a->rs2),
+                            cpu_env, 0, s->vlen / 8, data, fn);
+     }
++    mark_vs_dirty(s);
+     gen_set_label(over);
+     return true;
+ }
+@@ -886,6 +901,7 @@ static bool opivx_trans(uint32_t vd, uint32_t rs1, uint32_t vs2, uint32_t vm,
+     tcg_temp_free_ptr(src2);
+     tcg_temp_free(src1);
+     tcg_temp_free_i32(desc);
++    mark_vs_dirty(s);
+     gen_set_label(over);
+     return true;
+ }
+@@ -920,6 +936,7 @@ do_opivx_gvec(DisasContext *s, arg_rmrr *a, GVecGen2sFn *gvec_fn,
+ 
+         tcg_temp_free_i64(src1);
+         tcg_temp_free(tmp);
++        mark_vs_dirty(s);
+         return true;
+     }
+     return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s);
+@@ -1033,6 +1050,7 @@ static bool opivi_trans(uint32_t vd, uint32_t imm, uint32_t vs2, uint32_t vm,
+     tcg_temp_free_ptr(src2);
+     tcg_temp_free(src1);
+     tcg_temp_free_i32(desc);
++    mark_vs_dirty(s);
+     gen_set_label(over);
+     return true;
+ }
+@@ -1056,10 +1074,10 @@ do_opivi_gvec(DisasContext *s, arg_rmrr *a, GVecGen2iFn *gvec_fn,
+             gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2),
+                     sextract64(a->rs1, 0, 5), MAXSZ(s), MAXSZ(s));
+         }
+-    } else {
+-        return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s, zx);
++        mark_vs_dirty(s);
++        return true;
+     }
+-    return true;
++    return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s, zx);
+ }
+ 
+ /* OPIVI with GVEC IR */
+@@ -1120,6 +1138,7 @@ static bool do_opivv_widen(DisasContext *s, arg_rmrr *a,
+                            vreg_ofs(s, a->rs2),
+                            cpu_env, 0, s->vlen / 8,
+                            data, fn);
++        mark_vs_dirty(s);
+         gen_set_label(over);
+         return true;
+     }
+@@ -1207,6 +1226,7 @@ static bool do_opiwv_widen(DisasContext *s, arg_rmrr *a,
+                            vreg_ofs(s, a->rs1),
+                            vreg_ofs(s, a->rs2),
+                            cpu_env, 0, s->vlen / 8, data, fn);
++        mark_vs_dirty(s);
+         gen_set_label(over);
+         return true;
+     }
+@@ -1285,6 +1305,7 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a)             \
+                            vreg_ofs(s, a->rs1),                    \
+                            vreg_ofs(s, a->rs2), cpu_env, 0,        \
+                            s->vlen / 8, data, fns[s->sew]);        \
++        mark_vs_dirty(s);                                          \
+         gen_set_label(over);                                       \
+         return true;                                               \
+     }                                                              \
+@@ -1416,6 +1437,7 @@ do_opivx_gvec_shift(DisasContext *s, arg_rmrr *a, GVecGen2sFn32 *gvec_fn,
+ 
+         tcg_temp_free_i32(src1);
+         tcg_temp_free(tmp);
++        mark_vs_dirty(s);
+         return true;
+     }
+     return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s);
+@@ -1474,6 +1496,7 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a)             \
+                            vreg_ofs(s, a->rs1),                    \
+                            vreg_ofs(s, a->rs2), cpu_env, 0,        \
+                            s->vlen / 8, data, fns[s->sew]);        \
++        mark_vs_dirty(s);                                          \
+         gen_set_label(over);                                       \
+         return true;                                               \
+     }                                                              \
+@@ -1657,6 +1680,7 @@ static bool trans_vmv_v_v(DisasContext *s, arg_vmv_v_v *a)
+                                cpu_env, 0, s->vlen / 8, data, fns[s->sew]);
+             gen_set_label(over);
+         }
++        mark_vs_dirty(s);
+         return true;
+     }
+     return false;
+@@ -1699,6 +1723,7 @@ static bool trans_vmv_v_x(DisasContext *s, arg_vmv_v_x *a)
+         }
+ 
+         tcg_temp_free(s1);
++        mark_vs_dirty(s);
+         gen_set_label(over);
+         return true;
+     }
+@@ -1714,6 +1739,7 @@ static bool trans_vmv_v_i(DisasContext *s, arg_vmv_v_i *a)
+         if (s->vl_eq_vlmax) {
+             tcg_gen_gvec_dup_imm(s->sew, vreg_ofs(s, a->rd),
+                                  MAXSZ(s), MAXSZ(s), simm);
++            mark_vs_dirty(s);
+         } else {
+             TCGv_i32 desc;
+             TCGv_i64 s1;
+@@ -1735,6 +1761,7 @@ static bool trans_vmv_v_i(DisasContext *s, arg_vmv_v_i *a)
+             tcg_temp_free_ptr(dest);
+             tcg_temp_free_i32(desc);
+             tcg_temp_free_i64(s1);
++            mark_vs_dirty(s);
+             gen_set_label(over);
+         }
+         return true;
+@@ -1839,6 +1866,7 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a)             \
+                            vreg_ofs(s, a->rs1),                    \
+                            vreg_ofs(s, a->rs2), cpu_env, 0,        \
+                            s->vlen / 8, data, fns[s->sew - 1]);    \
++        mark_vs_dirty(s);                                          \
+         gen_set_label(over);                                       \
+         return true;                                               \
+     }                                                              \
+@@ -1874,6 +1902,7 @@ static bool opfvf_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
+     tcg_temp_free_ptr(mask);
+     tcg_temp_free_ptr(src2);
+     tcg_temp_free_i32(desc);
++    mark_vs_dirty(s);
+     gen_set_label(over);
+     return true;
+ }
+@@ -1951,6 +1980,7 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a)           \
+                            vreg_ofs(s, a->rs1),                  \
+                            vreg_ofs(s, a->rs2), cpu_env, 0,      \
+                            s->vlen / 8, data, fns[s->sew - 1]);  \
++        mark_vs_dirty(s);                                        \
+         gen_set_label(over);                                     \
+         return true;                                             \
+     }                                                            \
+@@ -2025,6 +2055,7 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a)             \
+                            vreg_ofs(s, a->rs1),                    \
+                            vreg_ofs(s, a->rs2), cpu_env, 0,        \
+                            s->vlen / 8, data, fns[s->sew - 1]);    \
++        mark_vs_dirty(s);                                          \
+         gen_set_label(over);                                       \
+         return true;                                               \
+     }                                                              \
+@@ -2139,6 +2170,7 @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a)              \
+         tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),     \
+                            vreg_ofs(s, a->rs2), cpu_env, 0,        \
+                            s->vlen / 8, data, fns[s->sew - 1]);    \
++        mark_vs_dirty(s);                                          \
+         gen_set_label(over);                                       \
+         return true;                                               \
+     }                                                              \
+@@ -2211,6 +2243,7 @@ static bool trans_vfmv_v_f(DisasContext *s, arg_vfmv_v_f *a)
+         if (s->vl_eq_vlmax) {
+             tcg_gen_gvec_dup_i64(s->sew, vreg_ofs(s, a->rd),
+                                  MAXSZ(s), MAXSZ(s), cpu_fpr[a->rs1]);
++            mark_vs_dirty(s);
+         } else {
+             TCGv_ptr dest;
+             TCGv_i32 desc;
+@@ -2230,6 +2263,7 @@ static bool trans_vfmv_v_f(DisasContext *s, arg_vfmv_v_f *a)
+ 
+             tcg_temp_free_ptr(dest);
+             tcg_temp_free_i32(desc);
++            mark_vs_dirty(s);
+             gen_set_label(over);
+         }
+         return true;
+@@ -2279,6 +2313,7 @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a)              \
+         tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),     \
+                            vreg_ofs(s, a->rs2), cpu_env, 0,        \
+                            s->vlen / 8, data, fns[s->sew - 1]);    \
++        mark_vs_dirty(s);                                          \
+         gen_set_label(over);                                       \
+         return true;                                               \
+     }                                                              \
+@@ -2327,6 +2362,7 @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a)              \
+         tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),     \
+                            vreg_ofs(s, a->rs2), cpu_env, 0,        \
+                            s->vlen / 8, data, fns[s->sew - 1]);    \
++        mark_vs_dirty(s);                                          \
+         gen_set_label(over);                                       \
+         return true;                                               \
+     }                                                              \
+@@ -2389,6 +2425,7 @@ static bool trans_##NAME(DisasContext *s, arg_r *a)                \
+                            vreg_ofs(s, a->rs1),                    \
+                            vreg_ofs(s, a->rs2), cpu_env, 0,        \
+                            s->vlen / 8, data, fn);                 \
++        mark_vs_dirty(s);                                          \
+         gen_set_label(over);                                       \
+         return true;                                               \
+     }                                                              \
+@@ -2486,6 +2523,7 @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a)              \
+         tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd),                     \
+                            vreg_ofs(s, 0), vreg_ofs(s, a->rs2),    \
+                            cpu_env, 0, s->vlen / 8, data, fn);     \
++        mark_vs_dirty(s);                                          \
+         gen_set_label(over);                                       \
+         return true;                                               \
+     }                                                              \
+@@ -2517,6 +2555,7 @@ static bool trans_viota_m(DisasContext *s, arg_viota_m *a)
+         tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
+                            vreg_ofs(s, a->rs2), cpu_env, 0,
+                            s->vlen / 8, data, fns[s->sew]);
++        mark_vs_dirty(s);
+         gen_set_label(over);
+         return true;
+     }
+@@ -2542,6 +2581,7 @@ static bool trans_vid_v(DisasContext *s, arg_vid_v *a)
+         };
+         tcg_gen_gvec_2_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
+                            cpu_env, 0, s->vlen / 8, data, fns[s->sew]);
++        mark_vs_dirty(s);
+         gen_set_label(over);
+         return true;
+     }
+@@ -2717,6 +2757,7 @@ static bool trans_vmv_s_x(DisasContext *s, arg_vmv_s_x *a)
+         tcg_gen_extu_tl_i64(t1, cpu_gpr[a->rs1]);
+         vec_element_storei(s, a->rd, 0, t1);
+         tcg_temp_free_i64(t1);
++        mark_vs_dirty(s);
+     done:
+         gen_set_label(over);
+         return true;
+@@ -2767,6 +2808,7 @@ static bool trans_vfmv_s_f(DisasContext *s, arg_vfmv_s_f *a)
+         }
+         vec_element_storei(s, a->rd, 0, t1);
+         tcg_temp_free_i64(t1);
++        mark_vs_dirty(s);
+         gen_set_label(over);
+         return true;
+     }
+@@ -2833,6 +2875,7 @@ static bool trans_vrgather_vx(DisasContext *s, arg_rmrr *a)
+         tcg_gen_gvec_dup_i64(s->sew, vreg_ofs(s, a->rd),
+                              MAXSZ(s), MAXSZ(s), dest);
+         tcg_temp_free_i64(dest);
++        mark_vs_dirty(s);
+     } else {
+         static gen_helper_opivx * const fns[4] = {
+             gen_helper_vrgather_vx_b, gen_helper_vrgather_vx_h,
+@@ -2859,6 +2902,7 @@ static bool trans_vrgather_vi(DisasContext *s, arg_rmrr *a)
+                                  endian_ofs(s, a->rs2, a->rs1),
+                                  MAXSZ(s), MAXSZ(s));
+         }
++        mark_vs_dirty(s);
+     } else {
+         static gen_helper_opivx * const fns[4] = {
+             gen_helper_vrgather_vx_b, gen_helper_vrgather_vx_h,
+@@ -2895,6 +2939,7 @@ static bool trans_vcompress_vm(DisasContext *s, arg_r *a)
+         tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
+                            vreg_ofs(s, a->rs1), vreg_ofs(s, a->rs2),
+                            cpu_env, 0, s->vlen / 8, data, fns[s->sew]);
++        mark_vs_dirty(s);
+         gen_set_label(over);
+         return true;
+     }
+diff --git a/target/riscv/translate.c b/target/riscv/translate.c
+index b199bec4b4..c42c52c90c 100644
+--- a/target/riscv/translate.c
++++ b/target/riscv/translate.c
+@@ -47,6 +47,7 @@ typedef struct DisasContext {
+     bool virt_enabled;
+     uint32_t opcode;
+     uint32_t mstatus_fs;
++    uint32_t mstatus_vs;
+     uint32_t misa;
+     uint32_t mem_idx;
+     /* Remember the rounding mode encoded in the previous fp instruction,
+@@ -407,6 +408,37 @@ static void mark_fs_dirty(DisasContext *ctx)
+ static inline void mark_fs_dirty(DisasContext *ctx) { }
+ #endif
+ 
++#ifndef CONFIG_USER_ONLY
++/* The states of mstatus_vs are:
++ * 0 = disabled, 1 = initial, 2 = clean, 3 = dirty
++ * We will have already diagnosed disabled state,
++ * and need to turn initial/clean into dirty.
++ */
++static void mark_vs_dirty(DisasContext *ctx)
++{
++    TCGv tmp;
++    if (ctx->mstatus_vs == MSTATUS_VS) {
++        return;
++    }
++    /* Remember the state change for the rest of the TB.  */
++    ctx->mstatus_vs = MSTATUS_VS;
++
++    tmp = tcg_temp_new();
++    tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus));
++    tcg_gen_ori_tl(tmp, tmp, MSTATUS_VS | MSTATUS_SD);
++    tcg_gen_st_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus));
++
++    if (ctx->virt_enabled) {
++        tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus_hs));
++        tcg_gen_ori_tl(tmp, tmp, MSTATUS_VS | MSTATUS_SD);
++        tcg_gen_st_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus_hs));
++    }
++    tcg_temp_free(tmp);
++}
++#else
++static inline void mark_vs_dirty(DisasContext *ctx) { }
++#endif
++
+ static void gen_set_rm(DisasContext *ctx, int rm)
+ {
+     TCGv_i32 t0;
+@@ -644,6 +676,7 @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
+     ctx->pc_succ_insn = ctx->base.pc_first;
+     ctx->mem_idx = FIELD_EX32(tb_flags, TB_FLAGS, MEM_IDX);
+     ctx->mstatus_fs = tb_flags & TB_FLAGS_MSTATUS_FS;
++    ctx->mstatus_vs = tb_flags & TB_FLAGS_MSTATUS_VS;
+     ctx->priv_ver = env->priv_ver;
+ #if !defined(CONFIG_USER_ONLY)
+     if (riscv_has_ext(env, RVH)) {
+-- 
+2.33.1
+

+ 60 - 0
recipes-devtools/qemu/qemu/0015-target-riscv-rvv-1.0-remove-rvv-related-codes-from-f.patch

@@ -0,0 +1,60 @@
+From 4e5d37969dbab98adb2f537cf0bb5cbb97c4e98e Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Fri, 7 Aug 2020 14:53:53 +0800
+Subject: [PATCH 015/107] target/riscv: rvv-1.0: remove rvv related codes from
+ fcsr registers
+
+* Remove the VXRM and VXSAT fields from the FCSR register as they are only
+  present in the VCSR register.
+* Remove the loose RVV check in the fs() predicate function.
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
+---
+ target/riscv/csr.c | 13 -------------
+ 1 file changed, 13 deletions(-)
+
+diff --git a/target/riscv/csr.c b/target/riscv/csr.c
+index 3a77d2cb86..7992bdc07e 100644
+--- a/target/riscv/csr.c
++++ b/target/riscv/csr.c
+@@ -38,10 +38,6 @@ void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops)
+ static int fs(CPURISCVState *env, int csrno)
+ {
+ #if !defined(CONFIG_USER_ONLY)
+-    /* loose check condition for fcsr in vector extension */
+-    if ((csrno == CSR_FCSR) && (env->misa & RVV)) {
+-        return 0;
+-    }
+     if (!env->debugger && !riscv_cpu_fp_enabled(env)) {
+         return -RISCV_EXCP_ILLEGAL_INST;
+     }
+@@ -246,10 +242,6 @@ static int read_fcsr(CPURISCVState *env, int csrno, target_ulong *val)
+ #endif
+     *val = (riscv_cpu_get_fflags(env) << FSR_AEXC_SHIFT)
+         | (env->frm << FSR_RD_SHIFT);
+-    if (vs(env, csrno) >= 0) {
+-        *val |= (env->vxrm << FSR_VXRM_SHIFT)
+-                | (env->vxsat << FSR_VXSAT_SHIFT);
+-    }
+     return 0;
+ }
+ 
+@@ -260,13 +252,8 @@ static int write_fcsr(CPURISCVState *env, int csrno, target_ulong val)
+         return -RISCV_EXCP_ILLEGAL_INST;
+     }
+     env->mstatus |= MSTATUS_FS;
+-    env->mstatus |= MSTATUS_VS;
+ #endif
+     env->frm = (val & FSR_RD) >> FSR_RD_SHIFT;
+-    if (vs(env, csrno) >= 0) {
+-        env->vxrm = (val & FSR_VXRM) >> FSR_VXRM_SHIFT;
+-        env->vxsat = (val & FSR_VXSAT) >> FSR_VXSAT_SHIFT;
+-    }
+     riscv_cpu_set_fflags(env, (val & FSR_AEXC) >> FSR_AEXC_SHIFT);
+     return 0;
+ }
+-- 
+2.33.1
+

+ 77 - 0
recipes-devtools/qemu/qemu/0016-target-riscv-rvv-1.0-add-vcsr-register.patch

@@ -0,0 +1,77 @@
+From 4220f76f5873fa15198fae534c29d5b40f2aed76 Mon Sep 17 00:00:00 2001
+From: LIU Zhiwei <zhiwei_liu@c-sky.com>
+Date: Thu, 16 Jul 2020 02:06:51 +0800
+Subject: [PATCH 016/107] target/riscv: rvv-1.0: add vcsr register
+
+Signed-off-by: LIU Zhiwei <zhiwei_liu@c-sky.com>
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
+---
+ target/riscv/cpu_bits.h |  7 +++++++
+ target/riscv/csr.c      | 21 +++++++++++++++++++++
+ 2 files changed, 28 insertions(+)
+
+diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h
+index 2073abfabf..9ad27ff280 100644
+--- a/target/riscv/cpu_bits.h
++++ b/target/riscv/cpu_bits.h
+@@ -60,9 +60,16 @@
+ #define CSR_VSTART          0x008
+ #define CSR_VXSAT           0x009
+ #define CSR_VXRM            0x00a
++#define CSR_VCSR            0x00f
+ #define CSR_VL              0xc20
+ #define CSR_VTYPE           0xc21
+ 
++/* VCSR fields */
++#define VCSR_VXSAT_SHIFT    0
++#define VCSR_VXSAT          (0x1 << VCSR_VXSAT_SHIFT)
++#define VCSR_VXRM_SHIFT     1
++#define VCSR_VXRM           (0x3 << VCSR_VXRM_SHIFT)
++
+ /* User Timers and Counters */
+ #define CSR_CYCLE           0xc00
+ #define CSR_TIME            0xc01
+diff --git a/target/riscv/csr.c b/target/riscv/csr.c
+index 7992bdc07e..6e19677318 100644
+--- a/target/riscv/csr.c
++++ b/target/riscv/csr.c
+@@ -327,6 +327,26 @@ static int write_vstart(CPURISCVState *env, int csrno, target_ulong val)
+     return 0;
+ }
+ 
++static int read_vcsr(CPURISCVState *env, int csrno, target_ulong *val)
++{
++    *val = (env->vxrm << VCSR_VXRM_SHIFT) | (env->vxsat << VCSR_VXSAT_SHIFT);
++    return 0;
++}
++
++static int write_vcsr(CPURISCVState *env, int csrno, target_ulong val)
++{
++#if !defined(CONFIG_USER_ONLY)
++    if (!env->debugger && !riscv_cpu_vector_enabled(env)) {
++        return -RISCV_EXCP_ILLEGAL_INST;
++    }
++    env->mstatus |= MSTATUS_VS;
++#endif
++
++    env->vxrm = (val & VCSR_VXRM) >> VCSR_VXRM_SHIFT;
++    env->vxsat = (val & VCSR_VXSAT) >> VCSR_VXSAT_SHIFT;
++    return 0;
++}
++
+ /* User Timers and Counters */
+ static int read_instret(CPURISCVState *env, int csrno, target_ulong *val)
+ {
+@@ -1390,6 +1410,7 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
+     [CSR_VSTART]   = { "vstart",   vs,     read_vstart,  write_vstart },
+     [CSR_VXSAT]    = { "vxsat",    vs,     read_vxsat,   write_vxsat  },
+     [CSR_VXRM]     = { "vxrm",     vs,     read_vxrm,    write_vxrm   },
++    [CSR_VCSR]     = { "vcsr",     vs,     read_vcsr,    write_vcsr   },
+     [CSR_VL]       = { "vl",       vs,     read_vl                    },
+     [CSR_VTYPE]    = { "vtype",    vs,     read_vtype                 },
+     /* User Timers and Counters */
+-- 
+2.33.1
+

+ 54 - 0
recipes-devtools/qemu/qemu/0017-target-riscv-rvv-1.0-add-vlenb-register.patch

@@ -0,0 +1,54 @@
+From f99c83a4df678dbdcf08ef109e876e3c3c2027c8 Mon Sep 17 00:00:00 2001
+From: Greentime Hu <greentime.hu@sifive.com>
+Date: Thu, 9 Jul 2020 16:16:00 +0800
+Subject: [PATCH 017/107] target/riscv: rvv-1.0: add vlenb register
+
+Signed-off-by: Greentime Hu <greentime.hu@sifive.com>
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
+---
+ target/riscv/cpu_bits.h | 1 +
+ target/riscv/csr.c      | 7 +++++++
+ 2 files changed, 8 insertions(+)
+
+diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h
+index 9ad27ff280..4ed4ca091f 100644
+--- a/target/riscv/cpu_bits.h
++++ b/target/riscv/cpu_bits.h
+@@ -63,6 +63,7 @@
+ #define CSR_VCSR            0x00f
+ #define CSR_VL              0xc20
+ #define CSR_VTYPE           0xc21
++#define CSR_VLENB           0xc22
+ 
+ /* VCSR fields */
+ #define VCSR_VXSAT_SHIFT    0
+diff --git a/target/riscv/csr.c b/target/riscv/csr.c
+index 6e19677318..5a0bc9a71f 100644
+--- a/target/riscv/csr.c
++++ b/target/riscv/csr.c
+@@ -264,6 +264,12 @@ static int read_vtype(CPURISCVState *env, int csrno, target_ulong *val)
+     return 0;
+ }
+ 
++static int read_vlenb(CPURISCVState *env, int csrno, target_ulong *val)
++{
++    *val = env_archcpu(env)->cfg.vlen >> 3;
++    return 0;
++}
++
+ static int read_vl(CPURISCVState *env, int csrno, target_ulong *val)
+ {
+     *val = env->vl;
+@@ -1413,6 +1419,7 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
+     [CSR_VCSR]     = { "vcsr",     vs,     read_vcsr,    write_vcsr   },
+     [CSR_VL]       = { "vl",       vs,     read_vl                    },
+     [CSR_VTYPE]    = { "vtype",    vs,     read_vtype                 },
++    [CSR_VLENB]    = { "vlenb",    vs,     read_vlenb                 },
+     /* User Timers and Counters */
+     [CSR_CYCLE]    = { "cycle",    ctr,    read_instret  },
+     [CSR_INSTRET]  = { "instret",  ctr,    read_instret  },
+-- 
+2.33.1
+

+ 35 - 0
recipes-devtools/qemu/qemu/0018-target-riscv-rvv-1.0-check-MSTATUS_VS-when-accessing.patch

@@ -0,0 +1,35 @@
+From ea46dc1dd9050c1962ee8e887f14e022d84f4d0c Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Fri, 24 Jul 2020 10:56:15 +0800
+Subject: [PATCH 018/107] target/riscv: rvv-1.0: check MSTATUS_VS when
+ accessing vector csr registers
+
+If the VS field is off, accessing vector csr registers should raise an
+illegal-instruction exception.
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
+---
+ target/riscv/csr.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/target/riscv/csr.c b/target/riscv/csr.c
+index 5a0bc9a71f..e065b042df 100644
+--- a/target/riscv/csr.c
++++ b/target/riscv/csr.c
+@@ -48,6 +48,11 @@ static int fs(CPURISCVState *env, int csrno)
+ static int vs(CPURISCVState *env, int csrno)
+ {
+     if (env->misa & RVV) {
++#if !defined(CONFIG_USER_ONLY)
++        if (!env->debugger && !riscv_cpu_vector_enabled(env)) {
++            return -RISCV_EXCP_ILLEGAL_INST;
++        }
++#endif
+         return 0;
+     }
+     return -RISCV_EXCP_ILLEGAL_INST;
+-- 
+2.33.1
+
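Patch 0018 tightens the vs() predicate: outside of debugger access, a vector CSR access now also requires riscv_cpu_vector_enabled(env) to be true. That helper is not part of this patch; a minimal sketch of what it is expected to check, assuming mstatus carries a two-bit VS field analogous to the FS field used by fs() above, with VS == 0 meaning the vector unit is off:

    /* Sketch only: vector state is usable unless mstatus.VS is Off (0). */
    static bool riscv_cpu_vector_enabled(CPURISCVState *env)
    {
        return get_field(env->mstatus, MSTATUS_VS) != 0;
    }

With this in place, a csrr from vl or vtype while mstatus.VS is Off raises an illegal-instruction exception instead of silently succeeding, mirroring how FS already gates the floating-point CSRs.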

+ 1260 - 0
recipes-devtools/qemu/qemu/0019-target-riscv-rvv-1.0-remove-MLEN-calculations.patch

@@ -0,0 +1,1260 @@
+From 9ea07a7c23db4fb206a05a955f2c68985d3308fd Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Thu, 4 Jun 2020 00:00:11 +0800
+Subject: [PATCH 019/107] target/riscv: rvv-1.0: remove MLEN calculations
+
+As in RVV 1.0 design, MLEN is hardcoded with value 1 (Section 4.5).
+Thus, remove all MLEN related calculations.
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
+---
+ target/riscv/insn_trans/trans_rvv.c.inc |  35 +---
+ target/riscv/internals.h                |   9 +-
+ target/riscv/translate.c                |   2 -
+ target/riscv/vector_helper.c            | 250 ++++++++++--------------
+ 4 files changed, 110 insertions(+), 186 deletions(-)
+
+diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
+index 56ce39e769..46e18a62b5 100644
+--- a/target/riscv/insn_trans/trans_rvv.c.inc
++++ b/target/riscv/insn_trans/trans_rvv.c.inc
+@@ -247,7 +247,6 @@ static bool ld_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t seq)
+         return false;
+     }
+ 
+-    data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
+     data = FIELD_DP32(data, VDATA, VM, a->vm);
+     data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+     data = FIELD_DP32(data, VDATA, NF, a->nf);
+@@ -300,7 +299,6 @@ static bool st_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t seq)
+         return false;
+     }
+ 
+-    data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
+     data = FIELD_DP32(data, VDATA, VM, a->vm);
+     data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+     data = FIELD_DP32(data, VDATA, NF, a->nf);
+@@ -387,7 +385,6 @@ static bool ld_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t seq)
+         return false;
+     }
+ 
+-    data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
+     data = FIELD_DP32(data, VDATA, VM, a->vm);
+     data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+     data = FIELD_DP32(data, VDATA, NF, a->nf);
+@@ -426,7 +423,6 @@ static bool st_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t seq)
+           gen_helper_vsse_v_w,  gen_helper_vsse_v_d }
+     };
+ 
+-    data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
+     data = FIELD_DP32(data, VDATA, VM, a->vm);
+     data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+     data = FIELD_DP32(data, VDATA, NF, a->nf);
+@@ -518,7 +514,6 @@ static bool ld_index_op(DisasContext *s, arg_rnfvm *a, uint8_t seq)
+         return false;
+     }
+ 
+-    data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
+     data = FIELD_DP32(data, VDATA, VM, a->vm);
+     data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+     data = FIELD_DP32(data, VDATA, NF, a->nf);
+@@ -570,7 +565,6 @@ static bool st_index_op(DisasContext *s, arg_rnfvm *a, uint8_t seq)
+         return false;
+     }
+ 
+-    data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
+     data = FIELD_DP32(data, VDATA, VM, a->vm);
+     data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+     data = FIELD_DP32(data, VDATA, NF, a->nf);
+@@ -649,7 +643,6 @@ static bool ldff_op(DisasContext *s, arg_r2nfvm *a, uint8_t seq)
+         return false;
+     }
+ 
+-    data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
+     data = FIELD_DP32(data, VDATA, VM, a->vm);
+     data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+     data = FIELD_DP32(data, VDATA, NF, a->nf);
+@@ -760,7 +753,6 @@ static bool amo_op(DisasContext *s, arg_rwdvm *a, uint8_t seq)
+         }
+     }
+ 
+-    data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
+     data = FIELD_DP32(data, VDATA, VM, a->vm);
+     data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+     data = FIELD_DP32(data, VDATA, WD, a->wd);
+@@ -839,7 +831,6 @@ do_opivv_gvec(DisasContext *s, arg_rmrr *a, GVecGen3Fn *gvec_fn,
+     } else {
+         uint32_t data = 0;
+ 
+-        data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
+         data = FIELD_DP32(data, VDATA, VM, a->vm);
+         data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+         tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
+@@ -885,7 +876,6 @@ static bool opivx_trans(uint32_t vd, uint32_t rs1, uint32_t vs2, uint32_t vm,
+     src1 = tcg_temp_new();
+     gen_get_gpr(src1, rs1);
+ 
+-    data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
+     data = FIELD_DP32(data, VDATA, VM, vm);
+     data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+     desc = tcg_const_i32(simd_desc(0, s->vlen / 8, data));
+@@ -1034,7 +1024,6 @@ static bool opivi_trans(uint32_t vd, uint32_t imm, uint32_t vs2, uint32_t vm,
+     } else {
+         src1 = tcg_const_tl(sextract64(imm, 0, 5));
+     }
+-    data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
+     data = FIELD_DP32(data, VDATA, VM, vm);
+     data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+     desc = tcg_const_i32(simd_desc(0, s->vlen / 8, data));
+@@ -1130,7 +1119,6 @@ static bool do_opivv_widen(DisasContext *s, arg_rmrr *a,
+         TCGLabel *over = gen_new_label();
+         tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
+ 
+-        data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
+         data = FIELD_DP32(data, VDATA, VM, a->vm);
+         data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+         tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
+@@ -1219,7 +1207,6 @@ static bool do_opiwv_widen(DisasContext *s, arg_rmrr *a,
+         TCGLabel *over = gen_new_label();
+         tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
+ 
+-        data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
+         data = FIELD_DP32(data, VDATA, VM, a->vm);
+         data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+         tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
+@@ -1298,7 +1285,6 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a)             \
+         TCGLabel *over = gen_new_label();                          \
+         tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);          \
+                                                                    \
+-        data = FIELD_DP32(data, VDATA, MLEN, s->mlen);             \
+         data = FIELD_DP32(data, VDATA, VM, a->vm);                 \
+         data = FIELD_DP32(data, VDATA, LMUL, s->lmul);             \
+         tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),     \
+@@ -1489,7 +1475,6 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a)             \
+         TCGLabel *over = gen_new_label();                          \
+         tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);          \
+                                                                    \
+-        data = FIELD_DP32(data, VDATA, MLEN, s->mlen);             \
+         data = FIELD_DP32(data, VDATA, VM, a->vm);                 \
+         data = FIELD_DP32(data, VDATA, LMUL, s->lmul);             \
+         tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),     \
+@@ -1859,7 +1844,6 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a)             \
+         gen_set_rm(s, 7);                                          \
+         tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);          \
+                                                                    \
+-        data = FIELD_DP32(data, VDATA, MLEN, s->mlen);             \
+         data = FIELD_DP32(data, VDATA, VM, a->vm);                 \
+         data = FIELD_DP32(data, VDATA, LMUL, s->lmul);             \
+         tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),     \
+@@ -1932,7 +1916,6 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a)            \
+             gen_helper_##NAME##_d,                                \
+         };                                                        \
+         gen_set_rm(s, 7);                                         \
+-        data = FIELD_DP32(data, VDATA, MLEN, s->mlen);            \
+         data = FIELD_DP32(data, VDATA, VM, a->vm);                \
+         data = FIELD_DP32(data, VDATA, LMUL, s->lmul);            \
+         return opfvf_trans(a->rd, a->rs1, a->rs2, data,           \
+@@ -1973,7 +1956,6 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a)           \
+         gen_set_rm(s, 7);                                        \
+         tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);        \
+                                                                  \
+-        data = FIELD_DP32(data, VDATA, MLEN, s->mlen);           \
+         data = FIELD_DP32(data, VDATA, VM, a->vm);               \
+         data = FIELD_DP32(data, VDATA, LMUL, s->lmul);           \
+         tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),   \
+@@ -2011,7 +1993,6 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a)           \
+             gen_helper_##NAME##_h, gen_helper_##NAME##_w,        \
+         };                                                       \
+         gen_set_rm(s, 7);                                        \
+-        data = FIELD_DP32(data, VDATA, MLEN, s->mlen);           \
+         data = FIELD_DP32(data, VDATA, VM, a->vm);               \
+         data = FIELD_DP32(data, VDATA, LMUL, s->lmul);           \
+         return opfvf_trans(a->rd, a->rs1, a->rs2, data,          \
+@@ -2048,7 +2029,6 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a)             \
+         gen_set_rm(s, 7);                                          \
+         tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);          \
+                                                                    \
+-        data = FIELD_DP32(data, VDATA, MLEN, s->mlen);             \
+         data = FIELD_DP32(data, VDATA, VM, a->vm);                 \
+         data = FIELD_DP32(data, VDATA, LMUL, s->lmul);             \
+         tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),     \
+@@ -2084,7 +2064,6 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a)           \
+             gen_helper_##NAME##_h, gen_helper_##NAME##_w,        \
+         };                                                       \
+         gen_set_rm(s, 7);                                        \
+-        data = FIELD_DP32(data, VDATA, MLEN, s->mlen);           \
+         data = FIELD_DP32(data, VDATA, VM, a->vm);               \
+         data = FIELD_DP32(data, VDATA, LMUL, s->lmul);           \
+         return opfvf_trans(a->rd, a->rs1, a->rs2, data,          \
+@@ -2164,7 +2143,6 @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a)              \
+         gen_set_rm(s, 7);                                          \
+         tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);          \
+                                                                    \
+-        data = FIELD_DP32(data, VDATA, MLEN, s->mlen);             \
+         data = FIELD_DP32(data, VDATA, VM, a->vm);                 \
+         data = FIELD_DP32(data, VDATA, LMUL, s->lmul);             \
+         tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),     \
+@@ -2307,7 +2285,6 @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a)              \
+         gen_set_rm(s, 7);                                          \
+         tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);          \
+                                                                    \
+-        data = FIELD_DP32(data, VDATA, MLEN, s->mlen);             \
+         data = FIELD_DP32(data, VDATA, VM, a->vm);                 \
+         data = FIELD_DP32(data, VDATA, LMUL, s->lmul);             \
+         tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),     \
+@@ -2356,7 +2333,6 @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a)              \
+         gen_set_rm(s, 7);                                          \
+         tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);          \
+                                                                    \
+-        data = FIELD_DP32(data, VDATA, MLEN, s->mlen);             \
+         data = FIELD_DP32(data, VDATA, VM, a->vm);                 \
+         data = FIELD_DP32(data, VDATA, LMUL, s->lmul);             \
+         tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),     \
+@@ -2419,7 +2395,6 @@ static bool trans_##NAME(DisasContext *s, arg_r *a)                \
+         TCGLabel *over = gen_new_label();                          \
+         tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);          \
+                                                                    \
+-        data = FIELD_DP32(data, VDATA, MLEN, s->mlen);             \
+         data = FIELD_DP32(data, VDATA, LMUL, s->lmul);             \
+         tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),     \
+                            vreg_ofs(s, a->rs1),                    \
+@@ -2449,7 +2424,6 @@ static bool trans_vmpopc_m(DisasContext *s, arg_rmr *a)
+         TCGv dst;
+         TCGv_i32 desc;
+         uint32_t data = 0;
+-        data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
+         data = FIELD_DP32(data, VDATA, VM, a->vm);
+         data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+ 
+@@ -2481,7 +2455,6 @@ static bool trans_vmfirst_m(DisasContext *s, arg_rmr *a)
+         TCGv dst;
+         TCGv_i32 desc;
+         uint32_t data = 0;
+-        data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
+         data = FIELD_DP32(data, VDATA, VM, a->vm);
+         data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+ 
+@@ -2517,7 +2490,6 @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a)              \
+         TCGLabel *over = gen_new_label();                          \
+         tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);          \
+                                                                    \
+-        data = FIELD_DP32(data, VDATA, MLEN, s->mlen);             \
+         data = FIELD_DP32(data, VDATA, VM, a->vm);                 \
+         data = FIELD_DP32(data, VDATA, LMUL, s->lmul);             \
+         tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd),                     \
+@@ -2545,7 +2517,6 @@ static bool trans_viota_m(DisasContext *s, arg_viota_m *a)
+         TCGLabel *over = gen_new_label();
+         tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
+ 
+-        data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
+         data = FIELD_DP32(data, VDATA, VM, a->vm);
+         data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+         static gen_helper_gvec_3_ptr * const fns[4] = {
+@@ -2572,7 +2543,6 @@ static bool trans_vid_v(DisasContext *s, arg_vid_v *a)
+         TCGLabel *over = gen_new_label();
+         tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
+ 
+-        data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
+         data = FIELD_DP32(data, VDATA, VM, a->vm);
+         data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+         static gen_helper_gvec_2_ptr * const fns[4] = {
+@@ -2863,7 +2833,7 @@ static bool trans_vrgather_vx(DisasContext *s, arg_rmrr *a)
+     }
+ 
+     if (a->vm && s->vl_eq_vlmax) {
+-        int vlmax = s->vlen / s->mlen;
++        int vlmax = s->vlen;
+         TCGv_i64 dest = tcg_temp_new_i64();
+ 
+         if (a->rs1 == 0) {
+@@ -2894,7 +2864,7 @@ static bool trans_vrgather_vi(DisasContext *s, arg_rmrr *a)
+     }
+ 
+     if (a->vm && s->vl_eq_vlmax) {
+-        if (a->rs1 >= s->vlen / s->mlen) {
++        if (a->rs1 >= s->vlen) {
+             tcg_gen_gvec_dup_imm(SEW64, vreg_ofs(s, a->rd),
+                                  MAXSZ(s), MAXSZ(s), 0);
+         } else {
+@@ -2934,7 +2904,6 @@ static bool trans_vcompress_vm(DisasContext *s, arg_r *a)
+         TCGLabel *over = gen_new_label();
+         tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
+ 
+-        data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
+         data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+         tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
+                            vreg_ofs(s, a->rs1), vreg_ofs(s, a->rs2),
+diff --git a/target/riscv/internals.h b/target/riscv/internals.h
+index bce91da11a..81f5dfa477 100644
+--- a/target/riscv/internals.h
++++ b/target/riscv/internals.h
+@@ -22,11 +22,10 @@
+ #include "hw/registerfields.h"
+ 
+ /* share data between vector helpers and decode code */
+-FIELD(VDATA, MLEN, 0, 8)
+-FIELD(VDATA, VM, 8, 1)
+-FIELD(VDATA, LMUL, 9, 2)
+-FIELD(VDATA, NF, 11, 4)
+-FIELD(VDATA, WD, 11, 1)
++FIELD(VDATA, VM, 0, 1)
++FIELD(VDATA, LMUL, 1, 3)
++FIELD(VDATA, NF, 4, 4)
++FIELD(VDATA, WD, 4, 1)
+ 
+ /* float point classify helpers */
+ target_ulong fclass_h(uint64_t frs1);
+diff --git a/target/riscv/translate.c b/target/riscv/translate.c
+index c42c52c90c..b18f76c344 100644
+--- a/target/riscv/translate.c
++++ b/target/riscv/translate.c
+@@ -64,7 +64,6 @@ typedef struct DisasContext {
+     uint8_t lmul;
+     uint8_t sew;
+     uint16_t vlen;
+-    uint16_t mlen;
+     bool vl_eq_vlmax;
+     CPUState *cs;
+ } DisasContext;
+@@ -696,7 +695,6 @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
+     ctx->vill = FIELD_EX32(tb_flags, TB_FLAGS, VILL);
+     ctx->sew = FIELD_EX32(tb_flags, TB_FLAGS, SEW);
+     ctx->lmul = FIELD_EX32(tb_flags, TB_FLAGS, LMUL);
+-    ctx->mlen = 1 << (ctx->sew  + 3 - ctx->lmul);
+     ctx->vl_eq_vlmax = FIELD_EX32(tb_flags, TB_FLAGS, VL_EQ_VLMAX);
+     ctx->cs = cs;
+ }
+diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
+index 3f1ee31146..dea1d190ed 100644
+--- a/target/riscv/vector_helper.c
++++ b/target/riscv/vector_helper.c
+@@ -81,11 +81,6 @@ static inline uint32_t vext_nf(uint32_t desc)
+     return FIELD_EX32(simd_data(desc), VDATA, NF);
+ }
+ 
+-static inline uint32_t vext_mlen(uint32_t desc)
+-{
+-    return FIELD_EX32(simd_data(desc), VDATA, MLEN);
+-}
+-
+ static inline uint32_t vext_vm(uint32_t desc)
+ {
+     return FIELD_EX32(simd_data(desc), VDATA, VM);
+@@ -188,19 +183,24 @@ static void clearq(void *vd, uint32_t idx, uint32_t cnt, uint32_t tot)
+     vext_clear(cur, cnt, tot);
+ }
+ 
+-static inline void vext_set_elem_mask(void *v0, int mlen, int index,
++static inline void vext_set_elem_mask(void *v0, int index,
+         uint8_t value)
+ {
+-    int idx = (index * mlen) / 64;
+-    int pos = (index * mlen) % 64;
++    int idx = index / 64;
++    int pos = index % 64;
+     uint64_t old = ((uint64_t *)v0)[idx];
+-    ((uint64_t *)v0)[idx] = deposit64(old, pos, mlen, value);
++    ((uint64_t *)v0)[idx] = deposit64(old, pos, 1, value);
+ }
+ 
+-static inline int vext_elem_mask(void *v0, int mlen, int index)
++/*
++ * Earlier designs (pre-0.9) had a varying number of bits
++ * per mask value (MLEN). In the 0.9 design, MLEN=1.
++ * (Section 4.5)
++ */
++static inline int vext_elem_mask(void *v0, int index)
+ {
+-    int idx = (index * mlen) / 64;
+-    int pos = (index * mlen) % 64;
++    int idx = index / 64;
++    int pos = index  % 64;
+     return (((uint64_t *)v0)[idx] >> pos) & 1;
+ }
+ 
+@@ -277,12 +277,11 @@ vext_ldst_stride(void *vd, void *v0, target_ulong base,
+ {
+     uint32_t i, k;
+     uint32_t nf = vext_nf(desc);
+-    uint32_t mlen = vext_mlen(desc);
+     uint32_t vlmax = vext_maxsz(desc) / esz;
+ 
+     /* probe every access*/
+     for (i = 0; i < env->vl; i++) {
+-        if (!vm && !vext_elem_mask(v0, mlen, i)) {
++        if (!vm && !vext_elem_mask(v0, i)) {
+             continue;
+         }
+         probe_pages(env, base + stride * i, nf * msz, ra, access_type);
+@@ -290,7 +289,7 @@ vext_ldst_stride(void *vd, void *v0, target_ulong base,
+     /* do real access */
+     for (i = 0; i < env->vl; i++) {
+         k = 0;
+-        if (!vm && !vext_elem_mask(v0, mlen, i)) {
++        if (!vm && !vext_elem_mask(v0, i)) {
+             continue;
+         }
+         while (k < nf) {
+@@ -506,12 +505,11 @@ vext_ldst_index(void *vd, void *v0, target_ulong base,
+     uint32_t i, k;
+     uint32_t nf = vext_nf(desc);
+     uint32_t vm = vext_vm(desc);
+-    uint32_t mlen = vext_mlen(desc);
+     uint32_t vlmax = vext_maxsz(desc) / esz;
+ 
+     /* probe every access*/
+     for (i = 0; i < env->vl; i++) {
+-        if (!vm && !vext_elem_mask(v0, mlen, i)) {
++        if (!vm && !vext_elem_mask(v0, i)) {
+             continue;
+         }
+         probe_pages(env, get_index_addr(base, i, vs2), nf * msz, ra,
+@@ -520,7 +518,7 @@ vext_ldst_index(void *vd, void *v0, target_ulong base,
+     /* load bytes from guest memory */
+     for (i = 0; i < env->vl; i++) {
+         k = 0;
+-        if (!vm && !vext_elem_mask(v0, mlen, i)) {
++        if (!vm && !vext_elem_mask(v0, i)) {
+             continue;
+         }
+         while (k < nf) {
+@@ -604,7 +602,6 @@ vext_ldff(void *vd, void *v0, target_ulong base,
+ {
+     void *host;
+     uint32_t i, k, vl = 0;
+-    uint32_t mlen = vext_mlen(desc);
+     uint32_t nf = vext_nf(desc);
+     uint32_t vm = vext_vm(desc);
+     uint32_t vlmax = vext_maxsz(desc) / esz;
+@@ -612,7 +609,7 @@ vext_ldff(void *vd, void *v0, target_ulong base,
+ 
+     /* probe every access*/
+     for (i = 0; i < env->vl; i++) {
+-        if (!vm && !vext_elem_mask(v0, mlen, i)) {
++        if (!vm && !vext_elem_mask(v0, i)) {
+             continue;
+         }
+         addr = base + nf * i * msz;
+@@ -653,7 +650,7 @@ ProbeSuccess:
+     }
+     for (i = 0; i < env->vl; i++) {
+         k = 0;
+-        if (!vm && !vext_elem_mask(v0, mlen, i)) {
++        if (!vm && !vext_elem_mask(v0, i)) {
+             continue;
+         }
+         while (k < nf) {
+@@ -784,18 +781,17 @@ vext_amo_noatomic(void *vs3, void *v0, target_ulong base,
+     target_long addr;
+     uint32_t wd = vext_wd(desc);
+     uint32_t vm = vext_vm(desc);
+-    uint32_t mlen = vext_mlen(desc);
+     uint32_t vlmax = vext_maxsz(desc) / esz;
+ 
+     for (i = 0; i < env->vl; i++) {
+-        if (!vm && !vext_elem_mask(v0, mlen, i)) {
++        if (!vm && !vext_elem_mask(v0, i)) {
+             continue;
+         }
+         probe_pages(env, get_index_addr(base, i, vs2), msz, ra, MMU_DATA_LOAD);
+         probe_pages(env, get_index_addr(base, i, vs2), msz, ra, MMU_DATA_STORE);
+     }
+     for (i = 0; i < env->vl; i++) {
+-        if (!vm && !vext_elem_mask(v0, mlen, i)) {
++        if (!vm && !vext_elem_mask(v0, i)) {
+             continue;
+         }
+         addr = get_index_addr(base, i, vs2);
+@@ -911,13 +907,12 @@ static void do_vext_vv(void *vd, void *v0, void *vs1, void *vs2,
+                        opivv2_fn *fn, clear_fn *clearfn)
+ {
+     uint32_t vlmax = vext_maxsz(desc) / esz;
+-    uint32_t mlen = vext_mlen(desc);
+     uint32_t vm = vext_vm(desc);
+     uint32_t vl = env->vl;
+     uint32_t i;
+ 
+     for (i = 0; i < vl; i++) {
+-        if (!vm && !vext_elem_mask(v0, mlen, i)) {
++        if (!vm && !vext_elem_mask(v0, i)) {
+             continue;
+         }
+         fn(vd, vs1, vs2, i);
+@@ -976,13 +971,12 @@ static void do_vext_vx(void *vd, void *v0, target_long s1, void *vs2,
+                        opivx2_fn fn, clear_fn *clearfn)
+ {
+     uint32_t vlmax = vext_maxsz(desc) / esz;
+-    uint32_t mlen = vext_mlen(desc);
+     uint32_t vm = vext_vm(desc);
+     uint32_t vl = env->vl;
+     uint32_t i;
+ 
+     for (i = 0; i < vl; i++) {
+-        if (!vm && !vext_elem_mask(v0, mlen, i)) {
++        if (!vm && !vext_elem_mask(v0, i)) {
+             continue;
+         }
+         fn(vd, s1, vs2, i);
+@@ -1172,7 +1166,6 @@ GEN_VEXT_VX(vwsub_wx_w, 4, 8, clearq)
+ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2,   \
+                   CPURISCVState *env, uint32_t desc)          \
+ {                                                             \
+-    uint32_t mlen = vext_mlen(desc);                          \
+     uint32_t vl = env->vl;                                    \
+     uint32_t esz = sizeof(ETYPE);                             \
+     uint32_t vlmax = vext_maxsz(desc) / esz;                  \
+@@ -1181,7 +1174,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2,   \
+     for (i = 0; i < vl; i++) {                                \
+         ETYPE s1 = *((ETYPE *)vs1 + H(i));                    \
+         ETYPE s2 = *((ETYPE *)vs2 + H(i));                    \
+-        uint8_t carry = vext_elem_mask(v0, mlen, i);          \
++        uint8_t carry = vext_elem_mask(v0, i);                \
+                                                               \
+         *((ETYPE *)vd + H(i)) = DO_OP(s2, s1, carry);         \
+     }                                                         \
+@@ -1202,7 +1195,6 @@ GEN_VEXT_VADC_VVM(vsbc_vvm_d, uint64_t, H8, DO_VSBC, clearq)
+ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2,        \
+                   CPURISCVState *env, uint32_t desc)                     \
+ {                                                                        \
+-    uint32_t mlen = vext_mlen(desc);                                     \
+     uint32_t vl = env->vl;                                               \
+     uint32_t esz = sizeof(ETYPE);                                        \
+     uint32_t vlmax = vext_maxsz(desc) / esz;                             \
+@@ -1210,7 +1202,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2,        \
+                                                                          \
+     for (i = 0; i < vl; i++) {                                           \
+         ETYPE s2 = *((ETYPE *)vs2 + H(i));                               \
+-        uint8_t carry = vext_elem_mask(v0, mlen, i);                     \
++        uint8_t carry = vext_elem_mask(v0, i);                           \
+                                                                          \
+         *((ETYPE *)vd + H(i)) = DO_OP(s2, (ETYPE)(target_long)s1, carry);\
+     }                                                                    \
+@@ -1235,7 +1227,6 @@ GEN_VEXT_VADC_VXM(vsbc_vxm_d, uint64_t, H8, DO_VSBC, clearq)
+ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2,   \
+                   CPURISCVState *env, uint32_t desc)          \
+ {                                                             \
+-    uint32_t mlen = vext_mlen(desc);                          \
+     uint32_t vl = env->vl;                                    \
+     uint32_t vlmax = vext_maxsz(desc) / sizeof(ETYPE);        \
+     uint32_t i;                                               \
+@@ -1243,12 +1234,12 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2,   \
+     for (i = 0; i < vl; i++) {                                \
+         ETYPE s1 = *((ETYPE *)vs1 + H(i));                    \
+         ETYPE s2 = *((ETYPE *)vs2 + H(i));                    \
+-        uint8_t carry = vext_elem_mask(v0, mlen, i);          \
++        uint8_t carry = vext_elem_mask(v0, i);                \
+                                                               \
+-        vext_set_elem_mask(vd, mlen, i, DO_OP(s2, s1, carry));\
++        vext_set_elem_mask(vd, i, DO_OP(s2, s1, carry));      \
+     }                                                         \
+     for (; i < vlmax; i++) {                                  \
+-        vext_set_elem_mask(vd, mlen, i, 0);                   \
++        vext_set_elem_mask(vd, i, 0);                         \
+     }                                                         \
+ }
+ 
+@@ -1266,20 +1257,19 @@ GEN_VEXT_VMADC_VVM(vmsbc_vvm_d, uint64_t, H8, DO_MSBC)
+ void HELPER(NAME)(void *vd, void *v0, target_ulong s1,          \
+                   void *vs2, CPURISCVState *env, uint32_t desc) \
+ {                                                               \
+-    uint32_t mlen = vext_mlen(desc);                            \
+     uint32_t vl = env->vl;                                      \
+     uint32_t vlmax = vext_maxsz(desc) / sizeof(ETYPE);          \
+     uint32_t i;                                                 \
+                                                                 \
+     for (i = 0; i < vl; i++) {                                  \
+         ETYPE s2 = *((ETYPE *)vs2 + H(i));                      \
+-        uint8_t carry = vext_elem_mask(v0, mlen, i);            \
++        uint8_t carry = vext_elem_mask(v0, i);                  \
+                                                                 \
+-        vext_set_elem_mask(vd, mlen, i,                         \
++        vext_set_elem_mask(vd, i,                               \
+                 DO_OP(s2, (ETYPE)(target_long)s1, carry));      \
+     }                                                           \
+     for (; i < vlmax; i++) {                                    \
+-        vext_set_elem_mask(vd, mlen, i, 0);                     \
++        vext_set_elem_mask(vd, i, 0);                           \
+     }                                                           \
+ }
+ 
+@@ -1353,7 +1343,6 @@ GEN_VEXT_VX(vxor_vx_d, 8, 8, clearq)
+ void HELPER(NAME)(void *vd, void *v0, void *vs1,                          \
+                   void *vs2, CPURISCVState *env, uint32_t desc)           \
+ {                                                                         \
+-    uint32_t mlen = vext_mlen(desc);                                      \
+     uint32_t vm = vext_vm(desc);                                          \
+     uint32_t vl = env->vl;                                                \
+     uint32_t esz = sizeof(TS1);                                           \
+@@ -1361,7 +1350,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1,                          \
+     uint32_t i;                                                           \
+                                                                           \
+     for (i = 0; i < vl; i++) {                                            \
+-        if (!vm && !vext_elem_mask(v0, mlen, i)) {                        \
++        if (!vm && !vext_elem_mask(v0, i)) {                              \
+             continue;                                                     \
+         }                                                                 \
+         TS1 s1 = *((TS1 *)vs1 + HS1(i));                                  \
+@@ -1391,7 +1380,6 @@ GEN_VEXT_SHIFT_VV(vsra_vv_d, uint64_t, int64_t, H8, H8, DO_SRL, 0x3f, clearq)
+ void HELPER(NAME)(void *vd, void *v0, target_ulong s1,                \
+         void *vs2, CPURISCVState *env, uint32_t desc)                 \
+ {                                                                     \
+-    uint32_t mlen = vext_mlen(desc);                                  \
+     uint32_t vm = vext_vm(desc);                                      \
+     uint32_t vl = env->vl;                                            \
+     uint32_t esz = sizeof(TD);                                        \
+@@ -1399,7 +1387,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1,                \
+     uint32_t i;                                                       \
+                                                                       \
+     for (i = 0; i < vl; i++) {                                        \
+-        if (!vm && !vext_elem_mask(v0, mlen, i)) {                    \
++        if (!vm && !vext_elem_mask(v0, i)) {                          \
+             continue;                                                 \
+         }                                                             \
+         TS2 s2 = *((TS2 *)vs2 + HS2(i));                              \
+@@ -1448,7 +1436,6 @@ GEN_VEXT_SHIFT_VX(vnsra_vx_w, int32_t, int64_t, H4, H8, DO_SRL, 0x3f, clearl)
+ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2,   \
+                   CPURISCVState *env, uint32_t desc)          \
+ {                                                             \
+-    uint32_t mlen = vext_mlen(desc);                          \
+     uint32_t vm = vext_vm(desc);                              \
+     uint32_t vl = env->vl;                                    \
+     uint32_t vlmax = vext_maxsz(desc) / sizeof(ETYPE);        \
+@@ -1457,13 +1444,13 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2,   \
+     for (i = 0; i < vl; i++) {                                \
+         ETYPE s1 = *((ETYPE *)vs1 + H(i));                    \
+         ETYPE s2 = *((ETYPE *)vs2 + H(i));                    \
+-        if (!vm && !vext_elem_mask(v0, mlen, i)) {            \
++        if (!vm && !vext_elem_mask(v0, i)) {                  \
+             continue;                                         \
+         }                                                     \
+-        vext_set_elem_mask(vd, mlen, i, DO_OP(s2, s1));       \
++        vext_set_elem_mask(vd, i, DO_OP(s2, s1));             \
+     }                                                         \
+     for (; i < vlmax; i++) {                                  \
+-        vext_set_elem_mask(vd, mlen, i, 0);                   \
++        vext_set_elem_mask(vd, i, 0);                         \
+     }                                                         \
+ }
+ 
+@@ -1501,7 +1488,6 @@ GEN_VEXT_CMP_VV(vmsle_vv_d, int64_t, H8, DO_MSLE)
+ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2,   \
+                   CPURISCVState *env, uint32_t desc)                \
+ {                                                                   \
+-    uint32_t mlen = vext_mlen(desc);                                \
+     uint32_t vm = vext_vm(desc);                                    \
+     uint32_t vl = env->vl;                                          \
+     uint32_t vlmax = vext_maxsz(desc) / sizeof(ETYPE);              \
+@@ -1509,14 +1495,14 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2,   \
+                                                                     \
+     for (i = 0; i < vl; i++) {                                      \
+         ETYPE s2 = *((ETYPE *)vs2 + H(i));                          \
+-        if (!vm && !vext_elem_mask(v0, mlen, i)) {                  \
++        if (!vm && !vext_elem_mask(v0, i)) {                        \
+             continue;                                               \
+         }                                                           \
+-        vext_set_elem_mask(vd, mlen, i,                             \
++        vext_set_elem_mask(vd, i,                                   \
+                 DO_OP(s2, (ETYPE)(target_long)s1));                 \
+     }                                                               \
+     for (; i < vlmax; i++) {                                        \
+-        vext_set_elem_mask(vd, mlen, i, 0);                         \
++        vext_set_elem_mask(vd, i, 0);                               \
+     }                                                               \
+ }
+ 
+@@ -2078,14 +2064,13 @@ GEN_VEXT_VMV_VX(vmv_v_x_d, int64_t, H8, clearq)
+ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2,          \
+                   CPURISCVState *env, uint32_t desc)                 \
+ {                                                                    \
+-    uint32_t mlen = vext_mlen(desc);                                 \
+     uint32_t vl = env->vl;                                           \
+     uint32_t esz = sizeof(ETYPE);                                    \
+     uint32_t vlmax = vext_maxsz(desc) / esz;                         \
+     uint32_t i;                                                      \
+                                                                      \
+     for (i = 0; i < vl; i++) {                                       \
+-        ETYPE *vt = (!vext_elem_mask(v0, mlen, i) ? vs2 : vs1);      \
++        ETYPE *vt = (!vext_elem_mask(v0, i) ? vs2 : vs1);            \
+         *((ETYPE *)vd + H(i)) = *(vt + H(i));                        \
+     }                                                                \
+     CLEAR_FN(vd, vl, vl * esz, vlmax * esz);                         \
+@@ -2100,7 +2085,6 @@ GEN_VEXT_VMERGE_VV(vmerge_vvm_d, int64_t, H8, clearq)
+ void HELPER(NAME)(void *vd, void *v0, target_ulong s1,               \
+                   void *vs2, CPURISCVState *env, uint32_t desc)      \
+ {                                                                    \
+-    uint32_t mlen = vext_mlen(desc);                                 \
+     uint32_t vl = env->vl;                                           \
+     uint32_t esz = sizeof(ETYPE);                                    \
+     uint32_t vlmax = vext_maxsz(desc) / esz;                         \
+@@ -2108,7 +2092,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1,               \
+                                                                      \
+     for (i = 0; i < vl; i++) {                                       \
+         ETYPE s2 = *((ETYPE *)vs2 + H(i));                           \
+-        ETYPE d = (!vext_elem_mask(v0, mlen, i) ? s2 :               \
++        ETYPE d = (!vext_elem_mask(v0, i) ? s2 :                     \
+                    (ETYPE)(target_long)s1);                          \
+         *((ETYPE *)vd + H(i)) = d;                                   \
+     }                                                                \
+@@ -2146,11 +2130,11 @@ do_##NAME(void *vd, void *vs1, void *vs2, int i,                    \
+ static inline void
+ vext_vv_rm_1(void *vd, void *v0, void *vs1, void *vs2,
+              CPURISCVState *env,
+-             uint32_t vl, uint32_t vm, uint32_t mlen, int vxrm,
++             uint32_t vl, uint32_t vm, int vxrm,
+              opivv2_rm_fn *fn)
+ {
+     for (uint32_t i = 0; i < vl; i++) {
+-        if (!vm && !vext_elem_mask(v0, mlen, i)) {
++        if (!vm && !vext_elem_mask(v0, i)) {
+             continue;
+         }
+         fn(vd, vs1, vs2, i, env, vxrm);
+@@ -2164,26 +2148,25 @@ vext_vv_rm_2(void *vd, void *v0, void *vs1, void *vs2,
+              opivv2_rm_fn *fn, clear_fn *clearfn)
+ {
+     uint32_t vlmax = vext_maxsz(desc) / esz;
+-    uint32_t mlen = vext_mlen(desc);
+     uint32_t vm = vext_vm(desc);
+     uint32_t vl = env->vl;
+ 
+     switch (env->vxrm) {
+     case 0: /* rnu */
+         vext_vv_rm_1(vd, v0, vs1, vs2,
+-                     env, vl, vm, mlen, 0, fn);
++                     env, vl, vm, 0, fn);
+         break;
+     case 1: /* rne */
+         vext_vv_rm_1(vd, v0, vs1, vs2,
+-                     env, vl, vm, mlen, 1, fn);
++                     env, vl, vm, 1, fn);
+         break;
+     case 2: /* rdn */
+         vext_vv_rm_1(vd, v0, vs1, vs2,
+-                     env, vl, vm, mlen, 2, fn);
++                     env, vl, vm, 2, fn);
+         break;
+     default: /* rod */
+         vext_vv_rm_1(vd, v0, vs1, vs2,
+-                     env, vl, vm, mlen, 3, fn);
++                     env, vl, vm, 3, fn);
+         break;
+     }
+ 
+@@ -2266,11 +2249,11 @@ do_##NAME(void *vd, target_long s1, void *vs2, int i,               \
+ static inline void
+ vext_vx_rm_1(void *vd, void *v0, target_long s1, void *vs2,
+              CPURISCVState *env,
+-             uint32_t vl, uint32_t vm, uint32_t mlen, int vxrm,
++             uint32_t vl, uint32_t vm, int vxrm,
+              opivx2_rm_fn *fn)
+ {
+     for (uint32_t i = 0; i < vl; i++) {
+-        if (!vm && !vext_elem_mask(v0, mlen, i)) {
++        if (!vm && !vext_elem_mask(v0, i)) {
+             continue;
+         }
+         fn(vd, s1, vs2, i, env, vxrm);
+@@ -2284,26 +2267,25 @@ vext_vx_rm_2(void *vd, void *v0, target_long s1, void *vs2,
+              opivx2_rm_fn *fn, clear_fn *clearfn)
+ {
+     uint32_t vlmax = vext_maxsz(desc) / esz;
+-    uint32_t mlen = vext_mlen(desc);
+     uint32_t vm = vext_vm(desc);
+     uint32_t vl = env->vl;
+ 
+     switch (env->vxrm) {
+     case 0: /* rnu */
+         vext_vx_rm_1(vd, v0, s1, vs2,
+-                     env, vl, vm, mlen, 0, fn);
++                     env, vl, vm, 0, fn);
+         break;
+     case 1: /* rne */
+         vext_vx_rm_1(vd, v0, s1, vs2,
+-                     env, vl, vm, mlen, 1, fn);
++                     env, vl, vm, 1, fn);
+         break;
+     case 2: /* rdn */
+         vext_vx_rm_1(vd, v0, s1, vs2,
+-                     env, vl, vm, mlen, 2, fn);
++                     env, vl, vm, 2, fn);
+         break;
+     default: /* rod */
+         vext_vx_rm_1(vd, v0, s1, vs2,
+-                     env, vl, vm, mlen, 3, fn);
++                     env, vl, vm, 3, fn);
+         break;
+     }
+ 
+@@ -3188,13 +3170,12 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1,          \
+                   uint32_t desc)                          \
+ {                                                         \
+     uint32_t vlmax = vext_maxsz(desc) / ESZ;              \
+-    uint32_t mlen = vext_mlen(desc);                      \
+     uint32_t vm = vext_vm(desc);                          \
+     uint32_t vl = env->vl;                                \
+     uint32_t i;                                           \
+                                                           \
+     for (i = 0; i < vl; i++) {                            \
+-        if (!vm && !vext_elem_mask(v0, mlen, i)) {        \
++        if (!vm && !vext_elem_mask(v0, i)) {              \
+             continue;                                     \
+         }                                                 \
+         do_##NAME(vd, vs1, vs2, i, env);                  \
+@@ -3223,13 +3204,12 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1,        \
+                   uint32_t desc)                          \
+ {                                                         \
+     uint32_t vlmax = vext_maxsz(desc) / ESZ;              \
+-    uint32_t mlen = vext_mlen(desc);                      \
+     uint32_t vm = vext_vm(desc);                          \
+     uint32_t vl = env->vl;                                \
+     uint32_t i;                                           \
+                                                           \
+     for (i = 0; i < vl; i++) {                            \
+-        if (!vm && !vext_elem_mask(v0, mlen, i)) {        \
++        if (!vm && !vext_elem_mask(v0, i)) {              \
+             continue;                                     \
+         }                                                 \
+         do_##NAME(vd, s1, vs2, i, env);                   \
+@@ -3794,7 +3774,6 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2,       \
+         CPURISCVState *env, uint32_t desc)             \
+ {                                                      \
+     uint32_t vlmax = vext_maxsz(desc) / ESZ;           \
+-    uint32_t mlen = vext_mlen(desc);                   \
+     uint32_t vm = vext_vm(desc);                       \
+     uint32_t vl = env->vl;                             \
+     uint32_t i;                                        \
+@@ -3803,7 +3782,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2,       \
+         return;                                        \
+     }                                                  \
+     for (i = 0; i < vl; i++) {                         \
+-        if (!vm && !vext_elem_mask(v0, mlen, i)) {     \
++        if (!vm && !vext_elem_mask(v0, i)) {           \
+             continue;                                  \
+         }                                              \
+         do_##NAME(vd, vs2, i, env);                    \
+@@ -3935,7 +3914,6 @@ GEN_VEXT_VF(vfsgnjx_vf_d, 8, 8, clearq)
+ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2,   \
+                   CPURISCVState *env, uint32_t desc)          \
+ {                                                             \
+-    uint32_t mlen = vext_mlen(desc);                          \
+     uint32_t vm = vext_vm(desc);                              \
+     uint32_t vl = env->vl;                                    \
+     uint32_t vlmax = vext_maxsz(desc) / sizeof(ETYPE);        \
+@@ -3944,14 +3922,14 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2,   \
+     for (i = 0; i < vl; i++) {                                \
+         ETYPE s1 = *((ETYPE *)vs1 + H(i));                    \
+         ETYPE s2 = *((ETYPE *)vs2 + H(i));                    \
+-        if (!vm && !vext_elem_mask(v0, mlen, i)) {            \
++        if (!vm && !vext_elem_mask(v0, i)) {                  \
+             continue;                                         \
+         }                                                     \
+-        vext_set_elem_mask(vd, mlen, i,                       \
++        vext_set_elem_mask(vd, i,                             \
+                            DO_OP(s2, s1, &env->fp_status));   \
+     }                                                         \
+     for (; i < vlmax; i++) {                                  \
+-        vext_set_elem_mask(vd, mlen, i, 0);                   \
++        vext_set_elem_mask(vd, i, 0);                         \
+     }                                                         \
+ }
+ 
+@@ -3963,7 +3941,6 @@ GEN_VEXT_CMP_VV_ENV(vmfeq_vv_d, uint64_t, H8, float64_eq_quiet)
+ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2,       \
+                   CPURISCVState *env, uint32_t desc)                \
+ {                                                                   \
+-    uint32_t mlen = vext_mlen(desc);                                \
+     uint32_t vm = vext_vm(desc);                                    \
+     uint32_t vl = env->vl;                                          \
+     uint32_t vlmax = vext_maxsz(desc) / sizeof(ETYPE);              \
+@@ -3971,14 +3948,14 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2,       \
+                                                                     \
+     for (i = 0; i < vl; i++) {                                      \
+         ETYPE s2 = *((ETYPE *)vs2 + H(i));                          \
+-        if (!vm && !vext_elem_mask(v0, mlen, i)) {                  \
++        if (!vm && !vext_elem_mask(v0, i)) {                        \
+             continue;                                               \
+         }                                                           \
+-        vext_set_elem_mask(vd, mlen, i,                             \
++        vext_set_elem_mask(vd, i,                                   \
+                            DO_OP(s2, (ETYPE)s1, &env->fp_status));  \
+     }                                                               \
+     for (; i < vlmax; i++) {                                        \
+-        vext_set_elem_mask(vd, mlen, i, 0);                         \
++        vext_set_elem_mask(vd, i, 0);                               \
+     }                                                               \
+ }
+ 
+@@ -4092,13 +4069,12 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2,       \
+                   CPURISCVState *env, uint32_t desc)   \
+ {                                                      \
+     uint32_t vlmax = vext_maxsz(desc) / ESZ;           \
+-    uint32_t mlen = vext_mlen(desc);                   \
+     uint32_t vm = vext_vm(desc);                       \
+     uint32_t vl = env->vl;                             \
+     uint32_t i;                                        \
+                                                        \
+     for (i = 0; i < vl; i++) {                         \
+-        if (!vm && !vext_elem_mask(v0, mlen, i)) {     \
++        if (!vm && !vext_elem_mask(v0, i)) {           \
+             continue;                                  \
+         }                                              \
+         do_##NAME(vd, vs2, i);                         \
+@@ -4175,7 +4151,6 @@ GEN_VEXT_V(vfclass_v_d, 8, 8, clearq)
+ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \
+                   CPURISCVState *env, uint32_t desc)          \
+ {                                                             \
+-    uint32_t mlen = vext_mlen(desc);                          \
+     uint32_t vm = vext_vm(desc);                              \
+     uint32_t vl = env->vl;                                    \
+     uint32_t esz = sizeof(ETYPE);                             \
+@@ -4185,7 +4160,7 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \
+     for (i = 0; i < vl; i++) {                                \
+         ETYPE s2 = *((ETYPE *)vs2 + H(i));                    \
+         *((ETYPE *)vd + H(i))                                 \
+-          = (!vm && !vext_elem_mask(v0, mlen, i) ? s2 : s1);  \
++          = (!vm && !vext_elem_mask(v0, i) ? s2 : s1);        \
+     }                                                         \
+     CLEAR_FN(vd, vl, vl * esz, vlmax * esz);                  \
+ }
+@@ -4316,7 +4291,6 @@ GEN_VEXT_V_ENV(vfncvt_f_f_v_w, 4, 4, clearl)
+ void HELPER(NAME)(void *vd, void *v0, void *vs1,          \
+         void *vs2, CPURISCVState *env, uint32_t desc)     \
+ {                                                         \
+-    uint32_t mlen = vext_mlen(desc);                      \
+     uint32_t vm = vext_vm(desc);                          \
+     uint32_t vl = env->vl;                                \
+     uint32_t i;                                           \
+@@ -4325,7 +4299,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1,          \
+                                                           \
+     for (i = 0; i < vl; i++) {                            \
+         TS2 s2 = *((TS2 *)vs2 + HS2(i));                  \
+-        if (!vm && !vext_elem_mask(v0, mlen, i)) {        \
++        if (!vm && !vext_elem_mask(v0, i)) {              \
+             continue;                                     \
+         }                                                 \
+         s1 = OP(s1, (TD)s2);                              \
+@@ -4399,7 +4373,6 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1,           \
+                   void *vs2, CPURISCVState *env,           \
+                   uint32_t desc)                           \
+ {                                                          \
+-    uint32_t mlen = vext_mlen(desc);                       \
+     uint32_t vm = vext_vm(desc);                           \
+     uint32_t vl = env->vl;                                 \
+     uint32_t i;                                            \
+@@ -4408,7 +4381,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1,           \
+                                                            \
+     for (i = 0; i < vl; i++) {                             \
+         TS2 s2 = *((TS2 *)vs2 + HS2(i));                   \
+-        if (!vm && !vext_elem_mask(v0, mlen, i)) {         \
++        if (!vm && !vext_elem_mask(v0, i)) {               \
+             continue;                                      \
+         }                                                  \
+         s1 = OP(s1, (TD)s2, &env->fp_status);              \
+@@ -4437,7 +4410,6 @@ GEN_VEXT_FRED(vfredmin_vs_d, uint64_t, uint64_t, H8, H8, float64_minnum, clearq)
+ void HELPER(vfwredsum_vs_h)(void *vd, void *v0, void *vs1,
+                             void *vs2, CPURISCVState *env, uint32_t desc)
+ {
+-    uint32_t mlen = vext_mlen(desc);
+     uint32_t vm = vext_vm(desc);
+     uint32_t vl = env->vl;
+     uint32_t i;
+@@ -4446,7 +4418,7 @@ void HELPER(vfwredsum_vs_h)(void *vd, void *v0, void *vs1,
+ 
+     for (i = 0; i < vl; i++) {
+         uint16_t s2 = *((uint16_t *)vs2 + H2(i));
+-        if (!vm && !vext_elem_mask(v0, mlen, i)) {
++        if (!vm && !vext_elem_mask(v0, i)) {
+             continue;
+         }
+         s1 = float32_add(s1, float16_to_float32(s2, true, &env->fp_status),
+@@ -4459,7 +4431,6 @@ void HELPER(vfwredsum_vs_h)(void *vd, void *v0, void *vs1,
+ void HELPER(vfwredsum_vs_w)(void *vd, void *v0, void *vs1,
+                             void *vs2, CPURISCVState *env, uint32_t desc)
+ {
+-    uint32_t mlen = vext_mlen(desc);
+     uint32_t vm = vext_vm(desc);
+     uint32_t vl = env->vl;
+     uint32_t i;
+@@ -4468,7 +4439,7 @@ void HELPER(vfwredsum_vs_w)(void *vd, void *v0, void *vs1,
+ 
+     for (i = 0; i < vl; i++) {
+         uint32_t s2 = *((uint32_t *)vs2 + H4(i));
+-        if (!vm && !vext_elem_mask(v0, mlen, i)) {
++        if (!vm && !vext_elem_mask(v0, i)) {
+             continue;
+         }
+         s1 = float64_add(s1, float32_to_float64(s2, &env->fp_status),
+@@ -4487,19 +4458,18 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1,          \
+                   void *vs2, CPURISCVState *env,          \
+                   uint32_t desc)                          \
+ {                                                         \
+-    uint32_t mlen = vext_mlen(desc);                      \
+-    uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen;   \
++    uint32_t vlmax = env_archcpu(env)->cfg.vlen;          \
+     uint32_t vl = env->vl;                                \
+     uint32_t i;                                           \
+     int a, b;                                             \
+                                                           \
+     for (i = 0; i < vl; i++) {                            \
+-        a = vext_elem_mask(vs1, mlen, i);                 \
+-        b = vext_elem_mask(vs2, mlen, i);                 \
+-        vext_set_elem_mask(vd, mlen, i, OP(b, a));        \
++        a = vext_elem_mask(vs1, i);                       \
++        b = vext_elem_mask(vs2, i);                       \
++        vext_set_elem_mask(vd, i, OP(b, a));              \
+     }                                                     \
+     for (; i < vlmax; i++) {                              \
+-        vext_set_elem_mask(vd, mlen, i, 0);               \
++        vext_set_elem_mask(vd, i, 0);                     \
+     }                                                     \
+ }
+ 
+@@ -4523,14 +4493,13 @@ target_ulong HELPER(vmpopc_m)(void *v0, void *vs2, CPURISCVState *env,
+                               uint32_t desc)
+ {
+     target_ulong cnt = 0;
+-    uint32_t mlen = vext_mlen(desc);
+     uint32_t vm = vext_vm(desc);
+     uint32_t vl = env->vl;
+     int i;
+ 
+     for (i = 0; i < vl; i++) {
+-        if (vm || vext_elem_mask(v0, mlen, i)) {
+-            if (vext_elem_mask(vs2, mlen, i)) {
++        if (vm || vext_elem_mask(v0, i)) {
++            if (vext_elem_mask(vs2, i)) {
+                 cnt++;
+             }
+         }
+@@ -4542,14 +4511,13 @@ target_ulong HELPER(vmpopc_m)(void *v0, void *vs2, CPURISCVState *env,
+ target_ulong HELPER(vmfirst_m)(void *v0, void *vs2, CPURISCVState *env,
+                                uint32_t desc)
+ {
+-    uint32_t mlen = vext_mlen(desc);
+     uint32_t vm = vext_vm(desc);
+     uint32_t vl = env->vl;
+     int i;
+ 
+     for (i = 0; i < vl; i++) {
+-        if (vm || vext_elem_mask(v0, mlen, i)) {
+-            if (vext_elem_mask(vs2, mlen, i)) {
++        if (vm || vext_elem_mask(v0, i)) {
++            if (vext_elem_mask(vs2, i)) {
+                 return i;
+             }
+         }
+@@ -4566,39 +4534,38 @@ enum set_mask_type {
+ static void vmsetm(void *vd, void *v0, void *vs2, CPURISCVState *env,
+                    uint32_t desc, enum set_mask_type type)
+ {
+-    uint32_t mlen = vext_mlen(desc);
+-    uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen;
++    uint32_t vlmax = env_archcpu(env)->cfg.vlen;
+     uint32_t vm = vext_vm(desc);
+     uint32_t vl = env->vl;
+     int i;
+     bool first_mask_bit = false;
+ 
+     for (i = 0; i < vl; i++) {
+-        if (!vm && !vext_elem_mask(v0, mlen, i)) {
++        if (!vm && !vext_elem_mask(v0, i)) {
+             continue;
+         }
+         /* write a zero to all following active elements */
+         if (first_mask_bit) {
+-            vext_set_elem_mask(vd, mlen, i, 0);
++            vext_set_elem_mask(vd, i, 0);
+             continue;
+         }
+-        if (vext_elem_mask(vs2, mlen, i)) {
++        if (vext_elem_mask(vs2, i)) {
+             first_mask_bit = true;
+             if (type == BEFORE_FIRST) {
+-                vext_set_elem_mask(vd, mlen, i, 0);
++                vext_set_elem_mask(vd, i, 0);
+             } else {
+-                vext_set_elem_mask(vd, mlen, i, 1);
++                vext_set_elem_mask(vd, i, 1);
+             }
+         } else {
+             if (type == ONLY_FIRST) {
+-                vext_set_elem_mask(vd, mlen, i, 0);
++                vext_set_elem_mask(vd, i, 0);
+             } else {
+-                vext_set_elem_mask(vd, mlen, i, 1);
++                vext_set_elem_mask(vd, i, 1);
+             }
+         }
+     }
+     for (; i < vlmax; i++) {
+-        vext_set_elem_mask(vd, mlen, i, 0);
++        vext_set_elem_mask(vd, i, 0);
+     }
+ }
+ 
+@@ -4625,19 +4592,18 @@ void HELPER(vmsof_m)(void *vd, void *v0, void *vs2, CPURISCVState *env,
+ void HELPER(NAME)(void *vd, void *v0, void *vs2, CPURISCVState *env,      \
+                   uint32_t desc)                                          \
+ {                                                                         \
+-    uint32_t mlen = vext_mlen(desc);                                      \
+-    uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen;                   \
++    uint32_t vlmax = env_archcpu(env)->cfg.vlen;                          \
+     uint32_t vm = vext_vm(desc);                                          \
+     uint32_t vl = env->vl;                                                \
+     uint32_t sum = 0;                                                     \
+     int i;                                                                \
+                                                                           \
+     for (i = 0; i < vl; i++) {                                            \
+-        if (!vm && !vext_elem_mask(v0, mlen, i)) {                        \
++        if (!vm && !vext_elem_mask(v0, i)) {                              \
+             continue;                                                     \
+         }                                                                 \
+         *((ETYPE *)vd + H(i)) = sum;                                      \
+-        if (vext_elem_mask(vs2, mlen, i)) {                               \
++        if (vext_elem_mask(vs2, i)) {                                     \
+             sum++;                                                        \
+         }                                                                 \
+     }                                                                     \
+@@ -4653,14 +4619,13 @@ GEN_VEXT_VIOTA_M(viota_m_d, uint64_t, H8, clearq)
+ #define GEN_VEXT_VID_V(NAME, ETYPE, H, CLEAR_FN)                          \
+ void HELPER(NAME)(void *vd, void *v0, CPURISCVState *env, uint32_t desc)  \
+ {                                                                         \
+-    uint32_t mlen = vext_mlen(desc);                                      \
+-    uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen;                   \
++    uint32_t vlmax = env_archcpu(env)->cfg.vlen;                          \
+     uint32_t vm = vext_vm(desc);                                          \
+     uint32_t vl = env->vl;                                                \
+     int i;                                                                \
+                                                                           \
+     for (i = 0; i < vl; i++) {                                            \
+-        if (!vm && !vext_elem_mask(v0, mlen, i)) {                        \
++        if (!vm && !vext_elem_mask(v0, i)) {                              \
+             continue;                                                     \
+         }                                                                 \
+         *((ETYPE *)vd + H(i)) = i;                                        \
+@@ -4682,14 +4647,13 @@ GEN_VEXT_VID_V(vid_v_d, uint64_t, H8, clearq)
+ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2,         \
+                   CPURISCVState *env, uint32_t desc)                      \
+ {                                                                         \
+-    uint32_t mlen = vext_mlen(desc);                                      \
+-    uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen;                   \
++    uint32_t vlmax = env_archcpu(env)->cfg.vlen;                          \
+     uint32_t vm = vext_vm(desc);                                          \
+     uint32_t vl = env->vl;                                                \
+     target_ulong offset = s1, i;                                          \
+                                                                           \
+     for (i = offset; i < vl; i++) {                                       \
+-        if (!vm && !vext_elem_mask(v0, mlen, i)) {                        \
++        if (!vm && !vext_elem_mask(v0, i)) {                              \
+             continue;                                                     \
+         }                                                                 \
+         *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i - offset));          \
+@@ -4707,15 +4671,14 @@ GEN_VEXT_VSLIDEUP_VX(vslideup_vx_d, uint64_t, H8, clearq)
+ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2,         \
+                   CPURISCVState *env, uint32_t desc)                      \
+ {                                                                         \
+-    uint32_t mlen = vext_mlen(desc);                                      \
+-    uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen;                   \
++    uint32_t vlmax = env_archcpu(env)->cfg.vlen;                          \
+     uint32_t vm = vext_vm(desc);                                          \
+     uint32_t vl = env->vl;                                                \
+     target_ulong offset = s1, i;                                          \
+                                                                           \
+     for (i = 0; i < vl; ++i) {                                            \
+         target_ulong j = i + offset;                                      \
+-        if (!vm && !vext_elem_mask(v0, mlen, i)) {                        \
++        if (!vm && !vext_elem_mask(v0, i)) {                              \
+             continue;                                                     \
+         }                                                                 \
+         *((ETYPE *)vd + H(i)) = j >= vlmax ? 0 : *((ETYPE *)vs2 + H(j));  \
+@@ -4733,14 +4696,13 @@ GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_d, uint64_t, H8, clearq)
+ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2,         \
+                   CPURISCVState *env, uint32_t desc)                      \
+ {                                                                         \
+-    uint32_t mlen = vext_mlen(desc);                                      \
+-    uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen;                   \
++    uint32_t vlmax = env_archcpu(env)->cfg.vlen;                          \
+     uint32_t vm = vext_vm(desc);                                          \
+     uint32_t vl = env->vl;                                                \
+     uint32_t i;                                                           \
+                                                                           \
+     for (i = 0; i < vl; i++) {                                            \
+-        if (!vm && !vext_elem_mask(v0, mlen, i)) {                        \
++        if (!vm && !vext_elem_mask(v0, i)) {                              \
+             continue;                                                     \
+         }                                                                 \
+         if (i == 0) {                                                     \
+@@ -4762,14 +4724,13 @@ GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_d, uint64_t, H8, clearq)
+ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2,         \
+                   CPURISCVState *env, uint32_t desc)                      \
+ {                                                                         \
+-    uint32_t mlen = vext_mlen(desc);                                      \
+-    uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen;                   \
++    uint32_t vlmax = env_archcpu(env)->cfg.vlen;                          \
+     uint32_t vm = vext_vm(desc);                                          \
+     uint32_t vl = env->vl;                                                \
+     uint32_t i;                                                           \
+                                                                           \
+     for (i = 0; i < vl; i++) {                                            \
+-        if (!vm && !vext_elem_mask(v0, mlen, i)) {                        \
++        if (!vm && !vext_elem_mask(v0, i)) {                              \
+             continue;                                                     \
+         }                                                                 \
+         if (i == vl - 1) {                                                \
+@@ -4792,15 +4753,14 @@ GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_d, uint64_t, H8, clearq)
+ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2,               \
+                   CPURISCVState *env, uint32_t desc)                      \
+ {                                                                         \
+-    uint32_t mlen = vext_mlen(desc);                                      \
+-    uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen;                   \
++    uint32_t vlmax = env_archcpu(env)->cfg.vlen;                          \
+     uint32_t vm = vext_vm(desc);                                          \
+     uint32_t vl = env->vl;                                                \
+     uint64_t index;                                                       \
+     uint32_t i;                                                           \
+                                                                           \
+     for (i = 0; i < vl; i++) {                                            \
+-        if (!vm && !vext_elem_mask(v0, mlen, i)) {                        \
++        if (!vm && !vext_elem_mask(v0, i)) {                              \
+             continue;                                                     \
+         }                                                                 \
+         index = *((ETYPE *)vs1 + H(i));                                   \
+@@ -4823,15 +4783,14 @@ GEN_VEXT_VRGATHER_VV(vrgather_vv_d, uint64_t, H8, clearq)
+ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2,         \
+                   CPURISCVState *env, uint32_t desc)                      \
+ {                                                                         \
+-    uint32_t mlen = vext_mlen(desc);                                      \
+-    uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen;                   \
++    uint32_t vlmax = env_archcpu(env)->cfg.vlen;                          \
+     uint32_t vm = vext_vm(desc);                                          \
+     uint32_t vl = env->vl;                                                \
+     uint64_t index = s1;                                                  \
+     uint32_t i;                                                           \
+                                                                           \
+     for (i = 0; i < vl; i++) {                                            \
+-        if (!vm && !vext_elem_mask(v0, mlen, i)) {                        \
++        if (!vm && !vext_elem_mask(v0, i)) {                              \
+             continue;                                                     \
+         }                                                                 \
+         if (index >= vlmax) {                                             \
+@@ -4854,13 +4813,12 @@ GEN_VEXT_VRGATHER_VX(vrgather_vx_d, uint64_t, H8, clearq)
+ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2,               \
+                   CPURISCVState *env, uint32_t desc)                      \
+ {                                                                         \
+-    uint32_t mlen = vext_mlen(desc);                                      \
+-    uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen;                   \
++    uint32_t vlmax = env_archcpu(env)->cfg.vlen;                          \
+     uint32_t vl = env->vl;                                                \
+     uint32_t num = 0, i;                                                  \
+                                                                           \
+     for (i = 0; i < vl; i++) {                                            \
+-        if (!vext_elem_mask(vs1, mlen, i)) {                              \
++        if (!vext_elem_mask(vs1, i)) {                                    \
+             continue;                                                     \
+         }                                                                 \
+         *((ETYPE *)vd + H(num)) = *((ETYPE *)vs2 + H(i));                 \
+-- 
+2.33.1
+

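The hunks above finish dropping the old mlen parameter from the mask accessors: in RVV 1.0 a mask register holds exactly one bit per element, so the helpers can index the mask directly by element number. A minimal sketch of the simplified accessors, assuming QEMU's packed-uint64_t mask layout and its deposit64 bit helper (illustrative, not a verbatim copy of the patched vector_helper.c):

static inline int vext_elem_mask(void *v0, int index)
{
    int idx = index / 64;                       /* which 64-bit mask word   */
    int pos = index % 64;                       /* bit position inside it   */
    return (((uint64_t *)v0)[idx] >> pos) & 1;  /* one mask bit per element */
}

static inline void vext_set_elem_mask(void *v0, int index, uint8_t value)
{
    int idx = index / 64;
    int pos = index % 64;
    uint64_t old = ((uint64_t *)v0)[idx];
    ((uint64_t *)v0)[idx] = deposit64(old, pos, 1, value);  /* write a single bit */
}

This is why every call site in the hunks above becomes vext_elem_mask(vs2, i) / vext_set_elem_mask(vd, i, ...) with no mlen scaling.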
+ 123 - 0
recipes-devtools/qemu/qemu/0020-target-riscv-rvv-1.0-add-fractional-LMUL.patch

@@ -0,0 +1,123 @@
+From 37f5622ac3d829866e2d81a1c5f46dc67ba22420 Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Fri, 14 Aug 2020 17:35:55 +0800
+Subject: [PATCH 020/107] target/riscv: rvv-1.0: add fractional LMUL
+
+Introduce the concept of fractional LMUL for RVV 1.0.
+In RVV 1.0, the LMUL bits are contiguous in the vtype register.
+
+Also rearrange rvv bits in TB_FLAGS to skip MSTATUS_VS (0x600)
+and MSTATUS_FS (0x6000) bits.
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
+---
+ target/riscv/cpu.h           | 18 ++++++++++--------
+ target/riscv/translate.c     | 16 ++++++++++++++--
+ target/riscv/vector_helper.c | 16 ++++++++++++++--
+ 3 files changed, 38 insertions(+), 12 deletions(-)
+
+diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
+index 8fd7c01567..33cb265304 100644
+--- a/target/riscv/cpu.h
++++ b/target/riscv/cpu.h
+@@ -105,10 +105,10 @@ typedef struct CPURISCVState CPURISCVState;
+ 
+ #define RV_VLEN_MAX 256
+ 
+-FIELD(VTYPE, VLMUL, 0, 2)
+-FIELD(VTYPE, VSEW, 2, 3)
+-FIELD(VTYPE, VEDIV, 5, 2)
+-FIELD(VTYPE, RESERVED, 7, sizeof(target_ulong) * 8 - 9)
++FIELD(VTYPE, VLMUL, 0, 3)
++FIELD(VTYPE, VSEW, 3, 3)
++FIELD(VTYPE, VEDIV, 8, 2)
++FIELD(VTYPE, RESERVED, 10, sizeof(target_ulong) * 8 - 11)
+ FIELD(VTYPE, VILL, sizeof(target_ulong) * 8 - 1, 1)
+ 
+ struct CPURISCVState {
+@@ -390,12 +390,14 @@ typedef RISCVCPU ArchCPU;
+ #include "exec/cpu-all.h"
+ 
+ FIELD(TB_FLAGS, MEM_IDX, 0, 3)
+-FIELD(TB_FLAGS, VL_EQ_VLMAX, 3, 1)
+-FIELD(TB_FLAGS, LMUL, 4, 2)
++FIELD(TB_FLAGS, LMUL, 3, 3)
+ FIELD(TB_FLAGS, SEW, 6, 3)
+-FIELD(TB_FLAGS, VILL, 9, 1)
++/* Skip MSTATUS_VS (0x600) bits */
++FIELD(TB_FLAGS, VL_EQ_VLMAX, 11, 1)
++FIELD(TB_FLAGS, VILL, 12, 1)
++/* Skip MSTATUS_FS (0x6000) bits */
+ /* Is a Hypervisor instruction load/store allowed? */
+-FIELD(TB_FLAGS, HLSX, 10, 1)
++FIELD(TB_FLAGS, HLSX, 15, 1)
+ 
+ bool riscv_cpu_is_32bit(CPURISCVState *env);
+ 
+diff --git a/target/riscv/translate.c b/target/riscv/translate.c
+index b18f76c344..d10e489cfe 100644
+--- a/target/riscv/translate.c
++++ b/target/riscv/translate.c
+@@ -61,7 +61,19 @@ typedef struct DisasContext {
+     bool hlsx;
+     /* vector extension */
+     bool vill;
+-    uint8_t lmul;
++    /*
++     * Encode LMUL to lmul as follows:
++     *     LMUL    vlmul    lmul
++     *      1       000       0
++     *      2       001       1
++     *      4       010       2
++     *      8       011       3
++     *      -       100       -
++     *     1/8      101      -3
++     *     1/4      110      -2
++     *     1/2      111      -1
++     */
++    int8_t lmul;
+     uint8_t sew;
+     uint16_t vlen;
+     bool vl_eq_vlmax;
+@@ -694,7 +706,7 @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
+     ctx->hlsx = FIELD_EX32(tb_flags, TB_FLAGS, HLSX);
+     ctx->vill = FIELD_EX32(tb_flags, TB_FLAGS, VILL);
+     ctx->sew = FIELD_EX32(tb_flags, TB_FLAGS, SEW);
+-    ctx->lmul = FIELD_EX32(tb_flags, TB_FLAGS, LMUL);
++    ctx->lmul = sextract32(FIELD_EX32(tb_flags, TB_FLAGS, LMUL), 0, 3);
+     ctx->vl_eq_vlmax = FIELD_EX32(tb_flags, TB_FLAGS, VL_EQ_VLMAX);
+     ctx->cs = cs;
+ }
+diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
+index dea1d190ed..f28d20a97d 100644
+--- a/target/riscv/vector_helper.c
++++ b/target/riscv/vector_helper.c
+@@ -86,9 +86,21 @@ static inline uint32_t vext_vm(uint32_t desc)
+     return FIELD_EX32(simd_data(desc), VDATA, VM);
+ }
+ 
+-static inline uint32_t vext_lmul(uint32_t desc)
++/*
++ * Encode LMUL to lmul as follows:
++ *     LMUL    vlmul    lmul
++ *      1       000       0
++ *      2       001       1
++ *      4       010       2
++ *      8       011       3
++ *      -       100       -
++ *     1/8      101      -3
++ *     1/4      110      -2
++ *     1/2      111      -1
++ */
++static inline int32_t vext_lmul(uint32_t desc)
+ {
+-    return FIELD_EX32(simd_data(desc), VDATA, LMUL);
++    return sextract32(FIELD_EX32(simd_data(desc), VDATA, LMUL), 0, 3);
+ }
+ 
+ static uint32_t vext_wd(uint32_t desc)
+-- 
+2.33.1
+

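The fractional-LMUL patch above turns lmul into a signed 3-bit quantity (-3..3 for LMUL = 1/8..8) and sign-extends it with sextract32(). A hypothetical helper, not part of the patch, showing how such a signed lmul would feed into the VLMAX computation (parameter names are illustrative):

/* vlen_bits: VLEN in bits; sew: the vtype.vsew field, i.e. SEW = 8 << sew bits;
 * lmul: the signed encoding from the table above. */
static uint32_t vlmax_for(uint32_t vlen_bits, uint32_t sew, int8_t lmul)
{
    uint32_t elems = vlen_bits >> (3 + sew);                /* VLEN / SEW, in elements */
    return (lmul < 0) ? (elems >> -lmul) : (elems << lmul); /* scale by LMUL           */
}

For example, with VLEN = 256, SEW = 32 (sew = 2) and LMUL = 1/2 (lmul = -1), vlmax_for(256, 2, -1) yields 4.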
+ 3290 - 0
recipes-devtools/qemu/qemu/0021-target-riscv-rvv-1.0-add-VMA-and-VTA.patch

@@ -0,0 +1,3290 @@
+From 2258cdd072fa32063a7a66fef68808cd597afb9d Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Thu, 30 Jul 2020 20:42:19 +0800
+Subject: [PATCH 021/107] target/riscv: rvv-1.0: add VMA and VTA
+
+Introduce vma and vta fields in vtype register.
+
+According to RVV 1.0 spec (section 3.3.3):
+
+When a set is marked agnostic, the corresponding set of destination
+elements in any vector or mask destination operand can either retain
+the value they previously held, or are overwritten with 1s.
+
+So, whether vta/vma is set to undisturbed or agnostic, it is legal to
+retain the original values of the inactive masked-off elements and the
+tail elements unchanged. Therefore, besides declaring the vta/vma fields
+in the vtype register, this commit also removes all the tail-element
+clearing functions.
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
+---
+ target/riscv/cpu.h           |    2 +
+ target/riscv/vector_helper.c | 1927 ++++++++++++++++------------------
+ 2 files changed, 891 insertions(+), 1038 deletions(-)
+
+diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
+index 33cb265304..3835d22ca1 100644
+--- a/target/riscv/cpu.h
++++ b/target/riscv/cpu.h
+@@ -107,6 +107,8 @@ typedef struct CPURISCVState CPURISCVState;
+ 
+ FIELD(VTYPE, VLMUL, 0, 3)
+ FIELD(VTYPE, VSEW, 3, 3)
++FIELD(VTYPE, VTA, 6, 1)
++FIELD(VTYPE, VMA, 7, 1)
+ FIELD(VTYPE, VEDIV, 8, 2)
+ FIELD(VTYPE, RESERVED, 10, sizeof(target_ulong) * 8 - 11)
+ FIELD(VTYPE, VILL, sizeof(target_ulong) * 8 - 1, 1)
+diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
+index f28d20a97d..5a142a1f4b 100644
+--- a/target/riscv/vector_helper.c
++++ b/target/riscv/vector_helper.c
+@@ -146,55 +146,6 @@ static void probe_pages(CPURISCVState *env, target_ulong addr,
+     }
+ }
+ 
+-#ifdef HOST_WORDS_BIGENDIAN
+-static void vext_clear(void *tail, uint32_t cnt, uint32_t tot)
+-{
+-    /*
+-     * Split the remaining range to two parts.
+-     * The first part is in the last uint64_t unit.
+-     * The second part start from the next uint64_t unit.
+-     */
+-    int part1 = 0, part2 = tot - cnt;
+-    if (cnt % 8) {
+-        part1 = 8 - (cnt % 8);
+-        part2 = tot - cnt - part1;
+-        memset(QEMU_ALIGN_PTR_DOWN(tail, 8), 0, part1);
+-        memset(QEMU_ALIGN_PTR_UP(tail, 8), 0, part2);
+-    } else {
+-        memset(tail, 0, part2);
+-    }
+-}
+-#else
+-static void vext_clear(void *tail, uint32_t cnt, uint32_t tot)
+-{
+-    memset(tail, 0, tot - cnt);
+-}
+-#endif
+-
+-static void clearb(void *vd, uint32_t idx, uint32_t cnt, uint32_t tot)
+-{
+-    int8_t *cur = ((int8_t *)vd + H1(idx));
+-    vext_clear(cur, cnt, tot);
+-}
+-
+-static void clearh(void *vd, uint32_t idx, uint32_t cnt, uint32_t tot)
+-{
+-    int16_t *cur = ((int16_t *)vd + H2(idx));
+-    vext_clear(cur, cnt, tot);
+-}
+-
+-static void clearl(void *vd, uint32_t idx, uint32_t cnt, uint32_t tot)
+-{
+-    int32_t *cur = ((int32_t *)vd + H4(idx));
+-    vext_clear(cur, cnt, tot);
+-}
+-
+-static void clearq(void *vd, uint32_t idx, uint32_t cnt, uint32_t tot)
+-{
+-    int64_t *cur = (int64_t *)vd + idx;
+-    vext_clear(cur, cnt, tot);
+-}
+-
+ static inline void vext_set_elem_mask(void *v0, int index,
+         uint8_t value)
+ {
+@@ -219,7 +170,6 @@ static inline int vext_elem_mask(void *v0, int index)
+ /* elements operations for load and store */
+ typedef void vext_ldst_elem_fn(CPURISCVState *env, target_ulong addr,
+                                uint32_t idx, void *vd, uintptr_t retaddr);
+-typedef void clear_fn(void *vd, uint32_t idx, uint32_t cnt, uint32_t tot);
+ 
+ #define GEN_VEXT_LD_ELEM(NAME, MTYPE, ETYPE, H, LDSUF)     \
+ static void NAME(CPURISCVState *env, abi_ptr addr,         \
+@@ -283,7 +233,7 @@ static void
+ vext_ldst_stride(void *vd, void *v0, target_ulong base,
+                  target_ulong stride, CPURISCVState *env,
+                  uint32_t desc, uint32_t vm,
+-                 vext_ldst_elem_fn *ldst_elem, clear_fn *clear_elem,
++                 vext_ldst_elem_fn *ldst_elem,
+                  uint32_t esz, uint32_t msz, uintptr_t ra,
+                  MMUAccessType access_type)
+ {
+@@ -310,47 +260,41 @@ vext_ldst_stride(void *vd, void *v0, target_ulong base,
+             k++;
+         }
+     }
+-    /* clear tail elements */
+-    if (clear_elem) {
+-        for (k = 0; k < nf; k++) {
+-            clear_elem(vd, env->vl + k * vlmax, env->vl * esz, vlmax * esz);
+-        }
+-    }
+ }
+ 
+-#define GEN_VEXT_LD_STRIDE(NAME, MTYPE, ETYPE, LOAD_FN, CLEAR_FN)       \
++#define GEN_VEXT_LD_STRIDE(NAME, MTYPE, ETYPE, LOAD_FN)                 \
+ void HELPER(NAME)(void *vd, void * v0, target_ulong base,               \
+                   target_ulong stride, CPURISCVState *env,              \
+                   uint32_t desc)                                        \
+ {                                                                       \
+     uint32_t vm = vext_vm(desc);                                        \
+     vext_ldst_stride(vd, v0, base, stride, env, desc, vm, LOAD_FN,      \
+-                     CLEAR_FN, sizeof(ETYPE), sizeof(MTYPE),            \
++                     sizeof(ETYPE), sizeof(MTYPE),                      \
+                      GETPC(), MMU_DATA_LOAD);                           \
+ }
+ 
+-GEN_VEXT_LD_STRIDE(vlsb_v_b,  int8_t,   int8_t,   ldb_b,  clearb)
+-GEN_VEXT_LD_STRIDE(vlsb_v_h,  int8_t,   int16_t,  ldb_h,  clearh)
+-GEN_VEXT_LD_STRIDE(vlsb_v_w,  int8_t,   int32_t,  ldb_w,  clearl)
+-GEN_VEXT_LD_STRIDE(vlsb_v_d,  int8_t,   int64_t,  ldb_d,  clearq)
+-GEN_VEXT_LD_STRIDE(vlsh_v_h,  int16_t,  int16_t,  ldh_h,  clearh)
+-GEN_VEXT_LD_STRIDE(vlsh_v_w,  int16_t,  int32_t,  ldh_w,  clearl)
+-GEN_VEXT_LD_STRIDE(vlsh_v_d,  int16_t,  int64_t,  ldh_d,  clearq)
+-GEN_VEXT_LD_STRIDE(vlsw_v_w,  int32_t,  int32_t,  ldw_w,  clearl)
+-GEN_VEXT_LD_STRIDE(vlsw_v_d,  int32_t,  int64_t,  ldw_d,  clearq)
+-GEN_VEXT_LD_STRIDE(vlse_v_b,  int8_t,   int8_t,   lde_b,  clearb)
+-GEN_VEXT_LD_STRIDE(vlse_v_h,  int16_t,  int16_t,  lde_h,  clearh)
+-GEN_VEXT_LD_STRIDE(vlse_v_w,  int32_t,  int32_t,  lde_w,  clearl)
+-GEN_VEXT_LD_STRIDE(vlse_v_d,  int64_t,  int64_t,  lde_d,  clearq)
+-GEN_VEXT_LD_STRIDE(vlsbu_v_b, uint8_t,  uint8_t,  ldbu_b, clearb)
+-GEN_VEXT_LD_STRIDE(vlsbu_v_h, uint8_t,  uint16_t, ldbu_h, clearh)
+-GEN_VEXT_LD_STRIDE(vlsbu_v_w, uint8_t,  uint32_t, ldbu_w, clearl)
+-GEN_VEXT_LD_STRIDE(vlsbu_v_d, uint8_t,  uint64_t, ldbu_d, clearq)
+-GEN_VEXT_LD_STRIDE(vlshu_v_h, uint16_t, uint16_t, ldhu_h, clearh)
+-GEN_VEXT_LD_STRIDE(vlshu_v_w, uint16_t, uint32_t, ldhu_w, clearl)
+-GEN_VEXT_LD_STRIDE(vlshu_v_d, uint16_t, uint64_t, ldhu_d, clearq)
+-GEN_VEXT_LD_STRIDE(vlswu_v_w, uint32_t, uint32_t, ldwu_w, clearl)
+-GEN_VEXT_LD_STRIDE(vlswu_v_d, uint32_t, uint64_t, ldwu_d, clearq)
++GEN_VEXT_LD_STRIDE(vlsb_v_b,  int8_t,   int8_t,   ldb_b)
++GEN_VEXT_LD_STRIDE(vlsb_v_h,  int8_t,   int16_t,  ldb_h)
++GEN_VEXT_LD_STRIDE(vlsb_v_w,  int8_t,   int32_t,  ldb_w)
++GEN_VEXT_LD_STRIDE(vlsb_v_d,  int8_t,   int64_t,  ldb_d)
++GEN_VEXT_LD_STRIDE(vlsh_v_h,  int16_t,  int16_t,  ldh_h)
++GEN_VEXT_LD_STRIDE(vlsh_v_w,  int16_t,  int32_t,  ldh_w)
++GEN_VEXT_LD_STRIDE(vlsh_v_d,  int16_t,  int64_t,  ldh_d)
++GEN_VEXT_LD_STRIDE(vlsw_v_w,  int32_t,  int32_t,  ldw_w)
++GEN_VEXT_LD_STRIDE(vlsw_v_d,  int32_t,  int64_t,  ldw_d)
++GEN_VEXT_LD_STRIDE(vlse_v_b,  int8_t,   int8_t,   lde_b)
++GEN_VEXT_LD_STRIDE(vlse_v_h,  int16_t,  int16_t,  lde_h)
++GEN_VEXT_LD_STRIDE(vlse_v_w,  int32_t,  int32_t,  lde_w)
++GEN_VEXT_LD_STRIDE(vlse_v_d,  int64_t,  int64_t,  lde_d)
++GEN_VEXT_LD_STRIDE(vlsbu_v_b, uint8_t,  uint8_t,  ldbu_b)
++GEN_VEXT_LD_STRIDE(vlsbu_v_h, uint8_t,  uint16_t, ldbu_h)
++GEN_VEXT_LD_STRIDE(vlsbu_v_w, uint8_t,  uint32_t, ldbu_w)
++GEN_VEXT_LD_STRIDE(vlsbu_v_d, uint8_t,  uint64_t, ldbu_d)
++GEN_VEXT_LD_STRIDE(vlshu_v_h, uint16_t, uint16_t, ldhu_h)
++GEN_VEXT_LD_STRIDE(vlshu_v_w, uint16_t, uint32_t, ldhu_w)
++GEN_VEXT_LD_STRIDE(vlshu_v_d, uint16_t, uint64_t, ldhu_d)
++GEN_VEXT_LD_STRIDE(vlswu_v_w, uint32_t, uint32_t, ldwu_w)
++GEN_VEXT_LD_STRIDE(vlswu_v_d, uint32_t, uint64_t, ldwu_d)
+ 
+ #define GEN_VEXT_ST_STRIDE(NAME, MTYPE, ETYPE, STORE_FN)                \
+ void HELPER(NAME)(void *vd, void *v0, target_ulong base,                \
+@@ -359,7 +303,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong base,                \
+ {                                                                       \
+     uint32_t vm = vext_vm(desc);                                        \
+     vext_ldst_stride(vd, v0, base, stride, env, desc, vm, STORE_FN,     \
+-                     NULL, sizeof(ETYPE), sizeof(MTYPE),                \
++                     sizeof(ETYPE), sizeof(MTYPE),                      \
+                      GETPC(), MMU_DATA_STORE);                          \
+ }
+ 
+@@ -384,9 +328,8 @@ GEN_VEXT_ST_STRIDE(vsse_v_d, int64_t, int64_t, ste_d)
+ /* unmasked unit-stride load and store operation*/
+ static void
+ vext_ldst_us(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc,
+-             vext_ldst_elem_fn *ldst_elem, clear_fn *clear_elem,
+-             uint32_t esz, uint32_t msz, uintptr_t ra,
+-             MMUAccessType access_type)
++             vext_ldst_elem_fn *ldst_elem, uint32_t esz, uint32_t msz,
++             uintptr_t ra, MMUAccessType access_type)
+ {
+     uint32_t i, k;
+     uint32_t nf = vext_nf(desc);
+@@ -403,12 +346,6 @@ vext_ldst_us(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc,
+             k++;
+         }
+     }
+-    /* clear tail elements */
+-    if (clear_elem) {
+-        for (k = 0; k < nf; k++) {
+-            clear_elem(vd, env->vl + k * vlmax, env->vl * esz, vlmax * esz);
+-        }
+-    }
+ }
+ 
+ /*
+@@ -416,45 +353,45 @@ vext_ldst_us(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc,
+  * stride = NF * sizeof (MTYPE)
+  */
+ 
+-#define GEN_VEXT_LD_US(NAME, MTYPE, ETYPE, LOAD_FN, CLEAR_FN)           \
++#define GEN_VEXT_LD_US(NAME, MTYPE, ETYPE, LOAD_FN)                     \
+ void HELPER(NAME##_mask)(void *vd, void *v0, target_ulong base,         \
+                          CPURISCVState *env, uint32_t desc)             \
+ {                                                                       \
+     uint32_t stride = vext_nf(desc) * sizeof(MTYPE);                    \
+     vext_ldst_stride(vd, v0, base, stride, env, desc, false, LOAD_FN,   \
+-                     CLEAR_FN, sizeof(ETYPE), sizeof(MTYPE),            \
++                     sizeof(ETYPE), sizeof(MTYPE),                      \
+                      GETPC(), MMU_DATA_LOAD);                           \
+ }                                                                       \
+                                                                         \
+ void HELPER(NAME)(void *vd, void *v0, target_ulong base,                \
+                   CPURISCVState *env, uint32_t desc)                    \
+ {                                                                       \
+-    vext_ldst_us(vd, base, env, desc, LOAD_FN, CLEAR_FN,                \
++    vext_ldst_us(vd, base, env, desc, LOAD_FN,                          \
+                  sizeof(ETYPE), sizeof(MTYPE), GETPC(), MMU_DATA_LOAD); \
+ }
+ 
+-GEN_VEXT_LD_US(vlb_v_b,  int8_t,   int8_t,   ldb_b,  clearb)
+-GEN_VEXT_LD_US(vlb_v_h,  int8_t,   int16_t,  ldb_h,  clearh)
+-GEN_VEXT_LD_US(vlb_v_w,  int8_t,   int32_t,  ldb_w,  clearl)
+-GEN_VEXT_LD_US(vlb_v_d,  int8_t,   int64_t,  ldb_d,  clearq)
+-GEN_VEXT_LD_US(vlh_v_h,  int16_t,  int16_t,  ldh_h,  clearh)
+-GEN_VEXT_LD_US(vlh_v_w,  int16_t,  int32_t,  ldh_w,  clearl)
+-GEN_VEXT_LD_US(vlh_v_d,  int16_t,  int64_t,  ldh_d,  clearq)
+-GEN_VEXT_LD_US(vlw_v_w,  int32_t,  int32_t,  ldw_w,  clearl)
+-GEN_VEXT_LD_US(vlw_v_d,  int32_t,  int64_t,  ldw_d,  clearq)
+-GEN_VEXT_LD_US(vle_v_b,  int8_t,   int8_t,   lde_b,  clearb)
+-GEN_VEXT_LD_US(vle_v_h,  int16_t,  int16_t,  lde_h,  clearh)
+-GEN_VEXT_LD_US(vle_v_w,  int32_t,  int32_t,  lde_w,  clearl)
+-GEN_VEXT_LD_US(vle_v_d,  int64_t,  int64_t,  lde_d,  clearq)
+-GEN_VEXT_LD_US(vlbu_v_b, uint8_t,  uint8_t,  ldbu_b, clearb)
+-GEN_VEXT_LD_US(vlbu_v_h, uint8_t,  uint16_t, ldbu_h, clearh)
+-GEN_VEXT_LD_US(vlbu_v_w, uint8_t,  uint32_t, ldbu_w, clearl)
+-GEN_VEXT_LD_US(vlbu_v_d, uint8_t,  uint64_t, ldbu_d, clearq)
+-GEN_VEXT_LD_US(vlhu_v_h, uint16_t, uint16_t, ldhu_h, clearh)
+-GEN_VEXT_LD_US(vlhu_v_w, uint16_t, uint32_t, ldhu_w, clearl)
+-GEN_VEXT_LD_US(vlhu_v_d, uint16_t, uint64_t, ldhu_d, clearq)
+-GEN_VEXT_LD_US(vlwu_v_w, uint32_t, uint32_t, ldwu_w, clearl)
+-GEN_VEXT_LD_US(vlwu_v_d, uint32_t, uint64_t, ldwu_d, clearq)
++GEN_VEXT_LD_US(vlb_v_b,  int8_t,   int8_t,   ldb_b)
++GEN_VEXT_LD_US(vlb_v_h,  int8_t,   int16_t,  ldb_h)
++GEN_VEXT_LD_US(vlb_v_w,  int8_t,   int32_t,  ldb_w)
++GEN_VEXT_LD_US(vlb_v_d,  int8_t,   int64_t,  ldb_d)
++GEN_VEXT_LD_US(vlh_v_h,  int16_t,  int16_t,  ldh_h)
++GEN_VEXT_LD_US(vlh_v_w,  int16_t,  int32_t,  ldh_w)
++GEN_VEXT_LD_US(vlh_v_d,  int16_t,  int64_t,  ldh_d)
++GEN_VEXT_LD_US(vlw_v_w,  int32_t,  int32_t,  ldw_w)
++GEN_VEXT_LD_US(vlw_v_d,  int32_t,  int64_t,  ldw_d)
++GEN_VEXT_LD_US(vle_v_b,  int8_t,   int8_t,   lde_b)
++GEN_VEXT_LD_US(vle_v_h,  int16_t,  int16_t,  lde_h)
++GEN_VEXT_LD_US(vle_v_w,  int32_t,  int32_t,  lde_w)
++GEN_VEXT_LD_US(vle_v_d,  int64_t,  int64_t,  lde_d)
++GEN_VEXT_LD_US(vlbu_v_b, uint8_t,  uint8_t,  ldbu_b)
++GEN_VEXT_LD_US(vlbu_v_h, uint8_t,  uint16_t, ldbu_h)
++GEN_VEXT_LD_US(vlbu_v_w, uint8_t,  uint32_t, ldbu_w)
++GEN_VEXT_LD_US(vlbu_v_d, uint8_t,  uint64_t, ldbu_d)
++GEN_VEXT_LD_US(vlhu_v_h, uint16_t, uint16_t, ldhu_h)
++GEN_VEXT_LD_US(vlhu_v_w, uint16_t, uint32_t, ldhu_w)
++GEN_VEXT_LD_US(vlhu_v_d, uint16_t, uint64_t, ldhu_d)
++GEN_VEXT_LD_US(vlwu_v_w, uint32_t, uint32_t, ldwu_w)
++GEN_VEXT_LD_US(vlwu_v_d, uint32_t, uint64_t, ldwu_d)
+ 
+ #define GEN_VEXT_ST_US(NAME, MTYPE, ETYPE, STORE_FN)                    \
+ void HELPER(NAME##_mask)(void *vd, void *v0, target_ulong base,         \
+@@ -462,14 +399,14 @@ void HELPER(NAME##_mask)(void *vd, void *v0, target_ulong base,         \
+ {                                                                       \
+     uint32_t stride = vext_nf(desc) * sizeof(MTYPE);                    \
+     vext_ldst_stride(vd, v0, base, stride, env, desc, false, STORE_FN,  \
+-                     NULL, sizeof(ETYPE), sizeof(MTYPE),                \
++                     sizeof(ETYPE), sizeof(MTYPE),                      \
+                      GETPC(), MMU_DATA_STORE);                          \
+ }                                                                       \
+                                                                         \
+ void HELPER(NAME)(void *vd, void *v0, target_ulong base,                \
+                   CPURISCVState *env, uint32_t desc)                    \
+ {                                                                       \
+-    vext_ldst_us(vd, base, env, desc, STORE_FN, NULL,                   \
++    vext_ldst_us(vd, base, env, desc, STORE_FN,                         \
+                  sizeof(ETYPE), sizeof(MTYPE), GETPC(), MMU_DATA_STORE);\
+ }
+ 
+@@ -510,7 +447,6 @@ vext_ldst_index(void *vd, void *v0, target_ulong base,
+                 void *vs2, CPURISCVState *env, uint32_t desc,
+                 vext_get_index_addr get_index_addr,
+                 vext_ldst_elem_fn *ldst_elem,
+-                clear_fn *clear_elem,
+                 uint32_t esz, uint32_t msz, uintptr_t ra,
+                 MMUAccessType access_type)
+ {
+@@ -539,52 +475,46 @@ vext_ldst_index(void *vd, void *v0, target_ulong base,
+             k++;
+         }
+     }
+-    /* clear tail elements */
+-    if (clear_elem) {
+-        for (k = 0; k < nf; k++) {
+-            clear_elem(vd, env->vl + k * vlmax, env->vl * esz, vlmax * esz);
+-        }
+-    }
+ }
+ 
+-#define GEN_VEXT_LD_INDEX(NAME, MTYPE, ETYPE, INDEX_FN, LOAD_FN, CLEAR_FN) \
++#define GEN_VEXT_LD_INDEX(NAME, MTYPE, ETYPE, INDEX_FN, LOAD_FN)           \
+ void HELPER(NAME)(void *vd, void *v0, target_ulong base,                   \
+                   void *vs2, CPURISCVState *env, uint32_t desc)            \
+ {                                                                          \
+     vext_ldst_index(vd, v0, base, vs2, env, desc, INDEX_FN,                \
+-                    LOAD_FN, CLEAR_FN, sizeof(ETYPE), sizeof(MTYPE),       \
++                    LOAD_FN, sizeof(ETYPE), sizeof(MTYPE),                 \
+                     GETPC(), MMU_DATA_LOAD);                               \
+ }
+ 
+-GEN_VEXT_LD_INDEX(vlxb_v_b,  int8_t,   int8_t,   idx_b, ldb_b,  clearb)
+-GEN_VEXT_LD_INDEX(vlxb_v_h,  int8_t,   int16_t,  idx_h, ldb_h,  clearh)
+-GEN_VEXT_LD_INDEX(vlxb_v_w,  int8_t,   int32_t,  idx_w, ldb_w,  clearl)
+-GEN_VEXT_LD_INDEX(vlxb_v_d,  int8_t,   int64_t,  idx_d, ldb_d,  clearq)
+-GEN_VEXT_LD_INDEX(vlxh_v_h,  int16_t,  int16_t,  idx_h, ldh_h,  clearh)
+-GEN_VEXT_LD_INDEX(vlxh_v_w,  int16_t,  int32_t,  idx_w, ldh_w,  clearl)
+-GEN_VEXT_LD_INDEX(vlxh_v_d,  int16_t,  int64_t,  idx_d, ldh_d,  clearq)
+-GEN_VEXT_LD_INDEX(vlxw_v_w,  int32_t,  int32_t,  idx_w, ldw_w,  clearl)
+-GEN_VEXT_LD_INDEX(vlxw_v_d,  int32_t,  int64_t,  idx_d, ldw_d,  clearq)
+-GEN_VEXT_LD_INDEX(vlxe_v_b,  int8_t,   int8_t,   idx_b, lde_b,  clearb)
+-GEN_VEXT_LD_INDEX(vlxe_v_h,  int16_t,  int16_t,  idx_h, lde_h,  clearh)
+-GEN_VEXT_LD_INDEX(vlxe_v_w,  int32_t,  int32_t,  idx_w, lde_w,  clearl)
+-GEN_VEXT_LD_INDEX(vlxe_v_d,  int64_t,  int64_t,  idx_d, lde_d,  clearq)
+-GEN_VEXT_LD_INDEX(vlxbu_v_b, uint8_t,  uint8_t,  idx_b, ldbu_b, clearb)
+-GEN_VEXT_LD_INDEX(vlxbu_v_h, uint8_t,  uint16_t, idx_h, ldbu_h, clearh)
+-GEN_VEXT_LD_INDEX(vlxbu_v_w, uint8_t,  uint32_t, idx_w, ldbu_w, clearl)
+-GEN_VEXT_LD_INDEX(vlxbu_v_d, uint8_t,  uint64_t, idx_d, ldbu_d, clearq)
+-GEN_VEXT_LD_INDEX(vlxhu_v_h, uint16_t, uint16_t, idx_h, ldhu_h, clearh)
+-GEN_VEXT_LD_INDEX(vlxhu_v_w, uint16_t, uint32_t, idx_w, ldhu_w, clearl)
+-GEN_VEXT_LD_INDEX(vlxhu_v_d, uint16_t, uint64_t, idx_d, ldhu_d, clearq)
+-GEN_VEXT_LD_INDEX(vlxwu_v_w, uint32_t, uint32_t, idx_w, ldwu_w, clearl)
+-GEN_VEXT_LD_INDEX(vlxwu_v_d, uint32_t, uint64_t, idx_d, ldwu_d, clearq)
++GEN_VEXT_LD_INDEX(vlxb_v_b,  int8_t,   int8_t,   idx_b, ldb_b)
++GEN_VEXT_LD_INDEX(vlxb_v_h,  int8_t,   int16_t,  idx_h, ldb_h)
++GEN_VEXT_LD_INDEX(vlxb_v_w,  int8_t,   int32_t,  idx_w, ldb_w)
++GEN_VEXT_LD_INDEX(vlxb_v_d,  int8_t,   int64_t,  idx_d, ldb_d)
++GEN_VEXT_LD_INDEX(vlxh_v_h,  int16_t,  int16_t,  idx_h, ldh_h)
++GEN_VEXT_LD_INDEX(vlxh_v_w,  int16_t,  int32_t,  idx_w, ldh_w)
++GEN_VEXT_LD_INDEX(vlxh_v_d,  int16_t,  int64_t,  idx_d, ldh_d)
++GEN_VEXT_LD_INDEX(vlxw_v_w,  int32_t,  int32_t,  idx_w, ldw_w)
++GEN_VEXT_LD_INDEX(vlxw_v_d,  int32_t,  int64_t,  idx_d, ldw_d)
++GEN_VEXT_LD_INDEX(vlxe_v_b,  int8_t,   int8_t,   idx_b, lde_b)
++GEN_VEXT_LD_INDEX(vlxe_v_h,  int16_t,  int16_t,  idx_h, lde_h)
++GEN_VEXT_LD_INDEX(vlxe_v_w,  int32_t,  int32_t,  idx_w, lde_w)
++GEN_VEXT_LD_INDEX(vlxe_v_d,  int64_t,  int64_t,  idx_d, lde_d)
++GEN_VEXT_LD_INDEX(vlxbu_v_b, uint8_t,  uint8_t,  idx_b, ldbu_b)
++GEN_VEXT_LD_INDEX(vlxbu_v_h, uint8_t,  uint16_t, idx_h, ldbu_h)
++GEN_VEXT_LD_INDEX(vlxbu_v_w, uint8_t,  uint32_t, idx_w, ldbu_w)
++GEN_VEXT_LD_INDEX(vlxbu_v_d, uint8_t,  uint64_t, idx_d, ldbu_d)
++GEN_VEXT_LD_INDEX(vlxhu_v_h, uint16_t, uint16_t, idx_h, ldhu_h)
++GEN_VEXT_LD_INDEX(vlxhu_v_w, uint16_t, uint32_t, idx_w, ldhu_w)
++GEN_VEXT_LD_INDEX(vlxhu_v_d, uint16_t, uint64_t, idx_d, ldhu_d)
++GEN_VEXT_LD_INDEX(vlxwu_v_w, uint32_t, uint32_t, idx_w, ldwu_w)
++GEN_VEXT_LD_INDEX(vlxwu_v_d, uint32_t, uint64_t, idx_d, ldwu_d)
+ 
+ #define GEN_VEXT_ST_INDEX(NAME, MTYPE, ETYPE, INDEX_FN, STORE_FN)\
+ void HELPER(NAME)(void *vd, void *v0, target_ulong base,         \
+                   void *vs2, CPURISCVState *env, uint32_t desc)  \
+ {                                                                \
+     vext_ldst_index(vd, v0, base, vs2, env, desc, INDEX_FN,      \
+-                    STORE_FN, NULL, sizeof(ETYPE), sizeof(MTYPE),\
++                    STORE_FN, sizeof(ETYPE), sizeof(MTYPE),      \
+                     GETPC(), MMU_DATA_STORE);                    \
+ }
+ 
+@@ -609,7 +539,6 @@ static inline void
+ vext_ldff(void *vd, void *v0, target_ulong base,
+           CPURISCVState *env, uint32_t desc,
+           vext_ldst_elem_fn *ldst_elem,
+-          clear_fn *clear_elem,
+           uint32_t esz, uint32_t msz, uintptr_t ra)
+ {
+     void *host;
+@@ -671,45 +600,38 @@ ProbeSuccess:
+             k++;
+         }
+     }
+-    /* clear tail elements */
+-    if (vl != 0) {
+-        return;
+-    }
+-    for (k = 0; k < nf; k++) {
+-        clear_elem(vd, env->vl + k * vlmax, env->vl * esz, vlmax * esz);
+-    }
+ }
+ 
+-#define GEN_VEXT_LDFF(NAME, MTYPE, ETYPE, LOAD_FN, CLEAR_FN)     \
++#define GEN_VEXT_LDFF(NAME, MTYPE, ETYPE, LOAD_FN)               \
+ void HELPER(NAME)(void *vd, void *v0, target_ulong base,         \
+                   CPURISCVState *env, uint32_t desc)             \
+ {                                                                \
+-    vext_ldff(vd, v0, base, env, desc, LOAD_FN, CLEAR_FN,        \
++    vext_ldff(vd, v0, base, env, desc, LOAD_FN,                  \
+               sizeof(ETYPE), sizeof(MTYPE), GETPC());            \
+ }
+ 
+-GEN_VEXT_LDFF(vlbff_v_b,  int8_t,   int8_t,   ldb_b,  clearb)
+-GEN_VEXT_LDFF(vlbff_v_h,  int8_t,   int16_t,  ldb_h,  clearh)
+-GEN_VEXT_LDFF(vlbff_v_w,  int8_t,   int32_t,  ldb_w,  clearl)
+-GEN_VEXT_LDFF(vlbff_v_d,  int8_t,   int64_t,  ldb_d,  clearq)
+-GEN_VEXT_LDFF(vlhff_v_h,  int16_t,  int16_t,  ldh_h,  clearh)
+-GEN_VEXT_LDFF(vlhff_v_w,  int16_t,  int32_t,  ldh_w,  clearl)
+-GEN_VEXT_LDFF(vlhff_v_d,  int16_t,  int64_t,  ldh_d,  clearq)
+-GEN_VEXT_LDFF(vlwff_v_w,  int32_t,  int32_t,  ldw_w,  clearl)
+-GEN_VEXT_LDFF(vlwff_v_d,  int32_t,  int64_t,  ldw_d,  clearq)
+-GEN_VEXT_LDFF(vleff_v_b,  int8_t,   int8_t,   lde_b,  clearb)
+-GEN_VEXT_LDFF(vleff_v_h,  int16_t,  int16_t,  lde_h,  clearh)
+-GEN_VEXT_LDFF(vleff_v_w,  int32_t,  int32_t,  lde_w,  clearl)
+-GEN_VEXT_LDFF(vleff_v_d,  int64_t,  int64_t,  lde_d,  clearq)
+-GEN_VEXT_LDFF(vlbuff_v_b, uint8_t,  uint8_t,  ldbu_b, clearb)
+-GEN_VEXT_LDFF(vlbuff_v_h, uint8_t,  uint16_t, ldbu_h, clearh)
+-GEN_VEXT_LDFF(vlbuff_v_w, uint8_t,  uint32_t, ldbu_w, clearl)
+-GEN_VEXT_LDFF(vlbuff_v_d, uint8_t,  uint64_t, ldbu_d, clearq)
+-GEN_VEXT_LDFF(vlhuff_v_h, uint16_t, uint16_t, ldhu_h, clearh)
+-GEN_VEXT_LDFF(vlhuff_v_w, uint16_t, uint32_t, ldhu_w, clearl)
+-GEN_VEXT_LDFF(vlhuff_v_d, uint16_t, uint64_t, ldhu_d, clearq)
+-GEN_VEXT_LDFF(vlwuff_v_w, uint32_t, uint32_t, ldwu_w, clearl)
+-GEN_VEXT_LDFF(vlwuff_v_d, uint32_t, uint64_t, ldwu_d, clearq)
++GEN_VEXT_LDFF(vlbff_v_b,  int8_t,   int8_t,   ldb_b)
++GEN_VEXT_LDFF(vlbff_v_h,  int8_t,   int16_t,  ldb_h)
++GEN_VEXT_LDFF(vlbff_v_w,  int8_t,   int32_t,  ldb_w)
++GEN_VEXT_LDFF(vlbff_v_d,  int8_t,   int64_t,  ldb_d)
++GEN_VEXT_LDFF(vlhff_v_h,  int16_t,  int16_t,  ldh_h)
++GEN_VEXT_LDFF(vlhff_v_w,  int16_t,  int32_t,  ldh_w)
++GEN_VEXT_LDFF(vlhff_v_d,  int16_t,  int64_t,  ldh_d)
++GEN_VEXT_LDFF(vlwff_v_w,  int32_t,  int32_t,  ldw_w)
++GEN_VEXT_LDFF(vlwff_v_d,  int32_t,  int64_t,  ldw_d)
++GEN_VEXT_LDFF(vleff_v_b,  int8_t,   int8_t,   lde_b)
++GEN_VEXT_LDFF(vleff_v_h,  int16_t,  int16_t,  lde_h)
++GEN_VEXT_LDFF(vleff_v_w,  int32_t,  int32_t,  lde_w)
++GEN_VEXT_LDFF(vleff_v_d,  int64_t,  int64_t,  lde_d)
++GEN_VEXT_LDFF(vlbuff_v_b, uint8_t,  uint8_t,  ldbu_b)
++GEN_VEXT_LDFF(vlbuff_v_h, uint8_t,  uint16_t, ldbu_h)
++GEN_VEXT_LDFF(vlbuff_v_w, uint8_t,  uint32_t, ldbu_w)
++GEN_VEXT_LDFF(vlbuff_v_d, uint8_t,  uint64_t, ldbu_d)
++GEN_VEXT_LDFF(vlhuff_v_h, uint16_t, uint16_t, ldhu_h)
++GEN_VEXT_LDFF(vlhuff_v_w, uint16_t, uint32_t, ldhu_w)
++GEN_VEXT_LDFF(vlhuff_v_d, uint16_t, uint64_t, ldhu_d)
++GEN_VEXT_LDFF(vlwuff_v_w, uint32_t, uint32_t, ldwu_w)
++GEN_VEXT_LDFF(vlwuff_v_d, uint32_t, uint64_t, ldwu_d)
+ 
+ /*
+  *** Vector AMO Operations (Zvamo)
+@@ -786,14 +708,12 @@ vext_amo_noatomic(void *vs3, void *v0, target_ulong base,
+                   void *vs2, CPURISCVState *env, uint32_t desc,
+                   vext_get_index_addr get_index_addr,
+                   vext_amo_noatomic_fn *noatomic_op,
+-                  clear_fn *clear_elem,
+                   uint32_t esz, uint32_t msz, uintptr_t ra)
+ {
+     uint32_t i;
+     target_long addr;
+     uint32_t wd = vext_wd(desc);
+     uint32_t vm = vext_vm(desc);
+-    uint32_t vlmax = vext_maxsz(desc) / esz;
+ 
+     for (i = 0; i < env->vl; i++) {
+         if (!vm && !vext_elem_mask(v0, i)) {
+@@ -809,48 +729,47 @@ vext_amo_noatomic(void *vs3, void *v0, target_ulong base,
+         addr = get_index_addr(base, i, vs2);
+         noatomic_op(vs3, addr, wd, i, env, ra);
+     }
+-    clear_elem(vs3, env->vl, env->vl * esz, vlmax * esz);
+ }
+ 
+-#define GEN_VEXT_AMO(NAME, MTYPE, ETYPE, INDEX_FN, CLEAR_FN)    \
++#define GEN_VEXT_AMO(NAME, MTYPE, ETYPE, INDEX_FN)              \
+ void HELPER(NAME)(void *vs3, void *v0, target_ulong base,       \
+                   void *vs2, CPURISCVState *env, uint32_t desc) \
+ {                                                               \
+     vext_amo_noatomic(vs3, v0, base, vs2, env, desc,            \
+                       INDEX_FN, vext_##NAME##_noatomic_op,      \
+-                      CLEAR_FN, sizeof(ETYPE), sizeof(MTYPE),   \
++                      sizeof(ETYPE), sizeof(MTYPE),             \
+                       GETPC());                                 \
+ }
+ 
+ #ifdef TARGET_RISCV64
+-GEN_VEXT_AMO(vamoswapw_v_d, int32_t,  int64_t,  idx_d, clearq)
+-GEN_VEXT_AMO(vamoswapd_v_d, int64_t,  int64_t,  idx_d, clearq)
+-GEN_VEXT_AMO(vamoaddw_v_d,  int32_t,  int64_t,  idx_d, clearq)
+-GEN_VEXT_AMO(vamoaddd_v_d,  int64_t,  int64_t,  idx_d, clearq)
+-GEN_VEXT_AMO(vamoxorw_v_d,  int32_t,  int64_t,  idx_d, clearq)
+-GEN_VEXT_AMO(vamoxord_v_d,  int64_t,  int64_t,  idx_d, clearq)
+-GEN_VEXT_AMO(vamoandw_v_d,  int32_t,  int64_t,  idx_d, clearq)
+-GEN_VEXT_AMO(vamoandd_v_d,  int64_t,  int64_t,  idx_d, clearq)
+-GEN_VEXT_AMO(vamoorw_v_d,   int32_t,  int64_t,  idx_d, clearq)
+-GEN_VEXT_AMO(vamoord_v_d,   int64_t,  int64_t,  idx_d, clearq)
+-GEN_VEXT_AMO(vamominw_v_d,  int32_t,  int64_t,  idx_d, clearq)
+-GEN_VEXT_AMO(vamomind_v_d,  int64_t,  int64_t,  idx_d, clearq)
+-GEN_VEXT_AMO(vamomaxw_v_d,  int32_t,  int64_t,  idx_d, clearq)
+-GEN_VEXT_AMO(vamomaxd_v_d,  int64_t,  int64_t,  idx_d, clearq)
+-GEN_VEXT_AMO(vamominuw_v_d, uint32_t, uint64_t, idx_d, clearq)
+-GEN_VEXT_AMO(vamominud_v_d, uint64_t, uint64_t, idx_d, clearq)
+-GEN_VEXT_AMO(vamomaxuw_v_d, uint32_t, uint64_t, idx_d, clearq)
+-GEN_VEXT_AMO(vamomaxud_v_d, uint64_t, uint64_t, idx_d, clearq)
++GEN_VEXT_AMO(vamoswapw_v_d, int32_t,  int64_t,  idx_d)
++GEN_VEXT_AMO(vamoswapd_v_d, int64_t,  int64_t,  idx_d)
++GEN_VEXT_AMO(vamoaddw_v_d,  int32_t,  int64_t,  idx_d)
++GEN_VEXT_AMO(vamoaddd_v_d,  int64_t,  int64_t,  idx_d)
++GEN_VEXT_AMO(vamoxorw_v_d,  int32_t,  int64_t,  idx_d)
++GEN_VEXT_AMO(vamoxord_v_d,  int64_t,  int64_t,  idx_d)
++GEN_VEXT_AMO(vamoandw_v_d,  int32_t,  int64_t,  idx_d)
++GEN_VEXT_AMO(vamoandd_v_d,  int64_t,  int64_t,  idx_d)
++GEN_VEXT_AMO(vamoorw_v_d,   int32_t,  int64_t,  idx_d)
++GEN_VEXT_AMO(vamoord_v_d,   int64_t,  int64_t,  idx_d)
++GEN_VEXT_AMO(vamominw_v_d,  int32_t,  int64_t,  idx_d)
++GEN_VEXT_AMO(vamomind_v_d,  int64_t,  int64_t,  idx_d)
++GEN_VEXT_AMO(vamomaxw_v_d,  int32_t,  int64_t,  idx_d)
++GEN_VEXT_AMO(vamomaxd_v_d,  int64_t,  int64_t,  idx_d)
++GEN_VEXT_AMO(vamominuw_v_d, uint32_t, uint64_t, idx_d)
++GEN_VEXT_AMO(vamominud_v_d, uint64_t, uint64_t, idx_d)
++GEN_VEXT_AMO(vamomaxuw_v_d, uint32_t, uint64_t, idx_d)
++GEN_VEXT_AMO(vamomaxud_v_d, uint64_t, uint64_t, idx_d)
+ #endif
+-GEN_VEXT_AMO(vamoswapw_v_w, int32_t,  int32_t,  idx_w, clearl)
+-GEN_VEXT_AMO(vamoaddw_v_w,  int32_t,  int32_t,  idx_w, clearl)
+-GEN_VEXT_AMO(vamoxorw_v_w,  int32_t,  int32_t,  idx_w, clearl)
+-GEN_VEXT_AMO(vamoandw_v_w,  int32_t,  int32_t,  idx_w, clearl)
+-GEN_VEXT_AMO(vamoorw_v_w,   int32_t,  int32_t,  idx_w, clearl)
+-GEN_VEXT_AMO(vamominw_v_w,  int32_t,  int32_t,  idx_w, clearl)
+-GEN_VEXT_AMO(vamomaxw_v_w,  int32_t,  int32_t,  idx_w, clearl)
+-GEN_VEXT_AMO(vamominuw_v_w, uint32_t, uint32_t, idx_w, clearl)
+-GEN_VEXT_AMO(vamomaxuw_v_w, uint32_t, uint32_t, idx_w, clearl)
++GEN_VEXT_AMO(vamoswapw_v_w, int32_t,  int32_t,  idx_w)
++GEN_VEXT_AMO(vamoaddw_v_w,  int32_t,  int32_t,  idx_w)
++GEN_VEXT_AMO(vamoxorw_v_w,  int32_t,  int32_t,  idx_w)
++GEN_VEXT_AMO(vamoandw_v_w,  int32_t,  int32_t,  idx_w)
++GEN_VEXT_AMO(vamoorw_v_w,   int32_t,  int32_t,  idx_w)
++GEN_VEXT_AMO(vamominw_v_w,  int32_t,  int32_t,  idx_w)
++GEN_VEXT_AMO(vamomaxw_v_w,  int32_t,  int32_t,  idx_w)
++GEN_VEXT_AMO(vamominuw_v_w, uint32_t, uint32_t, idx_w)
++GEN_VEXT_AMO(vamomaxuw_v_w, uint32_t, uint32_t, idx_w)
+ 
+ /*
+  *** Vector Integer Arithmetic Instructions
+@@ -916,9 +835,8 @@ RVVCALL(OPIVV2, vsub_vv_d, OP_SSS_D, H8, H8, H8, DO_SUB)
+ static void do_vext_vv(void *vd, void *v0, void *vs1, void *vs2,
+                        CPURISCVState *env, uint32_t desc,
+                        uint32_t esz, uint32_t dsz,
+-                       opivv2_fn *fn, clear_fn *clearfn)
++                       opivv2_fn *fn)
+ {
+-    uint32_t vlmax = vext_maxsz(desc) / esz;
+     uint32_t vm = vext_vm(desc);
+     uint32_t vl = env->vl;
+     uint32_t i;
+@@ -929,27 +847,26 @@ static void do_vext_vv(void *vd, void *v0, void *vs1, void *vs2,
+         }
+         fn(vd, vs1, vs2, i);
+     }
+-    clearfn(vd, vl, vl * dsz,  vlmax * dsz);
+ }
+ 
+ /* generate the helpers for OPIVV */
+-#define GEN_VEXT_VV(NAME, ESZ, DSZ, CLEAR_FN)             \
++#define GEN_VEXT_VV(NAME, ESZ, DSZ)                       \
+ void HELPER(NAME)(void *vd, void *v0, void *vs1,          \
+                   void *vs2, CPURISCVState *env,          \
+                   uint32_t desc)                          \
+ {                                                         \
+     do_vext_vv(vd, v0, vs1, vs2, env, desc, ESZ, DSZ,     \
+-               do_##NAME, CLEAR_FN);                      \
++               do_##NAME);                                \
+ }
+ 
+-GEN_VEXT_VV(vadd_vv_b, 1, 1, clearb)
+-GEN_VEXT_VV(vadd_vv_h, 2, 2, clearh)
+-GEN_VEXT_VV(vadd_vv_w, 4, 4, clearl)
+-GEN_VEXT_VV(vadd_vv_d, 8, 8, clearq)
+-GEN_VEXT_VV(vsub_vv_b, 1, 1, clearb)
+-GEN_VEXT_VV(vsub_vv_h, 2, 2, clearh)
+-GEN_VEXT_VV(vsub_vv_w, 4, 4, clearl)
+-GEN_VEXT_VV(vsub_vv_d, 8, 8, clearq)
++GEN_VEXT_VV(vadd_vv_b, 1, 1)
++GEN_VEXT_VV(vadd_vv_h, 2, 2)
++GEN_VEXT_VV(vadd_vv_w, 4, 4)
++GEN_VEXT_VV(vadd_vv_d, 8, 8)
++GEN_VEXT_VV(vsub_vv_b, 1, 1)
++GEN_VEXT_VV(vsub_vv_h, 2, 2)
++GEN_VEXT_VV(vsub_vv_w, 4, 4)
++GEN_VEXT_VV(vsub_vv_d, 8, 8)
+ 
+ typedef void opivx2_fn(void *vd, target_long s1, void *vs2, int i);
+ 
+@@ -980,9 +897,8 @@ RVVCALL(OPIVX2, vrsub_vx_d, OP_SSS_D, H8, H8, DO_RSUB)
+ static void do_vext_vx(void *vd, void *v0, target_long s1, void *vs2,
+                        CPURISCVState *env, uint32_t desc,
+                        uint32_t esz, uint32_t dsz,
+-                       opivx2_fn fn, clear_fn *clearfn)
++                       opivx2_fn fn)
+ {
+-    uint32_t vlmax = vext_maxsz(desc) / esz;
+     uint32_t vm = vext_vm(desc);
+     uint32_t vl = env->vl;
+     uint32_t i;
+@@ -993,31 +909,30 @@ static void do_vext_vx(void *vd, void *v0, target_long s1, void *vs2,
+         }
+         fn(vd, s1, vs2, i);
+     }
+-    clearfn(vd, vl, vl * dsz,  vlmax * dsz);
+ }
+ 
+ /* generate the helpers for OPIVX */
+-#define GEN_VEXT_VX(NAME, ESZ, DSZ, CLEAR_FN)             \
++#define GEN_VEXT_VX(NAME, ESZ, DSZ)                       \
+ void HELPER(NAME)(void *vd, void *v0, target_ulong s1,    \
+                   void *vs2, CPURISCVState *env,          \
+                   uint32_t desc)                          \
+ {                                                         \
+     do_vext_vx(vd, v0, s1, vs2, env, desc, ESZ, DSZ,      \
+-               do_##NAME, CLEAR_FN);                      \
+-}
+-
+-GEN_VEXT_VX(vadd_vx_b, 1, 1, clearb)
+-GEN_VEXT_VX(vadd_vx_h, 2, 2, clearh)
+-GEN_VEXT_VX(vadd_vx_w, 4, 4, clearl)
+-GEN_VEXT_VX(vadd_vx_d, 8, 8, clearq)
+-GEN_VEXT_VX(vsub_vx_b, 1, 1, clearb)
+-GEN_VEXT_VX(vsub_vx_h, 2, 2, clearh)
+-GEN_VEXT_VX(vsub_vx_w, 4, 4, clearl)
+-GEN_VEXT_VX(vsub_vx_d, 8, 8, clearq)
+-GEN_VEXT_VX(vrsub_vx_b, 1, 1, clearb)
+-GEN_VEXT_VX(vrsub_vx_h, 2, 2, clearh)
+-GEN_VEXT_VX(vrsub_vx_w, 4, 4, clearl)
+-GEN_VEXT_VX(vrsub_vx_d, 8, 8, clearq)
++               do_##NAME);                                \
++}
++
++GEN_VEXT_VX(vadd_vx_b, 1, 1)
++GEN_VEXT_VX(vadd_vx_h, 2, 2)
++GEN_VEXT_VX(vadd_vx_w, 4, 4)
++GEN_VEXT_VX(vadd_vx_d, 8, 8)
++GEN_VEXT_VX(vsub_vx_b, 1, 1)
++GEN_VEXT_VX(vsub_vx_h, 2, 2)
++GEN_VEXT_VX(vsub_vx_w, 4, 4)
++GEN_VEXT_VX(vsub_vx_d, 8, 8)
++GEN_VEXT_VX(vrsub_vx_b, 1, 1)
++GEN_VEXT_VX(vrsub_vx_h, 2, 2)
++GEN_VEXT_VX(vrsub_vx_w, 4, 4)
++GEN_VEXT_VX(vrsub_vx_d, 8, 8)
+ 
+ void HELPER(vec_rsubs8)(void *d, void *a, uint64_t b, uint32_t desc)
+ {
+@@ -1096,30 +1011,30 @@ RVVCALL(OPIVV2, vwadd_wv_w, WOP_WSSS_W, H8, H4, H4, DO_ADD)
+ RVVCALL(OPIVV2, vwsub_wv_b, WOP_WSSS_B, H2, H1, H1, DO_SUB)
+ RVVCALL(OPIVV2, vwsub_wv_h, WOP_WSSS_H, H4, H2, H2, DO_SUB)
+ RVVCALL(OPIVV2, vwsub_wv_w, WOP_WSSS_W, H8, H4, H4, DO_SUB)
+-GEN_VEXT_VV(vwaddu_vv_b, 1, 2, clearh)
+-GEN_VEXT_VV(vwaddu_vv_h, 2, 4, clearl)
+-GEN_VEXT_VV(vwaddu_vv_w, 4, 8, clearq)
+-GEN_VEXT_VV(vwsubu_vv_b, 1, 2, clearh)
+-GEN_VEXT_VV(vwsubu_vv_h, 2, 4, clearl)
+-GEN_VEXT_VV(vwsubu_vv_w, 4, 8, clearq)
+-GEN_VEXT_VV(vwadd_vv_b, 1, 2, clearh)
+-GEN_VEXT_VV(vwadd_vv_h, 2, 4, clearl)
+-GEN_VEXT_VV(vwadd_vv_w, 4, 8, clearq)
+-GEN_VEXT_VV(vwsub_vv_b, 1, 2, clearh)
+-GEN_VEXT_VV(vwsub_vv_h, 2, 4, clearl)
+-GEN_VEXT_VV(vwsub_vv_w, 4, 8, clearq)
+-GEN_VEXT_VV(vwaddu_wv_b, 1, 2, clearh)
+-GEN_VEXT_VV(vwaddu_wv_h, 2, 4, clearl)
+-GEN_VEXT_VV(vwaddu_wv_w, 4, 8, clearq)
+-GEN_VEXT_VV(vwsubu_wv_b, 1, 2, clearh)
+-GEN_VEXT_VV(vwsubu_wv_h, 2, 4, clearl)
+-GEN_VEXT_VV(vwsubu_wv_w, 4, 8, clearq)
+-GEN_VEXT_VV(vwadd_wv_b, 1, 2, clearh)
+-GEN_VEXT_VV(vwadd_wv_h, 2, 4, clearl)
+-GEN_VEXT_VV(vwadd_wv_w, 4, 8, clearq)
+-GEN_VEXT_VV(vwsub_wv_b, 1, 2, clearh)
+-GEN_VEXT_VV(vwsub_wv_h, 2, 4, clearl)
+-GEN_VEXT_VV(vwsub_wv_w, 4, 8, clearq)
++GEN_VEXT_VV(vwaddu_vv_b, 1, 2)
++GEN_VEXT_VV(vwaddu_vv_h, 2, 4)
++GEN_VEXT_VV(vwaddu_vv_w, 4, 8)
++GEN_VEXT_VV(vwsubu_vv_b, 1, 2)
++GEN_VEXT_VV(vwsubu_vv_h, 2, 4)
++GEN_VEXT_VV(vwsubu_vv_w, 4, 8)
++GEN_VEXT_VV(vwadd_vv_b, 1, 2)
++GEN_VEXT_VV(vwadd_vv_h, 2, 4)
++GEN_VEXT_VV(vwadd_vv_w, 4, 8)
++GEN_VEXT_VV(vwsub_vv_b, 1, 2)
++GEN_VEXT_VV(vwsub_vv_h, 2, 4)
++GEN_VEXT_VV(vwsub_vv_w, 4, 8)
++GEN_VEXT_VV(vwaddu_wv_b, 1, 2)
++GEN_VEXT_VV(vwaddu_wv_h, 2, 4)
++GEN_VEXT_VV(vwaddu_wv_w, 4, 8)
++GEN_VEXT_VV(vwsubu_wv_b, 1, 2)
++GEN_VEXT_VV(vwsubu_wv_h, 2, 4)
++GEN_VEXT_VV(vwsubu_wv_w, 4, 8)
++GEN_VEXT_VV(vwadd_wv_b, 1, 2)
++GEN_VEXT_VV(vwadd_wv_h, 2, 4)
++GEN_VEXT_VV(vwadd_wv_w, 4, 8)
++GEN_VEXT_VV(vwsub_wv_b, 1, 2)
++GEN_VEXT_VV(vwsub_wv_h, 2, 4)
++GEN_VEXT_VV(vwsub_wv_w, 4, 8)
+ 
+ RVVCALL(OPIVX2, vwaddu_vx_b, WOP_UUU_B, H2, H1, DO_ADD)
+ RVVCALL(OPIVX2, vwaddu_vx_h, WOP_UUU_H, H4, H2, DO_ADD)
+@@ -1145,42 +1060,40 @@ RVVCALL(OPIVX2, vwadd_wx_w, WOP_WSSS_W, H8, H4, DO_ADD)
+ RVVCALL(OPIVX2, vwsub_wx_b, WOP_WSSS_B, H2, H1, DO_SUB)
+ RVVCALL(OPIVX2, vwsub_wx_h, WOP_WSSS_H, H4, H2, DO_SUB)
+ RVVCALL(OPIVX2, vwsub_wx_w, WOP_WSSS_W, H8, H4, DO_SUB)
+-GEN_VEXT_VX(vwaddu_vx_b, 1, 2, clearh)
+-GEN_VEXT_VX(vwaddu_vx_h, 2, 4, clearl)
+-GEN_VEXT_VX(vwaddu_vx_w, 4, 8, clearq)
+-GEN_VEXT_VX(vwsubu_vx_b, 1, 2, clearh)
+-GEN_VEXT_VX(vwsubu_vx_h, 2, 4, clearl)
+-GEN_VEXT_VX(vwsubu_vx_w, 4, 8, clearq)
+-GEN_VEXT_VX(vwadd_vx_b, 1, 2, clearh)
+-GEN_VEXT_VX(vwadd_vx_h, 2, 4, clearl)
+-GEN_VEXT_VX(vwadd_vx_w, 4, 8, clearq)
+-GEN_VEXT_VX(vwsub_vx_b, 1, 2, clearh)
+-GEN_VEXT_VX(vwsub_vx_h, 2, 4, clearl)
+-GEN_VEXT_VX(vwsub_vx_w, 4, 8, clearq)
+-GEN_VEXT_VX(vwaddu_wx_b, 1, 2, clearh)
+-GEN_VEXT_VX(vwaddu_wx_h, 2, 4, clearl)
+-GEN_VEXT_VX(vwaddu_wx_w, 4, 8, clearq)
+-GEN_VEXT_VX(vwsubu_wx_b, 1, 2, clearh)
+-GEN_VEXT_VX(vwsubu_wx_h, 2, 4, clearl)
+-GEN_VEXT_VX(vwsubu_wx_w, 4, 8, clearq)
+-GEN_VEXT_VX(vwadd_wx_b, 1, 2, clearh)
+-GEN_VEXT_VX(vwadd_wx_h, 2, 4, clearl)
+-GEN_VEXT_VX(vwadd_wx_w, 4, 8, clearq)
+-GEN_VEXT_VX(vwsub_wx_b, 1, 2, clearh)
+-GEN_VEXT_VX(vwsub_wx_h, 2, 4, clearl)
+-GEN_VEXT_VX(vwsub_wx_w, 4, 8, clearq)
++GEN_VEXT_VX(vwaddu_vx_b, 1, 2)
++GEN_VEXT_VX(vwaddu_vx_h, 2, 4)
++GEN_VEXT_VX(vwaddu_vx_w, 4, 8)
++GEN_VEXT_VX(vwsubu_vx_b, 1, 2)
++GEN_VEXT_VX(vwsubu_vx_h, 2, 4)
++GEN_VEXT_VX(vwsubu_vx_w, 4, 8)
++GEN_VEXT_VX(vwadd_vx_b, 1, 2)
++GEN_VEXT_VX(vwadd_vx_h, 2, 4)
++GEN_VEXT_VX(vwadd_vx_w, 4, 8)
++GEN_VEXT_VX(vwsub_vx_b, 1, 2)
++GEN_VEXT_VX(vwsub_vx_h, 2, 4)
++GEN_VEXT_VX(vwsub_vx_w, 4, 8)
++GEN_VEXT_VX(vwaddu_wx_b, 1, 2)
++GEN_VEXT_VX(vwaddu_wx_h, 2, 4)
++GEN_VEXT_VX(vwaddu_wx_w, 4, 8)
++GEN_VEXT_VX(vwsubu_wx_b, 1, 2)
++GEN_VEXT_VX(vwsubu_wx_h, 2, 4)
++GEN_VEXT_VX(vwsubu_wx_w, 4, 8)
++GEN_VEXT_VX(vwadd_wx_b, 1, 2)
++GEN_VEXT_VX(vwadd_wx_h, 2, 4)
++GEN_VEXT_VX(vwadd_wx_w, 4, 8)
++GEN_VEXT_VX(vwsub_wx_b, 1, 2)
++GEN_VEXT_VX(vwsub_wx_h, 2, 4)
++GEN_VEXT_VX(vwsub_wx_w, 4, 8)
+ 
+ /* Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions */
+ #define DO_VADC(N, M, C) (N + M + C)
+ #define DO_VSBC(N, M, C) (N - M - C)
+ 
+-#define GEN_VEXT_VADC_VVM(NAME, ETYPE, H, DO_OP, CLEAR_FN)    \
++#define GEN_VEXT_VADC_VVM(NAME, ETYPE, H, DO_OP)              \
+ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2,   \
+                   CPURISCVState *env, uint32_t desc)          \
+ {                                                             \
+     uint32_t vl = env->vl;                                    \
+-    uint32_t esz = sizeof(ETYPE);                             \
+-    uint32_t vlmax = vext_maxsz(desc) / esz;                  \
+     uint32_t i;                                               \
+                                                               \
+     for (i = 0; i < vl; i++) {                                \
+@@ -1190,26 +1103,23 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2,   \
+                                                               \
+         *((ETYPE *)vd + H(i)) = DO_OP(s2, s1, carry);         \
+     }                                                         \
+-    CLEAR_FN(vd, vl, vl * esz, vlmax * esz);                  \
+ }
+ 
+-GEN_VEXT_VADC_VVM(vadc_vvm_b, uint8_t,  H1, DO_VADC, clearb)
+-GEN_VEXT_VADC_VVM(vadc_vvm_h, uint16_t, H2, DO_VADC, clearh)
+-GEN_VEXT_VADC_VVM(vadc_vvm_w, uint32_t, H4, DO_VADC, clearl)
+-GEN_VEXT_VADC_VVM(vadc_vvm_d, uint64_t, H8, DO_VADC, clearq)
++GEN_VEXT_VADC_VVM(vadc_vvm_b, uint8_t,  H1, DO_VADC)
++GEN_VEXT_VADC_VVM(vadc_vvm_h, uint16_t, H2, DO_VADC)
++GEN_VEXT_VADC_VVM(vadc_vvm_w, uint32_t, H4, DO_VADC)
++GEN_VEXT_VADC_VVM(vadc_vvm_d, uint64_t, H8, DO_VADC)
+ 
+-GEN_VEXT_VADC_VVM(vsbc_vvm_b, uint8_t,  H1, DO_VSBC, clearb)
+-GEN_VEXT_VADC_VVM(vsbc_vvm_h, uint16_t, H2, DO_VSBC, clearh)
+-GEN_VEXT_VADC_VVM(vsbc_vvm_w, uint32_t, H4, DO_VSBC, clearl)
+-GEN_VEXT_VADC_VVM(vsbc_vvm_d, uint64_t, H8, DO_VSBC, clearq)
++GEN_VEXT_VADC_VVM(vsbc_vvm_b, uint8_t,  H1, DO_VSBC)
++GEN_VEXT_VADC_VVM(vsbc_vvm_h, uint16_t, H2, DO_VSBC)
++GEN_VEXT_VADC_VVM(vsbc_vvm_w, uint32_t, H4, DO_VSBC)
++GEN_VEXT_VADC_VVM(vsbc_vvm_d, uint64_t, H8, DO_VSBC)
+ 
+-#define GEN_VEXT_VADC_VXM(NAME, ETYPE, H, DO_OP, CLEAR_FN)               \
++#define GEN_VEXT_VADC_VXM(NAME, ETYPE, H, DO_OP)                         \
+ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2,        \
+                   CPURISCVState *env, uint32_t desc)                     \
+ {                                                                        \
+     uint32_t vl = env->vl;                                               \
+-    uint32_t esz = sizeof(ETYPE);                                        \
+-    uint32_t vlmax = vext_maxsz(desc) / esz;                             \
+     uint32_t i;                                                          \
+                                                                          \
+     for (i = 0; i < vl; i++) {                                           \
+@@ -1218,18 +1128,17 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2,        \
+                                                                          \
+         *((ETYPE *)vd + H(i)) = DO_OP(s2, (ETYPE)(target_long)s1, carry);\
+     }                                                                    \
+-    CLEAR_FN(vd, vl, vl * esz, vlmax * esz);                             \
+ }
+ 
+-GEN_VEXT_VADC_VXM(vadc_vxm_b, uint8_t,  H1, DO_VADC, clearb)
+-GEN_VEXT_VADC_VXM(vadc_vxm_h, uint16_t, H2, DO_VADC, clearh)
+-GEN_VEXT_VADC_VXM(vadc_vxm_w, uint32_t, H4, DO_VADC, clearl)
+-GEN_VEXT_VADC_VXM(vadc_vxm_d, uint64_t, H8, DO_VADC, clearq)
++GEN_VEXT_VADC_VXM(vadc_vxm_b, uint8_t,  H1, DO_VADC)
++GEN_VEXT_VADC_VXM(vadc_vxm_h, uint16_t, H2, DO_VADC)
++GEN_VEXT_VADC_VXM(vadc_vxm_w, uint32_t, H4, DO_VADC)
++GEN_VEXT_VADC_VXM(vadc_vxm_d, uint64_t, H8, DO_VADC)
+ 
+-GEN_VEXT_VADC_VXM(vsbc_vxm_b, uint8_t,  H1, DO_VSBC, clearb)
+-GEN_VEXT_VADC_VXM(vsbc_vxm_h, uint16_t, H2, DO_VSBC, clearh)
+-GEN_VEXT_VADC_VXM(vsbc_vxm_w, uint32_t, H4, DO_VSBC, clearl)
+-GEN_VEXT_VADC_VXM(vsbc_vxm_d, uint64_t, H8, DO_VSBC, clearq)
++GEN_VEXT_VADC_VXM(vsbc_vxm_b, uint8_t,  H1, DO_VSBC)
++GEN_VEXT_VADC_VXM(vsbc_vxm_h, uint16_t, H2, DO_VSBC)
++GEN_VEXT_VADC_VXM(vsbc_vxm_w, uint32_t, H4, DO_VSBC)
++GEN_VEXT_VADC_VXM(vsbc_vxm_d, uint64_t, H8, DO_VSBC)
+ 
+ #define DO_MADC(N, M, C) (C ? (__typeof(N))(N + M + 1) <= N :           \
+                           (__typeof(N))(N + M) < N)
+@@ -1308,18 +1217,18 @@ RVVCALL(OPIVV2, vxor_vv_b, OP_SSS_B, H1, H1, H1, DO_XOR)
+ RVVCALL(OPIVV2, vxor_vv_h, OP_SSS_H, H2, H2, H2, DO_XOR)
+ RVVCALL(OPIVV2, vxor_vv_w, OP_SSS_W, H4, H4, H4, DO_XOR)
+ RVVCALL(OPIVV2, vxor_vv_d, OP_SSS_D, H8, H8, H8, DO_XOR)
+-GEN_VEXT_VV(vand_vv_b, 1, 1, clearb)
+-GEN_VEXT_VV(vand_vv_h, 2, 2, clearh)
+-GEN_VEXT_VV(vand_vv_w, 4, 4, clearl)
+-GEN_VEXT_VV(vand_vv_d, 8, 8, clearq)
+-GEN_VEXT_VV(vor_vv_b, 1, 1, clearb)
+-GEN_VEXT_VV(vor_vv_h, 2, 2, clearh)
+-GEN_VEXT_VV(vor_vv_w, 4, 4, clearl)
+-GEN_VEXT_VV(vor_vv_d, 8, 8, clearq)
+-GEN_VEXT_VV(vxor_vv_b, 1, 1, clearb)
+-GEN_VEXT_VV(vxor_vv_h, 2, 2, clearh)
+-GEN_VEXT_VV(vxor_vv_w, 4, 4, clearl)
+-GEN_VEXT_VV(vxor_vv_d, 8, 8, clearq)
++GEN_VEXT_VV(vand_vv_b, 1, 1)
++GEN_VEXT_VV(vand_vv_h, 2, 2)
++GEN_VEXT_VV(vand_vv_w, 4, 4)
++GEN_VEXT_VV(vand_vv_d, 8, 8)
++GEN_VEXT_VV(vor_vv_b, 1, 1)
++GEN_VEXT_VV(vor_vv_h, 2, 2)
++GEN_VEXT_VV(vor_vv_w, 4, 4)
++GEN_VEXT_VV(vor_vv_d, 8, 8)
++GEN_VEXT_VV(vxor_vv_b, 1, 1)
++GEN_VEXT_VV(vxor_vv_h, 2, 2)
++GEN_VEXT_VV(vxor_vv_w, 4, 4)
++GEN_VEXT_VV(vxor_vv_d, 8, 8)
+ 
+ RVVCALL(OPIVX2, vand_vx_b, OP_SSS_B, H1, H1, DO_AND)
+ RVVCALL(OPIVX2, vand_vx_h, OP_SSS_H, H2, H2, DO_AND)
+@@ -1333,32 +1242,30 @@ RVVCALL(OPIVX2, vxor_vx_b, OP_SSS_B, H1, H1, DO_XOR)
+ RVVCALL(OPIVX2, vxor_vx_h, OP_SSS_H, H2, H2, DO_XOR)
+ RVVCALL(OPIVX2, vxor_vx_w, OP_SSS_W, H4, H4, DO_XOR)
+ RVVCALL(OPIVX2, vxor_vx_d, OP_SSS_D, H8, H8, DO_XOR)
+-GEN_VEXT_VX(vand_vx_b, 1, 1, clearb)
+-GEN_VEXT_VX(vand_vx_h, 2, 2, clearh)
+-GEN_VEXT_VX(vand_vx_w, 4, 4, clearl)
+-GEN_VEXT_VX(vand_vx_d, 8, 8, clearq)
+-GEN_VEXT_VX(vor_vx_b, 1, 1, clearb)
+-GEN_VEXT_VX(vor_vx_h, 2, 2, clearh)
+-GEN_VEXT_VX(vor_vx_w, 4, 4, clearl)
+-GEN_VEXT_VX(vor_vx_d, 8, 8, clearq)
+-GEN_VEXT_VX(vxor_vx_b, 1, 1, clearb)
+-GEN_VEXT_VX(vxor_vx_h, 2, 2, clearh)
+-GEN_VEXT_VX(vxor_vx_w, 4, 4, clearl)
+-GEN_VEXT_VX(vxor_vx_d, 8, 8, clearq)
++GEN_VEXT_VX(vand_vx_b, 1, 1)
++GEN_VEXT_VX(vand_vx_h, 2, 2)
++GEN_VEXT_VX(vand_vx_w, 4, 4)
++GEN_VEXT_VX(vand_vx_d, 8, 8)
++GEN_VEXT_VX(vor_vx_b, 1, 1)
++GEN_VEXT_VX(vor_vx_h, 2, 2)
++GEN_VEXT_VX(vor_vx_w, 4, 4)
++GEN_VEXT_VX(vor_vx_d, 8, 8)
++GEN_VEXT_VX(vxor_vx_b, 1, 1)
++GEN_VEXT_VX(vxor_vx_h, 2, 2)
++GEN_VEXT_VX(vxor_vx_w, 4, 4)
++GEN_VEXT_VX(vxor_vx_d, 8, 8)
+ 
+ /* Vector Single-Width Bit Shift Instructions */
+ #define DO_SLL(N, M)  (N << (M))
+ #define DO_SRL(N, M)  (N >> (M))
+ 
+ /* generate the helpers for shift instructions with two vector operators */
+-#define GEN_VEXT_SHIFT_VV(NAME, TS1, TS2, HS1, HS2, OP, MASK, CLEAR_FN)   \
++#define GEN_VEXT_SHIFT_VV(NAME, TS1, TS2, HS1, HS2, OP, MASK)             \
+ void HELPER(NAME)(void *vd, void *v0, void *vs1,                          \
+                   void *vs2, CPURISCVState *env, uint32_t desc)           \
+ {                                                                         \
+     uint32_t vm = vext_vm(desc);                                          \
+     uint32_t vl = env->vl;                                                \
+-    uint32_t esz = sizeof(TS1);                                           \
+-    uint32_t vlmax = vext_maxsz(desc) / esz;                              \
+     uint32_t i;                                                           \
+                                                                           \
+     for (i = 0; i < vl; i++) {                                            \
+@@ -1369,73 +1276,69 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1,                          \
+         TS2 s2 = *((TS2 *)vs2 + HS2(i));                                  \
+         *((TS1 *)vd + HS1(i)) = OP(s2, s1 & MASK);                        \
+     }                                                                     \
+-    CLEAR_FN(vd, vl, vl * esz, vlmax * esz);                              \
+ }
+ 
+-GEN_VEXT_SHIFT_VV(vsll_vv_b, uint8_t,  uint8_t, H1, H1, DO_SLL, 0x7, clearb)
+-GEN_VEXT_SHIFT_VV(vsll_vv_h, uint16_t, uint16_t, H2, H2, DO_SLL, 0xf, clearh)
+-GEN_VEXT_SHIFT_VV(vsll_vv_w, uint32_t, uint32_t, H4, H4, DO_SLL, 0x1f, clearl)
+-GEN_VEXT_SHIFT_VV(vsll_vv_d, uint64_t, uint64_t, H8, H8, DO_SLL, 0x3f, clearq)
++GEN_VEXT_SHIFT_VV(vsll_vv_b, uint8_t,  uint8_t, H1, H1, DO_SLL, 0x7)
++GEN_VEXT_SHIFT_VV(vsll_vv_h, uint16_t, uint16_t, H2, H2, DO_SLL, 0xf)
++GEN_VEXT_SHIFT_VV(vsll_vv_w, uint32_t, uint32_t, H4, H4, DO_SLL, 0x1f)
++GEN_VEXT_SHIFT_VV(vsll_vv_d, uint64_t, uint64_t, H8, H8, DO_SLL, 0x3f)
+ 
+-GEN_VEXT_SHIFT_VV(vsrl_vv_b, uint8_t, uint8_t, H1, H1, DO_SRL, 0x7, clearb)
+-GEN_VEXT_SHIFT_VV(vsrl_vv_h, uint16_t, uint16_t, H2, H2, DO_SRL, 0xf, clearh)
+-GEN_VEXT_SHIFT_VV(vsrl_vv_w, uint32_t, uint32_t, H4, H4, DO_SRL, 0x1f, clearl)
+-GEN_VEXT_SHIFT_VV(vsrl_vv_d, uint64_t, uint64_t, H8, H8, DO_SRL, 0x3f, clearq)
++GEN_VEXT_SHIFT_VV(vsrl_vv_b, uint8_t, uint8_t, H1, H1, DO_SRL, 0x7)
++GEN_VEXT_SHIFT_VV(vsrl_vv_h, uint16_t, uint16_t, H2, H2, DO_SRL, 0xf)
++GEN_VEXT_SHIFT_VV(vsrl_vv_w, uint32_t, uint32_t, H4, H4, DO_SRL, 0x1f)
++GEN_VEXT_SHIFT_VV(vsrl_vv_d, uint64_t, uint64_t, H8, H8, DO_SRL, 0x3f)
+ 
+-GEN_VEXT_SHIFT_VV(vsra_vv_b, uint8_t,  int8_t, H1, H1, DO_SRL, 0x7, clearb)
+-GEN_VEXT_SHIFT_VV(vsra_vv_h, uint16_t, int16_t, H2, H2, DO_SRL, 0xf, clearh)
+-GEN_VEXT_SHIFT_VV(vsra_vv_w, uint32_t, int32_t, H4, H4, DO_SRL, 0x1f, clearl)
+-GEN_VEXT_SHIFT_VV(vsra_vv_d, uint64_t, int64_t, H8, H8, DO_SRL, 0x3f, clearq)
++GEN_VEXT_SHIFT_VV(vsra_vv_b, uint8_t,  int8_t, H1, H1, DO_SRL, 0x7)
++GEN_VEXT_SHIFT_VV(vsra_vv_h, uint16_t, int16_t, H2, H2, DO_SRL, 0xf)
++GEN_VEXT_SHIFT_VV(vsra_vv_w, uint32_t, int32_t, H4, H4, DO_SRL, 0x1f)
++GEN_VEXT_SHIFT_VV(vsra_vv_d, uint64_t, int64_t, H8, H8, DO_SRL, 0x3f)
+ 
+ /* generate the helpers for shift instructions with one vector and one scalar */
+-#define GEN_VEXT_SHIFT_VX(NAME, TD, TS2, HD, HS2, OP, MASK, CLEAR_FN) \
+-void HELPER(NAME)(void *vd, void *v0, target_ulong s1,                \
+-        void *vs2, CPURISCVState *env, uint32_t desc)                 \
+-{                                                                     \
+-    uint32_t vm = vext_vm(desc);                                      \
+-    uint32_t vl = env->vl;                                            \
+-    uint32_t esz = sizeof(TD);                                        \
+-    uint32_t vlmax = vext_maxsz(desc) / esz;                          \
+-    uint32_t i;                                                       \
+-                                                                      \
+-    for (i = 0; i < vl; i++) {                                        \
+-        if (!vm && !vext_elem_mask(v0, i)) {                          \
+-            continue;                                                 \
+-        }                                                             \
+-        TS2 s2 = *((TS2 *)vs2 + HS2(i));                              \
+-        *((TD *)vd + HD(i)) = OP(s2, s1 & MASK);                      \
+-    }                                                                 \
+-    CLEAR_FN(vd, vl, vl * esz, vlmax * esz);                          \
+-}
+-
+-GEN_VEXT_SHIFT_VX(vsll_vx_b, uint8_t, int8_t, H1, H1, DO_SLL, 0x7, clearb)
+-GEN_VEXT_SHIFT_VX(vsll_vx_h, uint16_t, int16_t, H2, H2, DO_SLL, 0xf, clearh)
+-GEN_VEXT_SHIFT_VX(vsll_vx_w, uint32_t, int32_t, H4, H4, DO_SLL, 0x1f, clearl)
+-GEN_VEXT_SHIFT_VX(vsll_vx_d, uint64_t, int64_t, H8, H8, DO_SLL, 0x3f, clearq)
+-
+-GEN_VEXT_SHIFT_VX(vsrl_vx_b, uint8_t, uint8_t, H1, H1, DO_SRL, 0x7, clearb)
+-GEN_VEXT_SHIFT_VX(vsrl_vx_h, uint16_t, uint16_t, H2, H2, DO_SRL, 0xf, clearh)
+-GEN_VEXT_SHIFT_VX(vsrl_vx_w, uint32_t, uint32_t, H4, H4, DO_SRL, 0x1f, clearl)
+-GEN_VEXT_SHIFT_VX(vsrl_vx_d, uint64_t, uint64_t, H8, H8, DO_SRL, 0x3f, clearq)
+-
+-GEN_VEXT_SHIFT_VX(vsra_vx_b, int8_t, int8_t, H1, H1, DO_SRL, 0x7, clearb)
+-GEN_VEXT_SHIFT_VX(vsra_vx_h, int16_t, int16_t, H2, H2, DO_SRL, 0xf, clearh)
+-GEN_VEXT_SHIFT_VX(vsra_vx_w, int32_t, int32_t, H4, H4, DO_SRL, 0x1f, clearl)
+-GEN_VEXT_SHIFT_VX(vsra_vx_d, int64_t, int64_t, H8, H8, DO_SRL, 0x3f, clearq)
++#define GEN_VEXT_SHIFT_VX(NAME, TD, TS2, HD, HS2, OP, MASK) \
++void HELPER(NAME)(void *vd, void *v0, target_ulong s1,      \
++        void *vs2, CPURISCVState *env, uint32_t desc)       \
++{                                                           \
++    uint32_t vm = vext_vm(desc);                            \
++    uint32_t vl = env->vl;                                  \
++    uint32_t i;                                             \
++                                                            \
++    for (i = 0; i < vl; i++) {                              \
++        if (!vm && !vext_elem_mask(v0, i)) {                \
++            continue;                                       \
++        }                                                   \
++        TS2 s2 = *((TS2 *)vs2 + HS2(i));                    \
++        *((TD *)vd + HD(i)) = OP(s2, s1 & MASK);            \
++    }                                                       \
++}
++
++GEN_VEXT_SHIFT_VX(vsll_vx_b, uint8_t, int8_t, H1, H1, DO_SLL, 0x7)
++GEN_VEXT_SHIFT_VX(vsll_vx_h, uint16_t, int16_t, H2, H2, DO_SLL, 0xf)
++GEN_VEXT_SHIFT_VX(vsll_vx_w, uint32_t, int32_t, H4, H4, DO_SLL, 0x1f)
++GEN_VEXT_SHIFT_VX(vsll_vx_d, uint64_t, int64_t, H8, H8, DO_SLL, 0x3f)
++
++GEN_VEXT_SHIFT_VX(vsrl_vx_b, uint8_t, uint8_t, H1, H1, DO_SRL, 0x7)
++GEN_VEXT_SHIFT_VX(vsrl_vx_h, uint16_t, uint16_t, H2, H2, DO_SRL, 0xf)
++GEN_VEXT_SHIFT_VX(vsrl_vx_w, uint32_t, uint32_t, H4, H4, DO_SRL, 0x1f)
++GEN_VEXT_SHIFT_VX(vsrl_vx_d, uint64_t, uint64_t, H8, H8, DO_SRL, 0x3f)
++
++GEN_VEXT_SHIFT_VX(vsra_vx_b, int8_t, int8_t, H1, H1, DO_SRL, 0x7)
++GEN_VEXT_SHIFT_VX(vsra_vx_h, int16_t, int16_t, H2, H2, DO_SRL, 0xf)
++GEN_VEXT_SHIFT_VX(vsra_vx_w, int32_t, int32_t, H4, H4, DO_SRL, 0x1f)
++GEN_VEXT_SHIFT_VX(vsra_vx_d, int64_t, int64_t, H8, H8, DO_SRL, 0x3f)
+ 
+ /* Vector Narrowing Integer Right Shift Instructions */
+-GEN_VEXT_SHIFT_VV(vnsrl_vv_b, uint8_t,  uint16_t, H1, H2, DO_SRL, 0xf, clearb)
+-GEN_VEXT_SHIFT_VV(vnsrl_vv_h, uint16_t, uint32_t, H2, H4, DO_SRL, 0x1f, clearh)
+-GEN_VEXT_SHIFT_VV(vnsrl_vv_w, uint32_t, uint64_t, H4, H8, DO_SRL, 0x3f, clearl)
+-GEN_VEXT_SHIFT_VV(vnsra_vv_b, uint8_t,  int16_t, H1, H2, DO_SRL, 0xf, clearb)
+-GEN_VEXT_SHIFT_VV(vnsra_vv_h, uint16_t, int32_t, H2, H4, DO_SRL, 0x1f, clearh)
+-GEN_VEXT_SHIFT_VV(vnsra_vv_w, uint32_t, int64_t, H4, H8, DO_SRL, 0x3f, clearl)
+-GEN_VEXT_SHIFT_VX(vnsrl_vx_b, uint8_t, uint16_t, H1, H2, DO_SRL, 0xf, clearb)
+-GEN_VEXT_SHIFT_VX(vnsrl_vx_h, uint16_t, uint32_t, H2, H4, DO_SRL, 0x1f, clearh)
+-GEN_VEXT_SHIFT_VX(vnsrl_vx_w, uint32_t, uint64_t, H4, H8, DO_SRL, 0x3f, clearl)
+-GEN_VEXT_SHIFT_VX(vnsra_vx_b, int8_t, int16_t, H1, H2, DO_SRL, 0xf, clearb)
+-GEN_VEXT_SHIFT_VX(vnsra_vx_h, int16_t, int32_t, H2, H4, DO_SRL, 0x1f, clearh)
+-GEN_VEXT_SHIFT_VX(vnsra_vx_w, int32_t, int64_t, H4, H8, DO_SRL, 0x3f, clearl)
++GEN_VEXT_SHIFT_VV(vnsrl_vv_b, uint8_t,  uint16_t, H1, H2, DO_SRL, 0xf)
++GEN_VEXT_SHIFT_VV(vnsrl_vv_h, uint16_t, uint32_t, H2, H4, DO_SRL, 0x1f)
++GEN_VEXT_SHIFT_VV(vnsrl_vv_w, uint32_t, uint64_t, H4, H8, DO_SRL, 0x3f)
++GEN_VEXT_SHIFT_VV(vnsra_vv_b, uint8_t,  int16_t, H1, H2, DO_SRL, 0xf)
++GEN_VEXT_SHIFT_VV(vnsra_vv_h, uint16_t, int32_t, H2, H4, DO_SRL, 0x1f)
++GEN_VEXT_SHIFT_VV(vnsra_vv_w, uint32_t, int64_t, H4, H8, DO_SRL, 0x3f)
++GEN_VEXT_SHIFT_VX(vnsrl_vx_b, uint8_t, uint16_t, H1, H2, DO_SRL, 0xf)
++GEN_VEXT_SHIFT_VX(vnsrl_vx_h, uint16_t, uint32_t, H2, H4, DO_SRL, 0x1f)
++GEN_VEXT_SHIFT_VX(vnsrl_vx_w, uint32_t, uint64_t, H4, H8, DO_SRL, 0x3f)
++GEN_VEXT_SHIFT_VX(vnsra_vx_b, int8_t, int16_t, H1, H2, DO_SRL, 0xf)
++GEN_VEXT_SHIFT_VX(vnsra_vx_h, int16_t, int32_t, H2, H4, DO_SRL, 0x1f)
++GEN_VEXT_SHIFT_VX(vnsra_vx_w, int32_t, int64_t, H4, H8, DO_SRL, 0x3f)
+ 
+ /* Vector Integer Comparison Instructions */
+ #define DO_MSEQ(N, M) (N == M)
+@@ -1575,22 +1478,22 @@ RVVCALL(OPIVV2, vmax_vv_b, OP_SSS_B, H1, H1, H1, DO_MAX)
+ RVVCALL(OPIVV2, vmax_vv_h, OP_SSS_H, H2, H2, H2, DO_MAX)
+ RVVCALL(OPIVV2, vmax_vv_w, OP_SSS_W, H4, H4, H4, DO_MAX)
+ RVVCALL(OPIVV2, vmax_vv_d, OP_SSS_D, H8, H8, H8, DO_MAX)
+-GEN_VEXT_VV(vminu_vv_b, 1, 1, clearb)
+-GEN_VEXT_VV(vminu_vv_h, 2, 2, clearh)
+-GEN_VEXT_VV(vminu_vv_w, 4, 4, clearl)
+-GEN_VEXT_VV(vminu_vv_d, 8, 8, clearq)
+-GEN_VEXT_VV(vmin_vv_b, 1, 1, clearb)
+-GEN_VEXT_VV(vmin_vv_h, 2, 2, clearh)
+-GEN_VEXT_VV(vmin_vv_w, 4, 4, clearl)
+-GEN_VEXT_VV(vmin_vv_d, 8, 8, clearq)
+-GEN_VEXT_VV(vmaxu_vv_b, 1, 1, clearb)
+-GEN_VEXT_VV(vmaxu_vv_h, 2, 2, clearh)
+-GEN_VEXT_VV(vmaxu_vv_w, 4, 4, clearl)
+-GEN_VEXT_VV(vmaxu_vv_d, 8, 8, clearq)
+-GEN_VEXT_VV(vmax_vv_b, 1, 1, clearb)
+-GEN_VEXT_VV(vmax_vv_h, 2, 2, clearh)
+-GEN_VEXT_VV(vmax_vv_w, 4, 4, clearl)
+-GEN_VEXT_VV(vmax_vv_d, 8, 8, clearq)
++GEN_VEXT_VV(vminu_vv_b, 1, 1)
++GEN_VEXT_VV(vminu_vv_h, 2, 2)
++GEN_VEXT_VV(vminu_vv_w, 4, 4)
++GEN_VEXT_VV(vminu_vv_d, 8, 8)
++GEN_VEXT_VV(vmin_vv_b, 1, 1)
++GEN_VEXT_VV(vmin_vv_h, 2, 2)
++GEN_VEXT_VV(vmin_vv_w, 4, 4)
++GEN_VEXT_VV(vmin_vv_d, 8, 8)
++GEN_VEXT_VV(vmaxu_vv_b, 1, 1)
++GEN_VEXT_VV(vmaxu_vv_h, 2, 2)
++GEN_VEXT_VV(vmaxu_vv_w, 4, 4)
++GEN_VEXT_VV(vmaxu_vv_d, 8, 8)
++GEN_VEXT_VV(vmax_vv_b, 1, 1)
++GEN_VEXT_VV(vmax_vv_h, 2, 2)
++GEN_VEXT_VV(vmax_vv_w, 4, 4)
++GEN_VEXT_VV(vmax_vv_d, 8, 8)
+ 
+ RVVCALL(OPIVX2, vminu_vx_b, OP_UUU_B, H1, H1, DO_MIN)
+ RVVCALL(OPIVX2, vminu_vx_h, OP_UUU_H, H2, H2, DO_MIN)
+@@ -1608,22 +1511,22 @@ RVVCALL(OPIVX2, vmax_vx_b, OP_SSS_B, H1, H1, DO_MAX)
+ RVVCALL(OPIVX2, vmax_vx_h, OP_SSS_H, H2, H2, DO_MAX)
+ RVVCALL(OPIVX2, vmax_vx_w, OP_SSS_W, H4, H4, DO_MAX)
+ RVVCALL(OPIVX2, vmax_vx_d, OP_SSS_D, H8, H8, DO_MAX)
+-GEN_VEXT_VX(vminu_vx_b, 1, 1, clearb)
+-GEN_VEXT_VX(vminu_vx_h, 2, 2, clearh)
+-GEN_VEXT_VX(vminu_vx_w, 4, 4, clearl)
+-GEN_VEXT_VX(vminu_vx_d, 8, 8, clearq)
+-GEN_VEXT_VX(vmin_vx_b, 1, 1, clearb)
+-GEN_VEXT_VX(vmin_vx_h, 2, 2, clearh)
+-GEN_VEXT_VX(vmin_vx_w, 4, 4, clearl)
+-GEN_VEXT_VX(vmin_vx_d, 8, 8, clearq)
+-GEN_VEXT_VX(vmaxu_vx_b, 1, 1, clearb)
+-GEN_VEXT_VX(vmaxu_vx_h, 2, 2, clearh)
+-GEN_VEXT_VX(vmaxu_vx_w, 4, 4, clearl)
+-GEN_VEXT_VX(vmaxu_vx_d, 8, 8,  clearq)
+-GEN_VEXT_VX(vmax_vx_b, 1, 1, clearb)
+-GEN_VEXT_VX(vmax_vx_h, 2, 2, clearh)
+-GEN_VEXT_VX(vmax_vx_w, 4, 4, clearl)
+-GEN_VEXT_VX(vmax_vx_d, 8, 8, clearq)
++GEN_VEXT_VX(vminu_vx_b, 1, 1)
++GEN_VEXT_VX(vminu_vx_h, 2, 2)
++GEN_VEXT_VX(vminu_vx_w, 4, 4)
++GEN_VEXT_VX(vminu_vx_d, 8, 8)
++GEN_VEXT_VX(vmin_vx_b, 1, 1)
++GEN_VEXT_VX(vmin_vx_h, 2, 2)
++GEN_VEXT_VX(vmin_vx_w, 4, 4)
++GEN_VEXT_VX(vmin_vx_d, 8, 8)
++GEN_VEXT_VX(vmaxu_vx_b, 1, 1)
++GEN_VEXT_VX(vmaxu_vx_h, 2, 2)
++GEN_VEXT_VX(vmaxu_vx_w, 4, 4)
++GEN_VEXT_VX(vmaxu_vx_d, 8, 8)
++GEN_VEXT_VX(vmax_vx_b, 1, 1)
++GEN_VEXT_VX(vmax_vx_h, 2, 2)
++GEN_VEXT_VX(vmax_vx_w, 4, 4)
++GEN_VEXT_VX(vmax_vx_d, 8, 8)
+ 
+ /* Vector Single-Width Integer Multiply Instructions */
+ #define DO_MUL(N, M) (N * M)
+@@ -1631,10 +1534,10 @@ RVVCALL(OPIVV2, vmul_vv_b, OP_SSS_B, H1, H1, H1, DO_MUL)
+ RVVCALL(OPIVV2, vmul_vv_h, OP_SSS_H, H2, H2, H2, DO_MUL)
+ RVVCALL(OPIVV2, vmul_vv_w, OP_SSS_W, H4, H4, H4, DO_MUL)
+ RVVCALL(OPIVV2, vmul_vv_d, OP_SSS_D, H8, H8, H8, DO_MUL)
+-GEN_VEXT_VV(vmul_vv_b, 1, 1, clearb)
+-GEN_VEXT_VV(vmul_vv_h, 2, 2, clearh)
+-GEN_VEXT_VV(vmul_vv_w, 4, 4, clearl)
+-GEN_VEXT_VV(vmul_vv_d, 8, 8, clearq)
++GEN_VEXT_VV(vmul_vv_b, 1, 1)
++GEN_VEXT_VV(vmul_vv_h, 2, 2)
++GEN_VEXT_VV(vmul_vv_w, 4, 4)
++GEN_VEXT_VV(vmul_vv_d, 8, 8)
+ 
+ static int8_t do_mulh_b(int8_t s2, int8_t s1)
+ {
+@@ -1738,18 +1641,18 @@ RVVCALL(OPIVV2, vmulhsu_vv_b, OP_SUS_B, H1, H1, H1, do_mulhsu_b)
+ RVVCALL(OPIVV2, vmulhsu_vv_h, OP_SUS_H, H2, H2, H2, do_mulhsu_h)
+ RVVCALL(OPIVV2, vmulhsu_vv_w, OP_SUS_W, H4, H4, H4, do_mulhsu_w)
+ RVVCALL(OPIVV2, vmulhsu_vv_d, OP_SUS_D, H8, H8, H8, do_mulhsu_d)
+-GEN_VEXT_VV(vmulh_vv_b, 1, 1, clearb)
+-GEN_VEXT_VV(vmulh_vv_h, 2, 2, clearh)
+-GEN_VEXT_VV(vmulh_vv_w, 4, 4, clearl)
+-GEN_VEXT_VV(vmulh_vv_d, 8, 8, clearq)
+-GEN_VEXT_VV(vmulhu_vv_b, 1, 1, clearb)
+-GEN_VEXT_VV(vmulhu_vv_h, 2, 2, clearh)
+-GEN_VEXT_VV(vmulhu_vv_w, 4, 4, clearl)
+-GEN_VEXT_VV(vmulhu_vv_d, 8, 8, clearq)
+-GEN_VEXT_VV(vmulhsu_vv_b, 1, 1, clearb)
+-GEN_VEXT_VV(vmulhsu_vv_h, 2, 2, clearh)
+-GEN_VEXT_VV(vmulhsu_vv_w, 4, 4, clearl)
+-GEN_VEXT_VV(vmulhsu_vv_d, 8, 8, clearq)
++GEN_VEXT_VV(vmulh_vv_b, 1, 1)
++GEN_VEXT_VV(vmulh_vv_h, 2, 2)
++GEN_VEXT_VV(vmulh_vv_w, 4, 4)
++GEN_VEXT_VV(vmulh_vv_d, 8, 8)
++GEN_VEXT_VV(vmulhu_vv_b, 1, 1)
++GEN_VEXT_VV(vmulhu_vv_h, 2, 2)
++GEN_VEXT_VV(vmulhu_vv_w, 4, 4)
++GEN_VEXT_VV(vmulhu_vv_d, 8, 8)
++GEN_VEXT_VV(vmulhsu_vv_b, 1, 1)
++GEN_VEXT_VV(vmulhsu_vv_h, 2, 2)
++GEN_VEXT_VV(vmulhsu_vv_w, 4, 4)
++GEN_VEXT_VV(vmulhsu_vv_d, 8, 8)
+ 
+ RVVCALL(OPIVX2, vmul_vx_b, OP_SSS_B, H1, H1, DO_MUL)
+ RVVCALL(OPIVX2, vmul_vx_h, OP_SSS_H, H2, H2, DO_MUL)
+@@ -1767,22 +1670,22 @@ RVVCALL(OPIVX2, vmulhsu_vx_b, OP_SUS_B, H1, H1, do_mulhsu_b)
+ RVVCALL(OPIVX2, vmulhsu_vx_h, OP_SUS_H, H2, H2, do_mulhsu_h)
+ RVVCALL(OPIVX2, vmulhsu_vx_w, OP_SUS_W, H4, H4, do_mulhsu_w)
+ RVVCALL(OPIVX2, vmulhsu_vx_d, OP_SUS_D, H8, H8, do_mulhsu_d)
+-GEN_VEXT_VX(vmul_vx_b, 1, 1, clearb)
+-GEN_VEXT_VX(vmul_vx_h, 2, 2, clearh)
+-GEN_VEXT_VX(vmul_vx_w, 4, 4, clearl)
+-GEN_VEXT_VX(vmul_vx_d, 8, 8, clearq)
+-GEN_VEXT_VX(vmulh_vx_b, 1, 1, clearb)
+-GEN_VEXT_VX(vmulh_vx_h, 2, 2, clearh)
+-GEN_VEXT_VX(vmulh_vx_w, 4, 4, clearl)
+-GEN_VEXT_VX(vmulh_vx_d, 8, 8, clearq)
+-GEN_VEXT_VX(vmulhu_vx_b, 1, 1, clearb)
+-GEN_VEXT_VX(vmulhu_vx_h, 2, 2, clearh)
+-GEN_VEXT_VX(vmulhu_vx_w, 4, 4, clearl)
+-GEN_VEXT_VX(vmulhu_vx_d, 8, 8, clearq)
+-GEN_VEXT_VX(vmulhsu_vx_b, 1, 1, clearb)
+-GEN_VEXT_VX(vmulhsu_vx_h, 2, 2, clearh)
+-GEN_VEXT_VX(vmulhsu_vx_w, 4, 4, clearl)
+-GEN_VEXT_VX(vmulhsu_vx_d, 8, 8, clearq)
++GEN_VEXT_VX(vmul_vx_b, 1, 1)
++GEN_VEXT_VX(vmul_vx_h, 2, 2)
++GEN_VEXT_VX(vmul_vx_w, 4, 4)
++GEN_VEXT_VX(vmul_vx_d, 8, 8)
++GEN_VEXT_VX(vmulh_vx_b, 1, 1)
++GEN_VEXT_VX(vmulh_vx_h, 2, 2)
++GEN_VEXT_VX(vmulh_vx_w, 4, 4)
++GEN_VEXT_VX(vmulh_vx_d, 8, 8)
++GEN_VEXT_VX(vmulhu_vx_b, 1, 1)
++GEN_VEXT_VX(vmulhu_vx_h, 2, 2)
++GEN_VEXT_VX(vmulhu_vx_w, 4, 4)
++GEN_VEXT_VX(vmulhu_vx_d, 8, 8)
++GEN_VEXT_VX(vmulhsu_vx_b, 1, 1)
++GEN_VEXT_VX(vmulhsu_vx_h, 2, 2)
++GEN_VEXT_VX(vmulhsu_vx_w, 4, 4)
++GEN_VEXT_VX(vmulhsu_vx_d, 8, 8)
+ 
+ /* Vector Integer Divide Instructions */
+ #define DO_DIVU(N, M) (unlikely(M == 0) ? (__typeof(N))(-1) : N / M)
+@@ -1808,22 +1711,22 @@ RVVCALL(OPIVV2, vrem_vv_b, OP_SSS_B, H1, H1, H1, DO_REM)
+ RVVCALL(OPIVV2, vrem_vv_h, OP_SSS_H, H2, H2, H2, DO_REM)
+ RVVCALL(OPIVV2, vrem_vv_w, OP_SSS_W, H4, H4, H4, DO_REM)
+ RVVCALL(OPIVV2, vrem_vv_d, OP_SSS_D, H8, H8, H8, DO_REM)
+-GEN_VEXT_VV(vdivu_vv_b, 1, 1, clearb)
+-GEN_VEXT_VV(vdivu_vv_h, 2, 2, clearh)
+-GEN_VEXT_VV(vdivu_vv_w, 4, 4, clearl)
+-GEN_VEXT_VV(vdivu_vv_d, 8, 8, clearq)
+-GEN_VEXT_VV(vdiv_vv_b, 1, 1, clearb)
+-GEN_VEXT_VV(vdiv_vv_h, 2, 2, clearh)
+-GEN_VEXT_VV(vdiv_vv_w, 4, 4, clearl)
+-GEN_VEXT_VV(vdiv_vv_d, 8, 8, clearq)
+-GEN_VEXT_VV(vremu_vv_b, 1, 1, clearb)
+-GEN_VEXT_VV(vremu_vv_h, 2, 2, clearh)
+-GEN_VEXT_VV(vremu_vv_w, 4, 4, clearl)
+-GEN_VEXT_VV(vremu_vv_d, 8, 8, clearq)
+-GEN_VEXT_VV(vrem_vv_b, 1, 1, clearb)
+-GEN_VEXT_VV(vrem_vv_h, 2, 2, clearh)
+-GEN_VEXT_VV(vrem_vv_w, 4, 4, clearl)
+-GEN_VEXT_VV(vrem_vv_d, 8, 8, clearq)
++GEN_VEXT_VV(vdivu_vv_b, 1, 1)
++GEN_VEXT_VV(vdivu_vv_h, 2, 2)
++GEN_VEXT_VV(vdivu_vv_w, 4, 4)
++GEN_VEXT_VV(vdivu_vv_d, 8, 8)
++GEN_VEXT_VV(vdiv_vv_b, 1, 1)
++GEN_VEXT_VV(vdiv_vv_h, 2, 2)
++GEN_VEXT_VV(vdiv_vv_w, 4, 4)
++GEN_VEXT_VV(vdiv_vv_d, 8, 8)
++GEN_VEXT_VV(vremu_vv_b, 1, 1)
++GEN_VEXT_VV(vremu_vv_h, 2, 2)
++GEN_VEXT_VV(vremu_vv_w, 4, 4)
++GEN_VEXT_VV(vremu_vv_d, 8, 8)
++GEN_VEXT_VV(vrem_vv_b, 1, 1)
++GEN_VEXT_VV(vrem_vv_h, 2, 2)
++GEN_VEXT_VV(vrem_vv_w, 4, 4)
++GEN_VEXT_VV(vrem_vv_d, 8, 8)
+ 
+ RVVCALL(OPIVX2, vdivu_vx_b, OP_UUU_B, H1, H1, DO_DIVU)
+ RVVCALL(OPIVX2, vdivu_vx_h, OP_UUU_H, H2, H2, DO_DIVU)
+@@ -1841,22 +1744,22 @@ RVVCALL(OPIVX2, vrem_vx_b, OP_SSS_B, H1, H1, DO_REM)
+ RVVCALL(OPIVX2, vrem_vx_h, OP_SSS_H, H2, H2, DO_REM)
+ RVVCALL(OPIVX2, vrem_vx_w, OP_SSS_W, H4, H4, DO_REM)
+ RVVCALL(OPIVX2, vrem_vx_d, OP_SSS_D, H8, H8, DO_REM)
+-GEN_VEXT_VX(vdivu_vx_b, 1, 1, clearb)
+-GEN_VEXT_VX(vdivu_vx_h, 2, 2, clearh)
+-GEN_VEXT_VX(vdivu_vx_w, 4, 4, clearl)
+-GEN_VEXT_VX(vdivu_vx_d, 8, 8, clearq)
+-GEN_VEXT_VX(vdiv_vx_b, 1, 1, clearb)
+-GEN_VEXT_VX(vdiv_vx_h, 2, 2, clearh)
+-GEN_VEXT_VX(vdiv_vx_w, 4, 4, clearl)
+-GEN_VEXT_VX(vdiv_vx_d, 8, 8, clearq)
+-GEN_VEXT_VX(vremu_vx_b, 1, 1, clearb)
+-GEN_VEXT_VX(vremu_vx_h, 2, 2, clearh)
+-GEN_VEXT_VX(vremu_vx_w, 4, 4, clearl)
+-GEN_VEXT_VX(vremu_vx_d, 8, 8, clearq)
+-GEN_VEXT_VX(vrem_vx_b, 1, 1, clearb)
+-GEN_VEXT_VX(vrem_vx_h, 2, 2, clearh)
+-GEN_VEXT_VX(vrem_vx_w, 4, 4, clearl)
+-GEN_VEXT_VX(vrem_vx_d, 8, 8, clearq)
++GEN_VEXT_VX(vdivu_vx_b, 1, 1)
++GEN_VEXT_VX(vdivu_vx_h, 2, 2)
++GEN_VEXT_VX(vdivu_vx_w, 4, 4)
++GEN_VEXT_VX(vdivu_vx_d, 8, 8)
++GEN_VEXT_VX(vdiv_vx_b, 1, 1)
++GEN_VEXT_VX(vdiv_vx_h, 2, 2)
++GEN_VEXT_VX(vdiv_vx_w, 4, 4)
++GEN_VEXT_VX(vdiv_vx_d, 8, 8)
++GEN_VEXT_VX(vremu_vx_b, 1, 1)
++GEN_VEXT_VX(vremu_vx_h, 2, 2)
++GEN_VEXT_VX(vremu_vx_w, 4, 4)
++GEN_VEXT_VX(vremu_vx_d, 8, 8)
++GEN_VEXT_VX(vrem_vx_b, 1, 1)
++GEN_VEXT_VX(vrem_vx_h, 2, 2)
++GEN_VEXT_VX(vrem_vx_w, 4, 4)
++GEN_VEXT_VX(vrem_vx_d, 8, 8)
+ 
+ /* Vector Widening Integer Multiply Instructions */
+ RVVCALL(OPIVV2, vwmul_vv_b, WOP_SSS_B, H2, H1, H1, DO_MUL)
+@@ -1868,15 +1771,15 @@ RVVCALL(OPIVV2, vwmulu_vv_w, WOP_UUU_W, H8, H4, H4, DO_MUL)
+ RVVCALL(OPIVV2, vwmulsu_vv_b, WOP_SUS_B, H2, H1, H1, DO_MUL)
+ RVVCALL(OPIVV2, vwmulsu_vv_h, WOP_SUS_H, H4, H2, H2, DO_MUL)
+ RVVCALL(OPIVV2, vwmulsu_vv_w, WOP_SUS_W, H8, H4, H4, DO_MUL)
+-GEN_VEXT_VV(vwmul_vv_b, 1, 2, clearh)
+-GEN_VEXT_VV(vwmul_vv_h, 2, 4, clearl)
+-GEN_VEXT_VV(vwmul_vv_w, 4, 8, clearq)
+-GEN_VEXT_VV(vwmulu_vv_b, 1, 2, clearh)
+-GEN_VEXT_VV(vwmulu_vv_h, 2, 4, clearl)
+-GEN_VEXT_VV(vwmulu_vv_w, 4, 8, clearq)
+-GEN_VEXT_VV(vwmulsu_vv_b, 1, 2, clearh)
+-GEN_VEXT_VV(vwmulsu_vv_h, 2, 4, clearl)
+-GEN_VEXT_VV(vwmulsu_vv_w, 4, 8, clearq)
++GEN_VEXT_VV(vwmul_vv_b, 1, 2)
++GEN_VEXT_VV(vwmul_vv_h, 2, 4)
++GEN_VEXT_VV(vwmul_vv_w, 4, 8)
++GEN_VEXT_VV(vwmulu_vv_b, 1, 2)
++GEN_VEXT_VV(vwmulu_vv_h, 2, 4)
++GEN_VEXT_VV(vwmulu_vv_w, 4, 8)
++GEN_VEXT_VV(vwmulsu_vv_b, 1, 2)
++GEN_VEXT_VV(vwmulsu_vv_h, 2, 4)
++GEN_VEXT_VV(vwmulsu_vv_w, 4, 8)
+ 
+ RVVCALL(OPIVX2, vwmul_vx_b, WOP_SSS_B, H2, H1, DO_MUL)
+ RVVCALL(OPIVX2, vwmul_vx_h, WOP_SSS_H, H4, H2, DO_MUL)
+@@ -1887,15 +1790,15 @@ RVVCALL(OPIVX2, vwmulu_vx_w, WOP_UUU_W, H8, H4, DO_MUL)
+ RVVCALL(OPIVX2, vwmulsu_vx_b, WOP_SUS_B, H2, H1, DO_MUL)
+ RVVCALL(OPIVX2, vwmulsu_vx_h, WOP_SUS_H, H4, H2, DO_MUL)
+ RVVCALL(OPIVX2, vwmulsu_vx_w, WOP_SUS_W, H8, H4, DO_MUL)
+-GEN_VEXT_VX(vwmul_vx_b, 1, 2, clearh)
+-GEN_VEXT_VX(vwmul_vx_h, 2, 4, clearl)
+-GEN_VEXT_VX(vwmul_vx_w, 4, 8, clearq)
+-GEN_VEXT_VX(vwmulu_vx_b, 1, 2, clearh)
+-GEN_VEXT_VX(vwmulu_vx_h, 2, 4, clearl)
+-GEN_VEXT_VX(vwmulu_vx_w, 4, 8, clearq)
+-GEN_VEXT_VX(vwmulsu_vx_b, 1, 2, clearh)
+-GEN_VEXT_VX(vwmulsu_vx_h, 2, 4, clearl)
+-GEN_VEXT_VX(vwmulsu_vx_w, 4, 8, clearq)
++GEN_VEXT_VX(vwmul_vx_b, 1, 2)
++GEN_VEXT_VX(vwmul_vx_h, 2, 4)
++GEN_VEXT_VX(vwmul_vx_w, 4, 8)
++GEN_VEXT_VX(vwmulu_vx_b, 1, 2)
++GEN_VEXT_VX(vwmulu_vx_h, 2, 4)
++GEN_VEXT_VX(vwmulu_vx_w, 4, 8)
++GEN_VEXT_VX(vwmulsu_vx_b, 1, 2)
++GEN_VEXT_VX(vwmulsu_vx_h, 2, 4)
++GEN_VEXT_VX(vwmulsu_vx_w, 4, 8)
+ 
+ /* Vector Single-Width Integer Multiply-Add Instructions */
+ #define OPIVV3(NAME, TD, T1, T2, TX1, TX2, HD, HS1, HS2, OP)   \
+@@ -1927,22 +1830,22 @@ RVVCALL(OPIVV3, vnmsub_vv_b, OP_SSS_B, H1, H1, H1, DO_NMSUB)
+ RVVCALL(OPIVV3, vnmsub_vv_h, OP_SSS_H, H2, H2, H2, DO_NMSUB)
+ RVVCALL(OPIVV3, vnmsub_vv_w, OP_SSS_W, H4, H4, H4, DO_NMSUB)
+ RVVCALL(OPIVV3, vnmsub_vv_d, OP_SSS_D, H8, H8, H8, DO_NMSUB)
+-GEN_VEXT_VV(vmacc_vv_b, 1, 1, clearb)
+-GEN_VEXT_VV(vmacc_vv_h, 2, 2, clearh)
+-GEN_VEXT_VV(vmacc_vv_w, 4, 4, clearl)
+-GEN_VEXT_VV(vmacc_vv_d, 8, 8, clearq)
+-GEN_VEXT_VV(vnmsac_vv_b, 1, 1, clearb)
+-GEN_VEXT_VV(vnmsac_vv_h, 2, 2, clearh)
+-GEN_VEXT_VV(vnmsac_vv_w, 4, 4, clearl)
+-GEN_VEXT_VV(vnmsac_vv_d, 8, 8, clearq)
+-GEN_VEXT_VV(vmadd_vv_b, 1, 1, clearb)
+-GEN_VEXT_VV(vmadd_vv_h, 2, 2, clearh)
+-GEN_VEXT_VV(vmadd_vv_w, 4, 4, clearl)
+-GEN_VEXT_VV(vmadd_vv_d, 8, 8, clearq)
+-GEN_VEXT_VV(vnmsub_vv_b, 1, 1, clearb)
+-GEN_VEXT_VV(vnmsub_vv_h, 2, 2, clearh)
+-GEN_VEXT_VV(vnmsub_vv_w, 4, 4, clearl)
+-GEN_VEXT_VV(vnmsub_vv_d, 8, 8, clearq)
++GEN_VEXT_VV(vmacc_vv_b, 1, 1)
++GEN_VEXT_VV(vmacc_vv_h, 2, 2)
++GEN_VEXT_VV(vmacc_vv_w, 4, 4)
++GEN_VEXT_VV(vmacc_vv_d, 8, 8)
++GEN_VEXT_VV(vnmsac_vv_b, 1, 1)
++GEN_VEXT_VV(vnmsac_vv_h, 2, 2)
++GEN_VEXT_VV(vnmsac_vv_w, 4, 4)
++GEN_VEXT_VV(vnmsac_vv_d, 8, 8)
++GEN_VEXT_VV(vmadd_vv_b, 1, 1)
++GEN_VEXT_VV(vmadd_vv_h, 2, 2)
++GEN_VEXT_VV(vmadd_vv_w, 4, 4)
++GEN_VEXT_VV(vmadd_vv_d, 8, 8)
++GEN_VEXT_VV(vnmsub_vv_b, 1, 1)
++GEN_VEXT_VV(vnmsub_vv_h, 2, 2)
++GEN_VEXT_VV(vnmsub_vv_w, 4, 4)
++GEN_VEXT_VV(vnmsub_vv_d, 8, 8)
+ 
+ #define OPIVX3(NAME, TD, T1, T2, TX1, TX2, HD, HS2, OP)             \
+ static void do_##NAME(void *vd, target_long s1, void *vs2, int i)   \
+@@ -1968,22 +1871,22 @@ RVVCALL(OPIVX3, vnmsub_vx_b, OP_SSS_B, H1, H1, DO_NMSUB)
+ RVVCALL(OPIVX3, vnmsub_vx_h, OP_SSS_H, H2, H2, DO_NMSUB)
+ RVVCALL(OPIVX3, vnmsub_vx_w, OP_SSS_W, H4, H4, DO_NMSUB)
+ RVVCALL(OPIVX3, vnmsub_vx_d, OP_SSS_D, H8, H8, DO_NMSUB)
+-GEN_VEXT_VX(vmacc_vx_b, 1, 1, clearb)
+-GEN_VEXT_VX(vmacc_vx_h, 2, 2, clearh)
+-GEN_VEXT_VX(vmacc_vx_w, 4, 4, clearl)
+-GEN_VEXT_VX(vmacc_vx_d, 8, 8, clearq)
+-GEN_VEXT_VX(vnmsac_vx_b, 1, 1, clearb)
+-GEN_VEXT_VX(vnmsac_vx_h, 2, 2, clearh)
+-GEN_VEXT_VX(vnmsac_vx_w, 4, 4, clearl)
+-GEN_VEXT_VX(vnmsac_vx_d, 8, 8, clearq)
+-GEN_VEXT_VX(vmadd_vx_b, 1, 1, clearb)
+-GEN_VEXT_VX(vmadd_vx_h, 2, 2, clearh)
+-GEN_VEXT_VX(vmadd_vx_w, 4, 4, clearl)
+-GEN_VEXT_VX(vmadd_vx_d, 8, 8, clearq)
+-GEN_VEXT_VX(vnmsub_vx_b, 1, 1, clearb)
+-GEN_VEXT_VX(vnmsub_vx_h, 2, 2, clearh)
+-GEN_VEXT_VX(vnmsub_vx_w, 4, 4, clearl)
+-GEN_VEXT_VX(vnmsub_vx_d, 8, 8, clearq)
++GEN_VEXT_VX(vmacc_vx_b, 1, 1)
++GEN_VEXT_VX(vmacc_vx_h, 2, 2)
++GEN_VEXT_VX(vmacc_vx_w, 4, 4)
++GEN_VEXT_VX(vmacc_vx_d, 8, 8)
++GEN_VEXT_VX(vnmsac_vx_b, 1, 1)
++GEN_VEXT_VX(vnmsac_vx_h, 2, 2)
++GEN_VEXT_VX(vnmsac_vx_w, 4, 4)
++GEN_VEXT_VX(vnmsac_vx_d, 8, 8)
++GEN_VEXT_VX(vmadd_vx_b, 1, 1)
++GEN_VEXT_VX(vmadd_vx_h, 2, 2)
++GEN_VEXT_VX(vmadd_vx_w, 4, 4)
++GEN_VEXT_VX(vmadd_vx_d, 8, 8)
++GEN_VEXT_VX(vnmsub_vx_b, 1, 1)
++GEN_VEXT_VX(vnmsub_vx_h, 2, 2)
++GEN_VEXT_VX(vnmsub_vx_w, 4, 4)
++GEN_VEXT_VX(vnmsub_vx_d, 8, 8)
+ 
+ /* Vector Widening Integer Multiply-Add Instructions */
+ RVVCALL(OPIVV3, vwmaccu_vv_b, WOP_UUU_B, H2, H1, H1, DO_MACC)
+@@ -1995,15 +1898,15 @@ RVVCALL(OPIVV3, vwmacc_vv_w, WOP_SSS_W, H8, H4, H4, DO_MACC)
+ RVVCALL(OPIVV3, vwmaccsu_vv_b, WOP_SSU_B, H2, H1, H1, DO_MACC)
+ RVVCALL(OPIVV3, vwmaccsu_vv_h, WOP_SSU_H, H4, H2, H2, DO_MACC)
+ RVVCALL(OPIVV3, vwmaccsu_vv_w, WOP_SSU_W, H8, H4, H4, DO_MACC)
+-GEN_VEXT_VV(vwmaccu_vv_b, 1, 2, clearh)
+-GEN_VEXT_VV(vwmaccu_vv_h, 2, 4, clearl)
+-GEN_VEXT_VV(vwmaccu_vv_w, 4, 8, clearq)
+-GEN_VEXT_VV(vwmacc_vv_b, 1, 2, clearh)
+-GEN_VEXT_VV(vwmacc_vv_h, 2, 4, clearl)
+-GEN_VEXT_VV(vwmacc_vv_w, 4, 8, clearq)
+-GEN_VEXT_VV(vwmaccsu_vv_b, 1, 2, clearh)
+-GEN_VEXT_VV(vwmaccsu_vv_h, 2, 4, clearl)
+-GEN_VEXT_VV(vwmaccsu_vv_w, 4, 8, clearq)
++GEN_VEXT_VV(vwmaccu_vv_b, 1, 2)
++GEN_VEXT_VV(vwmaccu_vv_h, 2, 4)
++GEN_VEXT_VV(vwmaccu_vv_w, 4, 8)
++GEN_VEXT_VV(vwmacc_vv_b, 1, 2)
++GEN_VEXT_VV(vwmacc_vv_h, 2, 4)
++GEN_VEXT_VV(vwmacc_vv_w, 4, 8)
++GEN_VEXT_VV(vwmaccsu_vv_b, 1, 2)
++GEN_VEXT_VV(vwmaccsu_vv_h, 2, 4)
++GEN_VEXT_VV(vwmaccsu_vv_w, 4, 8)
+ 
+ RVVCALL(OPIVX3, vwmaccu_vx_b, WOP_UUU_B, H2, H1, DO_MACC)
+ RVVCALL(OPIVX3, vwmaccu_vx_h, WOP_UUU_H, H4, H2, DO_MACC)
+@@ -2017,89 +1920,78 @@ RVVCALL(OPIVX3, vwmaccsu_vx_w, WOP_SSU_W, H8, H4, DO_MACC)
+ RVVCALL(OPIVX3, vwmaccus_vx_b, WOP_SUS_B, H2, H1, DO_MACC)
+ RVVCALL(OPIVX3, vwmaccus_vx_h, WOP_SUS_H, H4, H2, DO_MACC)
+ RVVCALL(OPIVX3, vwmaccus_vx_w, WOP_SUS_W, H8, H4, DO_MACC)
+-GEN_VEXT_VX(vwmaccu_vx_b, 1, 2, clearh)
+-GEN_VEXT_VX(vwmaccu_vx_h, 2, 4, clearl)
+-GEN_VEXT_VX(vwmaccu_vx_w, 4, 8, clearq)
+-GEN_VEXT_VX(vwmacc_vx_b, 1, 2, clearh)
+-GEN_VEXT_VX(vwmacc_vx_h, 2, 4, clearl)
+-GEN_VEXT_VX(vwmacc_vx_w, 4, 8, clearq)
+-GEN_VEXT_VX(vwmaccsu_vx_b, 1, 2, clearh)
+-GEN_VEXT_VX(vwmaccsu_vx_h, 2, 4, clearl)
+-GEN_VEXT_VX(vwmaccsu_vx_w, 4, 8, clearq)
+-GEN_VEXT_VX(vwmaccus_vx_b, 1, 2, clearh)
+-GEN_VEXT_VX(vwmaccus_vx_h, 2, 4, clearl)
+-GEN_VEXT_VX(vwmaccus_vx_w, 4, 8, clearq)
++GEN_VEXT_VX(vwmaccu_vx_b, 1, 2)
++GEN_VEXT_VX(vwmaccu_vx_h, 2, 4)
++GEN_VEXT_VX(vwmaccu_vx_w, 4, 8)
++GEN_VEXT_VX(vwmacc_vx_b, 1, 2)
++GEN_VEXT_VX(vwmacc_vx_h, 2, 4)
++GEN_VEXT_VX(vwmacc_vx_w, 4, 8)
++GEN_VEXT_VX(vwmaccsu_vx_b, 1, 2)
++GEN_VEXT_VX(vwmaccsu_vx_h, 2, 4)
++GEN_VEXT_VX(vwmaccsu_vx_w, 4, 8)
++GEN_VEXT_VX(vwmaccus_vx_b, 1, 2)
++GEN_VEXT_VX(vwmaccus_vx_h, 2, 4)
++GEN_VEXT_VX(vwmaccus_vx_w, 4, 8)
+ 
+ /* Vector Integer Merge and Move Instructions */
+-#define GEN_VEXT_VMV_VV(NAME, ETYPE, H, CLEAR_FN)                    \
++#define GEN_VEXT_VMV_VV(NAME, ETYPE, H)                              \
+ void HELPER(NAME)(void *vd, void *vs1, CPURISCVState *env,           \
+                   uint32_t desc)                                     \
+ {                                                                    \
+     uint32_t vl = env->vl;                                           \
+-    uint32_t esz = sizeof(ETYPE);                                    \
+-    uint32_t vlmax = vext_maxsz(desc) / esz;                         \
+     uint32_t i;                                                      \
+                                                                      \
+     for (i = 0; i < vl; i++) {                                       \
+         ETYPE s1 = *((ETYPE *)vs1 + H(i));                           \
+         *((ETYPE *)vd + H(i)) = s1;                                  \
+     }                                                                \
+-    CLEAR_FN(vd, vl, vl * esz, vlmax * esz);                         \
+ }
+ 
+-GEN_VEXT_VMV_VV(vmv_v_v_b, int8_t,  H1, clearb)
+-GEN_VEXT_VMV_VV(vmv_v_v_h, int16_t, H2, clearh)
+-GEN_VEXT_VMV_VV(vmv_v_v_w, int32_t, H4, clearl)
+-GEN_VEXT_VMV_VV(vmv_v_v_d, int64_t, H8, clearq)
++GEN_VEXT_VMV_VV(vmv_v_v_b, int8_t,  H1)
++GEN_VEXT_VMV_VV(vmv_v_v_h, int16_t, H2)
++GEN_VEXT_VMV_VV(vmv_v_v_w, int32_t, H4)
++GEN_VEXT_VMV_VV(vmv_v_v_d, int64_t, H8)
+ 
+-#define GEN_VEXT_VMV_VX(NAME, ETYPE, H, CLEAR_FN)                    \
++#define GEN_VEXT_VMV_VX(NAME, ETYPE, H)                              \
+ void HELPER(NAME)(void *vd, uint64_t s1, CPURISCVState *env,         \
+                   uint32_t desc)                                     \
+ {                                                                    \
+     uint32_t vl = env->vl;                                           \
+-    uint32_t esz = sizeof(ETYPE);                                    \
+-    uint32_t vlmax = vext_maxsz(desc) / esz;                         \
+     uint32_t i;                                                      \
+                                                                      \
+     for (i = 0; i < vl; i++) {                                       \
+         *((ETYPE *)vd + H(i)) = (ETYPE)s1;                           \
+     }                                                                \
+-    CLEAR_FN(vd, vl, vl * esz, vlmax * esz);                         \
+ }
+ 
+-GEN_VEXT_VMV_VX(vmv_v_x_b, int8_t,  H1, clearb)
+-GEN_VEXT_VMV_VX(vmv_v_x_h, int16_t, H2, clearh)
+-GEN_VEXT_VMV_VX(vmv_v_x_w, int32_t, H4, clearl)
+-GEN_VEXT_VMV_VX(vmv_v_x_d, int64_t, H8, clearq)
++GEN_VEXT_VMV_VX(vmv_v_x_b, int8_t,  H1)
++GEN_VEXT_VMV_VX(vmv_v_x_h, int16_t, H2)
++GEN_VEXT_VMV_VX(vmv_v_x_w, int32_t, H4)
++GEN_VEXT_VMV_VX(vmv_v_x_d, int64_t, H8)
+ 
+-#define GEN_VEXT_VMERGE_VV(NAME, ETYPE, H, CLEAR_FN)                 \
++#define GEN_VEXT_VMERGE_VV(NAME, ETYPE, H)                           \
+ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2,          \
+                   CPURISCVState *env, uint32_t desc)                 \
+ {                                                                    \
+     uint32_t vl = env->vl;                                           \
+-    uint32_t esz = sizeof(ETYPE);                                    \
+-    uint32_t vlmax = vext_maxsz(desc) / esz;                         \
+     uint32_t i;                                                      \
+                                                                      \
+     for (i = 0; i < vl; i++) {                                       \
+         ETYPE *vt = (!vext_elem_mask(v0, i) ? vs2 : vs1);            \
+         *((ETYPE *)vd + H(i)) = *(vt + H(i));                        \
+     }                                                                \
+-    CLEAR_FN(vd, vl, vl * esz, vlmax * esz);                         \
+ }
+ 
+-GEN_VEXT_VMERGE_VV(vmerge_vvm_b, int8_t,  H1, clearb)
+-GEN_VEXT_VMERGE_VV(vmerge_vvm_h, int16_t, H2, clearh)
+-GEN_VEXT_VMERGE_VV(vmerge_vvm_w, int32_t, H4, clearl)
+-GEN_VEXT_VMERGE_VV(vmerge_vvm_d, int64_t, H8, clearq)
++GEN_VEXT_VMERGE_VV(vmerge_vvm_b, int8_t,  H1)
++GEN_VEXT_VMERGE_VV(vmerge_vvm_h, int16_t, H2)
++GEN_VEXT_VMERGE_VV(vmerge_vvm_w, int32_t, H4)
++GEN_VEXT_VMERGE_VV(vmerge_vvm_d, int64_t, H8)
+ 
+-#define GEN_VEXT_VMERGE_VX(NAME, ETYPE, H, CLEAR_FN)                 \
++#define GEN_VEXT_VMERGE_VX(NAME, ETYPE, H)                           \
+ void HELPER(NAME)(void *vd, void *v0, target_ulong s1,               \
+                   void *vs2, CPURISCVState *env, uint32_t desc)      \
+ {                                                                    \
+     uint32_t vl = env->vl;                                           \
+-    uint32_t esz = sizeof(ETYPE);                                    \
+-    uint32_t vlmax = vext_maxsz(desc) / esz;                         \
+     uint32_t i;                                                      \
+                                                                      \
+     for (i = 0; i < vl; i++) {                                       \
+@@ -2108,13 +2000,12 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1,               \
+                    (ETYPE)(target_long)s1);                          \
+         *((ETYPE *)vd + H(i)) = d;                                   \
+     }                                                                \
+-    CLEAR_FN(vd, vl, vl * esz, vlmax * esz);                         \
+ }
+ 
+-GEN_VEXT_VMERGE_VX(vmerge_vxm_b, int8_t,  H1, clearb)
+-GEN_VEXT_VMERGE_VX(vmerge_vxm_h, int16_t, H2, clearh)
+-GEN_VEXT_VMERGE_VX(vmerge_vxm_w, int32_t, H4, clearl)
+-GEN_VEXT_VMERGE_VX(vmerge_vxm_d, int64_t, H8, clearq)
++GEN_VEXT_VMERGE_VX(vmerge_vxm_b, int8_t,  H1)
++GEN_VEXT_VMERGE_VX(vmerge_vxm_h, int16_t, H2)
++GEN_VEXT_VMERGE_VX(vmerge_vxm_w, int32_t, H4)
++GEN_VEXT_VMERGE_VX(vmerge_vxm_d, int64_t, H8)
+ 
+ /*
+  *** Vector Fixed-Point Arithmetic Instructions
+@@ -2157,9 +2048,8 @@ static inline void
+ vext_vv_rm_2(void *vd, void *v0, void *vs1, void *vs2,
+              CPURISCVState *env,
+              uint32_t desc, uint32_t esz, uint32_t dsz,
+-             opivv2_rm_fn *fn, clear_fn *clearfn)
++             opivv2_rm_fn *fn)
+ {
+-    uint32_t vlmax = vext_maxsz(desc) / esz;
+     uint32_t vm = vext_vm(desc);
+     uint32_t vl = env->vl;
+ 
+@@ -2181,17 +2071,15 @@ vext_vv_rm_2(void *vd, void *v0, void *vs1, void *vs2,
+                      env, vl, vm, 3, fn);
+         break;
+     }
+-
+-    clearfn(vd, vl, vl * dsz,  vlmax * dsz);
+ }
+ 
+ /* generate helpers for fixed point instructions with OPIVV format */
+-#define GEN_VEXT_VV_RM(NAME, ESZ, DSZ, CLEAR_FN)                \
++#define GEN_VEXT_VV_RM(NAME, ESZ, DSZ)                          \
+ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2,     \
+                   CPURISCVState *env, uint32_t desc)            \
+ {                                                               \
+     vext_vv_rm_2(vd, v0, vs1, vs2, env, desc, ESZ, DSZ,         \
+-                 do_##NAME, CLEAR_FN);                          \
++                 do_##NAME);                                    \
+ }
+ 
+ static inline uint8_t saddu8(CPURISCVState *env, int vxrm, uint8_t a, uint8_t b)
+@@ -2241,10 +2129,10 @@ RVVCALL(OPIVV2_RM, vsaddu_vv_b, OP_UUU_B, H1, H1, H1, saddu8)
+ RVVCALL(OPIVV2_RM, vsaddu_vv_h, OP_UUU_H, H2, H2, H2, saddu16)
+ RVVCALL(OPIVV2_RM, vsaddu_vv_w, OP_UUU_W, H4, H4, H4, saddu32)
+ RVVCALL(OPIVV2_RM, vsaddu_vv_d, OP_UUU_D, H8, H8, H8, saddu64)
+-GEN_VEXT_VV_RM(vsaddu_vv_b, 1, 1, clearb)
+-GEN_VEXT_VV_RM(vsaddu_vv_h, 2, 2, clearh)
+-GEN_VEXT_VV_RM(vsaddu_vv_w, 4, 4, clearl)
+-GEN_VEXT_VV_RM(vsaddu_vv_d, 8, 8, clearq)
++GEN_VEXT_VV_RM(vsaddu_vv_b, 1, 1)
++GEN_VEXT_VV_RM(vsaddu_vv_h, 2, 2)
++GEN_VEXT_VV_RM(vsaddu_vv_w, 4, 4)
++GEN_VEXT_VV_RM(vsaddu_vv_d, 8, 8)
+ 
+ typedef void opivx2_rm_fn(void *vd, target_long s1, void *vs2, int i,
+                           CPURISCVState *env, int vxrm);
+@@ -2276,9 +2164,8 @@ static inline void
+ vext_vx_rm_2(void *vd, void *v0, target_long s1, void *vs2,
+              CPURISCVState *env,
+              uint32_t desc, uint32_t esz, uint32_t dsz,
+-             opivx2_rm_fn *fn, clear_fn *clearfn)
++             opivx2_rm_fn *fn)
+ {
+-    uint32_t vlmax = vext_maxsz(desc) / esz;
+     uint32_t vm = vext_vm(desc);
+     uint32_t vl = env->vl;
+ 
+@@ -2300,27 +2187,25 @@ vext_vx_rm_2(void *vd, void *v0, target_long s1, void *vs2,
+                      env, vl, vm, 3, fn);
+         break;
+     }
+-
+-    clearfn(vd, vl, vl * dsz,  vlmax * dsz);
+ }
+ 
+ /* generate helpers for fixed point instructions with OPIVX format */
+-#define GEN_VEXT_VX_RM(NAME, ESZ, DSZ, CLEAR_FN)          \
++#define GEN_VEXT_VX_RM(NAME, ESZ, DSZ)                    \
+ void HELPER(NAME)(void *vd, void *v0, target_ulong s1,    \
+         void *vs2, CPURISCVState *env, uint32_t desc)     \
+ {                                                         \
+     vext_vx_rm_2(vd, v0, s1, vs2, env, desc, ESZ, DSZ,    \
+-                 do_##NAME, CLEAR_FN);                    \
++                 do_##NAME);                              \
+ }
+ 
+ RVVCALL(OPIVX2_RM, vsaddu_vx_b, OP_UUU_B, H1, H1, saddu8)
+ RVVCALL(OPIVX2_RM, vsaddu_vx_h, OP_UUU_H, H2, H2, saddu16)
+ RVVCALL(OPIVX2_RM, vsaddu_vx_w, OP_UUU_W, H4, H4, saddu32)
+ RVVCALL(OPIVX2_RM, vsaddu_vx_d, OP_UUU_D, H8, H8, saddu64)
+-GEN_VEXT_VX_RM(vsaddu_vx_b, 1, 1, clearb)
+-GEN_VEXT_VX_RM(vsaddu_vx_h, 2, 2, clearh)
+-GEN_VEXT_VX_RM(vsaddu_vx_w, 4, 4, clearl)
+-GEN_VEXT_VX_RM(vsaddu_vx_d, 8, 8, clearq)
++GEN_VEXT_VX_RM(vsaddu_vx_b, 1, 1)
++GEN_VEXT_VX_RM(vsaddu_vx_h, 2, 2)
++GEN_VEXT_VX_RM(vsaddu_vx_w, 4, 4)
++GEN_VEXT_VX_RM(vsaddu_vx_d, 8, 8)
+ 
+ static inline int8_t sadd8(CPURISCVState *env, int vxrm, int8_t a, int8_t b)
+ {
+@@ -2366,19 +2251,19 @@ RVVCALL(OPIVV2_RM, vsadd_vv_b, OP_SSS_B, H1, H1, H1, sadd8)
+ RVVCALL(OPIVV2_RM, vsadd_vv_h, OP_SSS_H, H2, H2, H2, sadd16)
+ RVVCALL(OPIVV2_RM, vsadd_vv_w, OP_SSS_W, H4, H4, H4, sadd32)
+ RVVCALL(OPIVV2_RM, vsadd_vv_d, OP_SSS_D, H8, H8, H8, sadd64)
+-GEN_VEXT_VV_RM(vsadd_vv_b, 1, 1, clearb)
+-GEN_VEXT_VV_RM(vsadd_vv_h, 2, 2, clearh)
+-GEN_VEXT_VV_RM(vsadd_vv_w, 4, 4, clearl)
+-GEN_VEXT_VV_RM(vsadd_vv_d, 8, 8, clearq)
++GEN_VEXT_VV_RM(vsadd_vv_b, 1, 1)
++GEN_VEXT_VV_RM(vsadd_vv_h, 2, 2)
++GEN_VEXT_VV_RM(vsadd_vv_w, 4, 4)
++GEN_VEXT_VV_RM(vsadd_vv_d, 8, 8)
+ 
+ RVVCALL(OPIVX2_RM, vsadd_vx_b, OP_SSS_B, H1, H1, sadd8)
+ RVVCALL(OPIVX2_RM, vsadd_vx_h, OP_SSS_H, H2, H2, sadd16)
+ RVVCALL(OPIVX2_RM, vsadd_vx_w, OP_SSS_W, H4, H4, sadd32)
+ RVVCALL(OPIVX2_RM, vsadd_vx_d, OP_SSS_D, H8, H8, sadd64)
+-GEN_VEXT_VX_RM(vsadd_vx_b, 1, 1, clearb)
+-GEN_VEXT_VX_RM(vsadd_vx_h, 2, 2, clearh)
+-GEN_VEXT_VX_RM(vsadd_vx_w, 4, 4, clearl)
+-GEN_VEXT_VX_RM(vsadd_vx_d, 8, 8, clearq)
++GEN_VEXT_VX_RM(vsadd_vx_b, 1, 1)
++GEN_VEXT_VX_RM(vsadd_vx_h, 2, 2)
++GEN_VEXT_VX_RM(vsadd_vx_w, 4, 4)
++GEN_VEXT_VX_RM(vsadd_vx_d, 8, 8)
+ 
+ static inline uint8_t ssubu8(CPURISCVState *env, int vxrm, uint8_t a, uint8_t b)
+ {
+@@ -2427,19 +2312,19 @@ RVVCALL(OPIVV2_RM, vssubu_vv_b, OP_UUU_B, H1, H1, H1, ssubu8)
+ RVVCALL(OPIVV2_RM, vssubu_vv_h, OP_UUU_H, H2, H2, H2, ssubu16)
+ RVVCALL(OPIVV2_RM, vssubu_vv_w, OP_UUU_W, H4, H4, H4, ssubu32)
+ RVVCALL(OPIVV2_RM, vssubu_vv_d, OP_UUU_D, H8, H8, H8, ssubu64)
+-GEN_VEXT_VV_RM(vssubu_vv_b, 1, 1, clearb)
+-GEN_VEXT_VV_RM(vssubu_vv_h, 2, 2, clearh)
+-GEN_VEXT_VV_RM(vssubu_vv_w, 4, 4, clearl)
+-GEN_VEXT_VV_RM(vssubu_vv_d, 8, 8, clearq)
++GEN_VEXT_VV_RM(vssubu_vv_b, 1, 1)
++GEN_VEXT_VV_RM(vssubu_vv_h, 2, 2)
++GEN_VEXT_VV_RM(vssubu_vv_w, 4, 4)
++GEN_VEXT_VV_RM(vssubu_vv_d, 8, 8)
+ 
+ RVVCALL(OPIVX2_RM, vssubu_vx_b, OP_UUU_B, H1, H1, ssubu8)
+ RVVCALL(OPIVX2_RM, vssubu_vx_h, OP_UUU_H, H2, H2, ssubu16)
+ RVVCALL(OPIVX2_RM, vssubu_vx_w, OP_UUU_W, H4, H4, ssubu32)
+ RVVCALL(OPIVX2_RM, vssubu_vx_d, OP_UUU_D, H8, H8, ssubu64)
+-GEN_VEXT_VX_RM(vssubu_vx_b, 1, 1, clearb)
+-GEN_VEXT_VX_RM(vssubu_vx_h, 2, 2, clearh)
+-GEN_VEXT_VX_RM(vssubu_vx_w, 4, 4, clearl)
+-GEN_VEXT_VX_RM(vssubu_vx_d, 8, 8, clearq)
++GEN_VEXT_VX_RM(vssubu_vx_b, 1, 1)
++GEN_VEXT_VX_RM(vssubu_vx_h, 2, 2)
++GEN_VEXT_VX_RM(vssubu_vx_w, 4, 4)
++GEN_VEXT_VX_RM(vssubu_vx_d, 8, 8)
+ 
+ static inline int8_t ssub8(CPURISCVState *env, int vxrm, int8_t a, int8_t b)
+ {
+@@ -2485,19 +2370,19 @@ RVVCALL(OPIVV2_RM, vssub_vv_b, OP_SSS_B, H1, H1, H1, ssub8)
+ RVVCALL(OPIVV2_RM, vssub_vv_h, OP_SSS_H, H2, H2, H2, ssub16)
+ RVVCALL(OPIVV2_RM, vssub_vv_w, OP_SSS_W, H4, H4, H4, ssub32)
+ RVVCALL(OPIVV2_RM, vssub_vv_d, OP_SSS_D, H8, H8, H8, ssub64)
+-GEN_VEXT_VV_RM(vssub_vv_b, 1, 1, clearb)
+-GEN_VEXT_VV_RM(vssub_vv_h, 2, 2, clearh)
+-GEN_VEXT_VV_RM(vssub_vv_w, 4, 4, clearl)
+-GEN_VEXT_VV_RM(vssub_vv_d, 8, 8, clearq)
++GEN_VEXT_VV_RM(vssub_vv_b, 1, 1)
++GEN_VEXT_VV_RM(vssub_vv_h, 2, 2)
++GEN_VEXT_VV_RM(vssub_vv_w, 4, 4)
++GEN_VEXT_VV_RM(vssub_vv_d, 8, 8)
+ 
+ RVVCALL(OPIVX2_RM, vssub_vx_b, OP_SSS_B, H1, H1, ssub8)
+ RVVCALL(OPIVX2_RM, vssub_vx_h, OP_SSS_H, H2, H2, ssub16)
+ RVVCALL(OPIVX2_RM, vssub_vx_w, OP_SSS_W, H4, H4, ssub32)
+ RVVCALL(OPIVX2_RM, vssub_vx_d, OP_SSS_D, H8, H8, ssub64)
+-GEN_VEXT_VX_RM(vssub_vx_b, 1, 1, clearb)
+-GEN_VEXT_VX_RM(vssub_vx_h, 2, 2, clearh)
+-GEN_VEXT_VX_RM(vssub_vx_w, 4, 4, clearl)
+-GEN_VEXT_VX_RM(vssub_vx_d, 8, 8, clearq)
++GEN_VEXT_VX_RM(vssub_vx_b, 1, 1)
++GEN_VEXT_VX_RM(vssub_vx_h, 2, 2)
++GEN_VEXT_VX_RM(vssub_vx_w, 4, 4)
++GEN_VEXT_VX_RM(vssub_vx_d, 8, 8)
+ 
+ /* Vector Single-Width Averaging Add and Subtract */
+ static inline uint8_t get_round(int vxrm, uint64_t v, uint8_t shift)
+@@ -2549,19 +2434,19 @@ RVVCALL(OPIVV2_RM, vaadd_vv_b, OP_SSS_B, H1, H1, H1, aadd32)
+ RVVCALL(OPIVV2_RM, vaadd_vv_h, OP_SSS_H, H2, H2, H2, aadd32)
+ RVVCALL(OPIVV2_RM, vaadd_vv_w, OP_SSS_W, H4, H4, H4, aadd32)
+ RVVCALL(OPIVV2_RM, vaadd_vv_d, OP_SSS_D, H8, H8, H8, aadd64)
+-GEN_VEXT_VV_RM(vaadd_vv_b, 1, 1, clearb)
+-GEN_VEXT_VV_RM(vaadd_vv_h, 2, 2, clearh)
+-GEN_VEXT_VV_RM(vaadd_vv_w, 4, 4, clearl)
+-GEN_VEXT_VV_RM(vaadd_vv_d, 8, 8, clearq)
++GEN_VEXT_VV_RM(vaadd_vv_b, 1, 1)
++GEN_VEXT_VV_RM(vaadd_vv_h, 2, 2)
++GEN_VEXT_VV_RM(vaadd_vv_w, 4, 4)
++GEN_VEXT_VV_RM(vaadd_vv_d, 8, 8)
+ 
+ RVVCALL(OPIVX2_RM, vaadd_vx_b, OP_SSS_B, H1, H1, aadd32)
+ RVVCALL(OPIVX2_RM, vaadd_vx_h, OP_SSS_H, H2, H2, aadd32)
+ RVVCALL(OPIVX2_RM, vaadd_vx_w, OP_SSS_W, H4, H4, aadd32)
+ RVVCALL(OPIVX2_RM, vaadd_vx_d, OP_SSS_D, H8, H8, aadd64)
+-GEN_VEXT_VX_RM(vaadd_vx_b, 1, 1, clearb)
+-GEN_VEXT_VX_RM(vaadd_vx_h, 2, 2, clearh)
+-GEN_VEXT_VX_RM(vaadd_vx_w, 4, 4, clearl)
+-GEN_VEXT_VX_RM(vaadd_vx_d, 8, 8, clearq)
++GEN_VEXT_VX_RM(vaadd_vx_b, 1, 1)
++GEN_VEXT_VX_RM(vaadd_vx_h, 2, 2)
++GEN_VEXT_VX_RM(vaadd_vx_w, 4, 4)
++GEN_VEXT_VX_RM(vaadd_vx_d, 8, 8)
+ 
+ static inline int32_t asub32(CPURISCVState *env, int vxrm, int32_t a, int32_t b)
+ {
+@@ -2585,19 +2470,19 @@ RVVCALL(OPIVV2_RM, vasub_vv_b, OP_SSS_B, H1, H1, H1, asub32)
+ RVVCALL(OPIVV2_RM, vasub_vv_h, OP_SSS_H, H2, H2, H2, asub32)
+ RVVCALL(OPIVV2_RM, vasub_vv_w, OP_SSS_W, H4, H4, H4, asub32)
+ RVVCALL(OPIVV2_RM, vasub_vv_d, OP_SSS_D, H8, H8, H8, asub64)
+-GEN_VEXT_VV_RM(vasub_vv_b, 1, 1, clearb)
+-GEN_VEXT_VV_RM(vasub_vv_h, 2, 2, clearh)
+-GEN_VEXT_VV_RM(vasub_vv_w, 4, 4, clearl)
+-GEN_VEXT_VV_RM(vasub_vv_d, 8, 8, clearq)
++GEN_VEXT_VV_RM(vasub_vv_b, 1, 1)
++GEN_VEXT_VV_RM(vasub_vv_h, 2, 2)
++GEN_VEXT_VV_RM(vasub_vv_w, 4, 4)
++GEN_VEXT_VV_RM(vasub_vv_d, 8, 8)
+ 
+ RVVCALL(OPIVX2_RM, vasub_vx_b, OP_SSS_B, H1, H1, asub32)
+ RVVCALL(OPIVX2_RM, vasub_vx_h, OP_SSS_H, H2, H2, asub32)
+ RVVCALL(OPIVX2_RM, vasub_vx_w, OP_SSS_W, H4, H4, asub32)
+ RVVCALL(OPIVX2_RM, vasub_vx_d, OP_SSS_D, H8, H8, asub64)
+-GEN_VEXT_VX_RM(vasub_vx_b, 1, 1, clearb)
+-GEN_VEXT_VX_RM(vasub_vx_h, 2, 2, clearh)
+-GEN_VEXT_VX_RM(vasub_vx_w, 4, 4, clearl)
+-GEN_VEXT_VX_RM(vasub_vx_d, 8, 8, clearq)
++GEN_VEXT_VX_RM(vasub_vx_b, 1, 1)
++GEN_VEXT_VX_RM(vasub_vx_h, 2, 2)
++GEN_VEXT_VX_RM(vasub_vx_w, 4, 4)
++GEN_VEXT_VX_RM(vasub_vx_d, 8, 8)
+ 
+ /* Vector Single-Width Fractional Multiply with Rounding and Saturation */
+ static inline int8_t vsmul8(CPURISCVState *env, int vxrm, int8_t a, int8_t b)
+@@ -2692,19 +2577,19 @@ RVVCALL(OPIVV2_RM, vsmul_vv_b, OP_SSS_B, H1, H1, H1, vsmul8)
+ RVVCALL(OPIVV2_RM, vsmul_vv_h, OP_SSS_H, H2, H2, H2, vsmul16)
+ RVVCALL(OPIVV2_RM, vsmul_vv_w, OP_SSS_W, H4, H4, H4, vsmul32)
+ RVVCALL(OPIVV2_RM, vsmul_vv_d, OP_SSS_D, H8, H8, H8, vsmul64)
+-GEN_VEXT_VV_RM(vsmul_vv_b, 1, 1, clearb)
+-GEN_VEXT_VV_RM(vsmul_vv_h, 2, 2, clearh)
+-GEN_VEXT_VV_RM(vsmul_vv_w, 4, 4, clearl)
+-GEN_VEXT_VV_RM(vsmul_vv_d, 8, 8, clearq)
++GEN_VEXT_VV_RM(vsmul_vv_b, 1, 1)
++GEN_VEXT_VV_RM(vsmul_vv_h, 2, 2)
++GEN_VEXT_VV_RM(vsmul_vv_w, 4, 4)
++GEN_VEXT_VV_RM(vsmul_vv_d, 8, 8)
+ 
+ RVVCALL(OPIVX2_RM, vsmul_vx_b, OP_SSS_B, H1, H1, vsmul8)
+ RVVCALL(OPIVX2_RM, vsmul_vx_h, OP_SSS_H, H2, H2, vsmul16)
+ RVVCALL(OPIVX2_RM, vsmul_vx_w, OP_SSS_W, H4, H4, vsmul32)
+ RVVCALL(OPIVX2_RM, vsmul_vx_d, OP_SSS_D, H8, H8, vsmul64)
+-GEN_VEXT_VX_RM(vsmul_vx_b, 1, 1, clearb)
+-GEN_VEXT_VX_RM(vsmul_vx_h, 2, 2, clearh)
+-GEN_VEXT_VX_RM(vsmul_vx_w, 4, 4, clearl)
+-GEN_VEXT_VX_RM(vsmul_vx_d, 8, 8, clearq)
++GEN_VEXT_VX_RM(vsmul_vx_b, 1, 1)
++GEN_VEXT_VX_RM(vsmul_vx_h, 2, 2)
++GEN_VEXT_VX_RM(vsmul_vx_w, 4, 4)
++GEN_VEXT_VX_RM(vsmul_vx_d, 8, 8)
+ 
+ /* Vector Widening Saturating Scaled Multiply-Add */
+ static inline uint16_t
+@@ -2757,9 +2642,9 @@ do_##NAME(void *vd, void *vs1, void *vs2, int i,                   \
+ RVVCALL(OPIVV3_RM, vwsmaccu_vv_b, WOP_UUU_B, H2, H1, H1, vwsmaccu8)
+ RVVCALL(OPIVV3_RM, vwsmaccu_vv_h, WOP_UUU_H, H4, H2, H2, vwsmaccu16)
+ RVVCALL(OPIVV3_RM, vwsmaccu_vv_w, WOP_UUU_W, H8, H4, H4, vwsmaccu32)
+-GEN_VEXT_VV_RM(vwsmaccu_vv_b, 1, 2, clearh)
+-GEN_VEXT_VV_RM(vwsmaccu_vv_h, 2, 4, clearl)
+-GEN_VEXT_VV_RM(vwsmaccu_vv_w, 4, 8, clearq)
++GEN_VEXT_VV_RM(vwsmaccu_vv_b, 1, 2)
++GEN_VEXT_VV_RM(vwsmaccu_vv_h, 2, 4)
++GEN_VEXT_VV_RM(vwsmaccu_vv_w, 4, 8)
+ 
+ #define OPIVX3_RM(NAME, TD, T1, T2, TX1, TX2, HD, HS2, OP)         \
+ static inline void                                                 \
+@@ -2774,9 +2659,9 @@ do_##NAME(void *vd, target_long s1, void *vs2, int i,              \
+ RVVCALL(OPIVX3_RM, vwsmaccu_vx_b, WOP_UUU_B, H2, H1, vwsmaccu8)
+ RVVCALL(OPIVX3_RM, vwsmaccu_vx_h, WOP_UUU_H, H4, H2, vwsmaccu16)
+ RVVCALL(OPIVX3_RM, vwsmaccu_vx_w, WOP_UUU_W, H8, H4, vwsmaccu32)
+-GEN_VEXT_VX_RM(vwsmaccu_vx_b, 1, 2, clearh)
+-GEN_VEXT_VX_RM(vwsmaccu_vx_h, 2, 4, clearl)
+-GEN_VEXT_VX_RM(vwsmaccu_vx_w, 4, 8, clearq)
++GEN_VEXT_VX_RM(vwsmaccu_vx_b, 1, 2)
++GEN_VEXT_VX_RM(vwsmaccu_vx_h, 2, 4)
++GEN_VEXT_VX_RM(vwsmaccu_vx_w, 4, 8)
+ 
+ static inline int16_t
+ vwsmacc8(CPURISCVState *env, int vxrm, int8_t a, int8_t b, int16_t c)
+@@ -2815,15 +2700,15 @@ vwsmacc32(CPURISCVState *env, int vxrm, int32_t a, int32_t b, int64_t c)
+ RVVCALL(OPIVV3_RM, vwsmacc_vv_b, WOP_SSS_B, H2, H1, H1, vwsmacc8)
+ RVVCALL(OPIVV3_RM, vwsmacc_vv_h, WOP_SSS_H, H4, H2, H2, vwsmacc16)
+ RVVCALL(OPIVV3_RM, vwsmacc_vv_w, WOP_SSS_W, H8, H4, H4, vwsmacc32)
+-GEN_VEXT_VV_RM(vwsmacc_vv_b, 1, 2, clearh)
+-GEN_VEXT_VV_RM(vwsmacc_vv_h, 2, 4, clearl)
+-GEN_VEXT_VV_RM(vwsmacc_vv_w, 4, 8, clearq)
++GEN_VEXT_VV_RM(vwsmacc_vv_b, 1, 2)
++GEN_VEXT_VV_RM(vwsmacc_vv_h, 2, 4)
++GEN_VEXT_VV_RM(vwsmacc_vv_w, 4, 8)
+ RVVCALL(OPIVX3_RM, vwsmacc_vx_b, WOP_SSS_B, H2, H1, vwsmacc8)
+ RVVCALL(OPIVX3_RM, vwsmacc_vx_h, WOP_SSS_H, H4, H2, vwsmacc16)
+ RVVCALL(OPIVX3_RM, vwsmacc_vx_w, WOP_SSS_W, H8, H4, vwsmacc32)
+-GEN_VEXT_VX_RM(vwsmacc_vx_b, 1, 2, clearh)
+-GEN_VEXT_VX_RM(vwsmacc_vx_h, 2, 4, clearl)
+-GEN_VEXT_VX_RM(vwsmacc_vx_w, 4, 8, clearq)
++GEN_VEXT_VX_RM(vwsmacc_vx_b, 1, 2)
++GEN_VEXT_VX_RM(vwsmacc_vx_h, 2, 4)
++GEN_VEXT_VX_RM(vwsmacc_vx_w, 4, 8)
+ 
+ static inline int16_t
+ vwsmaccsu8(CPURISCVState *env, int vxrm, uint8_t a, int8_t b, int16_t c)
+@@ -2861,15 +2746,15 @@ vwsmaccsu32(CPURISCVState *env, int vxrm, uint32_t a, int32_t b, int64_t c)
+ RVVCALL(OPIVV3_RM, vwsmaccsu_vv_b, WOP_SSU_B, H2, H1, H1, vwsmaccsu8)
+ RVVCALL(OPIVV3_RM, vwsmaccsu_vv_h, WOP_SSU_H, H4, H2, H2, vwsmaccsu16)
+ RVVCALL(OPIVV3_RM, vwsmaccsu_vv_w, WOP_SSU_W, H8, H4, H4, vwsmaccsu32)
+-GEN_VEXT_VV_RM(vwsmaccsu_vv_b, 1, 2, clearh)
+-GEN_VEXT_VV_RM(vwsmaccsu_vv_h, 2, 4, clearl)
+-GEN_VEXT_VV_RM(vwsmaccsu_vv_w, 4, 8, clearq)
++GEN_VEXT_VV_RM(vwsmaccsu_vv_b, 1, 2)
++GEN_VEXT_VV_RM(vwsmaccsu_vv_h, 2, 4)
++GEN_VEXT_VV_RM(vwsmaccsu_vv_w, 4, 8)
+ RVVCALL(OPIVX3_RM, vwsmaccsu_vx_b, WOP_SSU_B, H2, H1, vwsmaccsu8)
+ RVVCALL(OPIVX3_RM, vwsmaccsu_vx_h, WOP_SSU_H, H4, H2, vwsmaccsu16)
+ RVVCALL(OPIVX3_RM, vwsmaccsu_vx_w, WOP_SSU_W, H8, H4, vwsmaccsu32)
+-GEN_VEXT_VX_RM(vwsmaccsu_vx_b, 1, 2, clearh)
+-GEN_VEXT_VX_RM(vwsmaccsu_vx_h, 2, 4, clearl)
+-GEN_VEXT_VX_RM(vwsmaccsu_vx_w, 4, 8, clearq)
++GEN_VEXT_VX_RM(vwsmaccsu_vx_b, 1, 2)
++GEN_VEXT_VX_RM(vwsmaccsu_vx_h, 2, 4)
++GEN_VEXT_VX_RM(vwsmaccsu_vx_w, 4, 8)
+ 
+ static inline int16_t
+ vwsmaccus8(CPURISCVState *env, int vxrm, int8_t a, uint8_t b, int16_t c)
+@@ -2907,9 +2792,9 @@ vwsmaccus32(CPURISCVState *env, int vxrm, int32_t a, uint32_t b, int64_t c)
+ RVVCALL(OPIVX3_RM, vwsmaccus_vx_b, WOP_SUS_B, H2, H1, vwsmaccus8)
+ RVVCALL(OPIVX3_RM, vwsmaccus_vx_h, WOP_SUS_H, H4, H2, vwsmaccus16)
+ RVVCALL(OPIVX3_RM, vwsmaccus_vx_w, WOP_SUS_W, H8, H4, vwsmaccus32)
+-GEN_VEXT_VX_RM(vwsmaccus_vx_b, 1, 2, clearh)
+-GEN_VEXT_VX_RM(vwsmaccus_vx_h, 2, 4, clearl)
+-GEN_VEXT_VX_RM(vwsmaccus_vx_w, 4, 8, clearq)
++GEN_VEXT_VX_RM(vwsmaccus_vx_b, 1, 2)
++GEN_VEXT_VX_RM(vwsmaccus_vx_h, 2, 4)
++GEN_VEXT_VX_RM(vwsmaccus_vx_w, 4, 8)
+ 
+ /* Vector Single-Width Scaling Shift Instructions */
+ static inline uint8_t
+@@ -2956,19 +2841,19 @@ RVVCALL(OPIVV2_RM, vssrl_vv_b, OP_UUU_B, H1, H1, H1, vssrl8)
+ RVVCALL(OPIVV2_RM, vssrl_vv_h, OP_UUU_H, H2, H2, H2, vssrl16)
+ RVVCALL(OPIVV2_RM, vssrl_vv_w, OP_UUU_W, H4, H4, H4, vssrl32)
+ RVVCALL(OPIVV2_RM, vssrl_vv_d, OP_UUU_D, H8, H8, H8, vssrl64)
+-GEN_VEXT_VV_RM(vssrl_vv_b, 1, 1, clearb)
+-GEN_VEXT_VV_RM(vssrl_vv_h, 2, 2, clearh)
+-GEN_VEXT_VV_RM(vssrl_vv_w, 4, 4, clearl)
+-GEN_VEXT_VV_RM(vssrl_vv_d, 8, 8, clearq)
++GEN_VEXT_VV_RM(vssrl_vv_b, 1, 1)
++GEN_VEXT_VV_RM(vssrl_vv_h, 2, 2)
++GEN_VEXT_VV_RM(vssrl_vv_w, 4, 4)
++GEN_VEXT_VV_RM(vssrl_vv_d, 8, 8)
+ 
+ RVVCALL(OPIVX2_RM, vssrl_vx_b, OP_UUU_B, H1, H1, vssrl8)
+ RVVCALL(OPIVX2_RM, vssrl_vx_h, OP_UUU_H, H2, H2, vssrl16)
+ RVVCALL(OPIVX2_RM, vssrl_vx_w, OP_UUU_W, H4, H4, vssrl32)
+ RVVCALL(OPIVX2_RM, vssrl_vx_d, OP_UUU_D, H8, H8, vssrl64)
+-GEN_VEXT_VX_RM(vssrl_vx_b, 1, 1, clearb)
+-GEN_VEXT_VX_RM(vssrl_vx_h, 2, 2, clearh)
+-GEN_VEXT_VX_RM(vssrl_vx_w, 4, 4, clearl)
+-GEN_VEXT_VX_RM(vssrl_vx_d, 8, 8, clearq)
++GEN_VEXT_VX_RM(vssrl_vx_b, 1, 1)
++GEN_VEXT_VX_RM(vssrl_vx_h, 2, 2)
++GEN_VEXT_VX_RM(vssrl_vx_w, 4, 4)
++GEN_VEXT_VX_RM(vssrl_vx_d, 8, 8)
+ 
+ static inline int8_t
+ vssra8(CPURISCVState *env, int vxrm, int8_t a, int8_t b)
+@@ -3015,19 +2900,19 @@ RVVCALL(OPIVV2_RM, vssra_vv_b, OP_SSS_B, H1, H1, H1, vssra8)
+ RVVCALL(OPIVV2_RM, vssra_vv_h, OP_SSS_H, H2, H2, H2, vssra16)
+ RVVCALL(OPIVV2_RM, vssra_vv_w, OP_SSS_W, H4, H4, H4, vssra32)
+ RVVCALL(OPIVV2_RM, vssra_vv_d, OP_SSS_D, H8, H8, H8, vssra64)
+-GEN_VEXT_VV_RM(vssra_vv_b, 1, 1, clearb)
+-GEN_VEXT_VV_RM(vssra_vv_h, 2, 2, clearh)
+-GEN_VEXT_VV_RM(vssra_vv_w, 4, 4, clearl)
+-GEN_VEXT_VV_RM(vssra_vv_d, 8, 8, clearq)
++GEN_VEXT_VV_RM(vssra_vv_b, 1, 1)
++GEN_VEXT_VV_RM(vssra_vv_h, 2, 2)
++GEN_VEXT_VV_RM(vssra_vv_w, 4, 4)
++GEN_VEXT_VV_RM(vssra_vv_d, 8, 8)
+ 
+ RVVCALL(OPIVX2_RM, vssra_vx_b, OP_SSS_B, H1, H1, vssra8)
+ RVVCALL(OPIVX2_RM, vssra_vx_h, OP_SSS_H, H2, H2, vssra16)
+ RVVCALL(OPIVX2_RM, vssra_vx_w, OP_SSS_W, H4, H4, vssra32)
+ RVVCALL(OPIVX2_RM, vssra_vx_d, OP_SSS_D, H8, H8, vssra64)
+-GEN_VEXT_VX_RM(vssra_vx_b, 1, 1, clearb)
+-GEN_VEXT_VX_RM(vssra_vx_h, 2, 2, clearh)
+-GEN_VEXT_VX_RM(vssra_vx_w, 4, 4, clearl)
+-GEN_VEXT_VX_RM(vssra_vx_d, 8, 8, clearq)
++GEN_VEXT_VX_RM(vssra_vx_b, 1, 1)
++GEN_VEXT_VX_RM(vssra_vx_h, 2, 2)
++GEN_VEXT_VX_RM(vssra_vx_w, 4, 4)
++GEN_VEXT_VX_RM(vssra_vx_d, 8, 8)
+ 
+ /* Vector Narrowing Fixed-Point Clip Instructions */
+ static inline int8_t
+@@ -3090,16 +2975,16 @@ vnclip32(CPURISCVState *env, int vxrm, int64_t a, int32_t b)
+ RVVCALL(OPIVV2_RM, vnclip_vv_b, NOP_SSS_B, H1, H2, H1, vnclip8)
+ RVVCALL(OPIVV2_RM, vnclip_vv_h, NOP_SSS_H, H2, H4, H2, vnclip16)
+ RVVCALL(OPIVV2_RM, vnclip_vv_w, NOP_SSS_W, H4, H8, H4, vnclip32)
+-GEN_VEXT_VV_RM(vnclip_vv_b, 1, 1, clearb)
+-GEN_VEXT_VV_RM(vnclip_vv_h, 2, 2, clearh)
+-GEN_VEXT_VV_RM(vnclip_vv_w, 4, 4, clearl)
++GEN_VEXT_VV_RM(vnclip_vv_b, 1, 1)
++GEN_VEXT_VV_RM(vnclip_vv_h, 2, 2)
++GEN_VEXT_VV_RM(vnclip_vv_w, 4, 4)
+ 
+ RVVCALL(OPIVX2_RM, vnclip_vx_b, NOP_SSS_B, H1, H2, vnclip8)
+ RVVCALL(OPIVX2_RM, vnclip_vx_h, NOP_SSS_H, H2, H4, vnclip16)
+ RVVCALL(OPIVX2_RM, vnclip_vx_w, NOP_SSS_W, H4, H8, vnclip32)
+-GEN_VEXT_VX_RM(vnclip_vx_b, 1, 1, clearb)
+-GEN_VEXT_VX_RM(vnclip_vx_h, 2, 2, clearh)
+-GEN_VEXT_VX_RM(vnclip_vx_w, 4, 4, clearl)
++GEN_VEXT_VX_RM(vnclip_vx_b, 1, 1)
++GEN_VEXT_VX_RM(vnclip_vx_h, 2, 2)
++GEN_VEXT_VX_RM(vnclip_vx_w, 4, 4)
+ 
+ static inline uint8_t
+ vnclipu8(CPURISCVState *env, int vxrm, uint16_t a, uint8_t b)
+@@ -3152,16 +3037,16 @@ vnclipu32(CPURISCVState *env, int vxrm, uint64_t a, uint32_t b)
+ RVVCALL(OPIVV2_RM, vnclipu_vv_b, NOP_UUU_B, H1, H2, H1, vnclipu8)
+ RVVCALL(OPIVV2_RM, vnclipu_vv_h, NOP_UUU_H, H2, H4, H2, vnclipu16)
+ RVVCALL(OPIVV2_RM, vnclipu_vv_w, NOP_UUU_W, H4, H8, H4, vnclipu32)
+-GEN_VEXT_VV_RM(vnclipu_vv_b, 1, 1, clearb)
+-GEN_VEXT_VV_RM(vnclipu_vv_h, 2, 2, clearh)
+-GEN_VEXT_VV_RM(vnclipu_vv_w, 4, 4, clearl)
++GEN_VEXT_VV_RM(vnclipu_vv_b, 1, 1)
++GEN_VEXT_VV_RM(vnclipu_vv_h, 2, 2)
++GEN_VEXT_VV_RM(vnclipu_vv_w, 4, 4)
+ 
+ RVVCALL(OPIVX2_RM, vnclipu_vx_b, NOP_UUU_B, H1, H2, vnclipu8)
+ RVVCALL(OPIVX2_RM, vnclipu_vx_h, NOP_UUU_H, H2, H4, vnclipu16)
+ RVVCALL(OPIVX2_RM, vnclipu_vx_w, NOP_UUU_W, H4, H8, vnclipu32)
+-GEN_VEXT_VX_RM(vnclipu_vx_b, 1, 1, clearb)
+-GEN_VEXT_VX_RM(vnclipu_vx_h, 2, 2, clearh)
+-GEN_VEXT_VX_RM(vnclipu_vx_w, 4, 4, clearl)
++GEN_VEXT_VX_RM(vnclipu_vx_b, 1, 1)
++GEN_VEXT_VX_RM(vnclipu_vx_h, 2, 2)
++GEN_VEXT_VX_RM(vnclipu_vx_w, 4, 4)
+ 
+ /*
+  *** Vector Float Point Arithmetic Instructions
+@@ -3176,12 +3061,11 @@ static void do_##NAME(void *vd, void *vs1, void *vs2, int i,   \
+     *((TD *)vd + HD(i)) = OP(s2, s1, &env->fp_status);         \
+ }
+ 
+-#define GEN_VEXT_VV_ENV(NAME, ESZ, DSZ, CLEAR_FN)         \
++#define GEN_VEXT_VV_ENV(NAME, ESZ, DSZ)                   \
+ void HELPER(NAME)(void *vd, void *v0, void *vs1,          \
+                   void *vs2, CPURISCVState *env,          \
+                   uint32_t desc)                          \
+ {                                                         \
+-    uint32_t vlmax = vext_maxsz(desc) / ESZ;              \
+     uint32_t vm = vext_vm(desc);                          \
+     uint32_t vl = env->vl;                                \
+     uint32_t i;                                           \
+@@ -3192,15 +3076,14 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1,          \
+         }                                                 \
+         do_##NAME(vd, vs1, vs2, i, env);                  \
+     }                                                     \
+-    CLEAR_FN(vd, vl, vl * DSZ,  vlmax * DSZ);             \
+ }
+ 
+ RVVCALL(OPFVV2, vfadd_vv_h, OP_UUU_H, H2, H2, H2, float16_add)
+ RVVCALL(OPFVV2, vfadd_vv_w, OP_UUU_W, H4, H4, H4, float32_add)
+ RVVCALL(OPFVV2, vfadd_vv_d, OP_UUU_D, H8, H8, H8, float64_add)
+-GEN_VEXT_VV_ENV(vfadd_vv_h, 2, 2, clearh)
+-GEN_VEXT_VV_ENV(vfadd_vv_w, 4, 4, clearl)
+-GEN_VEXT_VV_ENV(vfadd_vv_d, 8, 8, clearq)
++GEN_VEXT_VV_ENV(vfadd_vv_h, 2, 2)
++GEN_VEXT_VV_ENV(vfadd_vv_w, 4, 4)
++GEN_VEXT_VV_ENV(vfadd_vv_d, 8, 8)
+ 
+ #define OPFVF2(NAME, TD, T1, T2, TX1, TX2, HD, HS2, OP)        \
+ static void do_##NAME(void *vd, uint64_t s1, void *vs2, int i, \
+@@ -3210,12 +3093,11 @@ static void do_##NAME(void *vd, uint64_t s1, void *vs2, int i, \
+     *((TD *)vd + HD(i)) = OP(s2, (TX1)(T1)s1, &env->fp_status);\
+ }
+ 
+-#define GEN_VEXT_VF(NAME, ESZ, DSZ, CLEAR_FN)             \
++#define GEN_VEXT_VF(NAME, ESZ, DSZ)                       \
+ void HELPER(NAME)(void *vd, void *v0, uint64_t s1,        \
+                   void *vs2, CPURISCVState *env,          \
+                   uint32_t desc)                          \
+ {                                                         \
+-    uint32_t vlmax = vext_maxsz(desc) / ESZ;              \
+     uint32_t vm = vext_vm(desc);                          \
+     uint32_t vl = env->vl;                                \
+     uint32_t i;                                           \
+@@ -3226,28 +3108,27 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1,        \
+         }                                                 \
+         do_##NAME(vd, s1, vs2, i, env);                   \
+     }                                                     \
+-    CLEAR_FN(vd, vl, vl * DSZ,  vlmax * DSZ);             \
+ }
+ 
+ RVVCALL(OPFVF2, vfadd_vf_h, OP_UUU_H, H2, H2, float16_add)
+ RVVCALL(OPFVF2, vfadd_vf_w, OP_UUU_W, H4, H4, float32_add)
+ RVVCALL(OPFVF2, vfadd_vf_d, OP_UUU_D, H8, H8, float64_add)
+-GEN_VEXT_VF(vfadd_vf_h, 2, 2, clearh)
+-GEN_VEXT_VF(vfadd_vf_w, 4, 4, clearl)
+-GEN_VEXT_VF(vfadd_vf_d, 8, 8, clearq)
++GEN_VEXT_VF(vfadd_vf_h, 2, 2)
++GEN_VEXT_VF(vfadd_vf_w, 4, 4)
++GEN_VEXT_VF(vfadd_vf_d, 8, 8)
+ 
+ RVVCALL(OPFVV2, vfsub_vv_h, OP_UUU_H, H2, H2, H2, float16_sub)
+ RVVCALL(OPFVV2, vfsub_vv_w, OP_UUU_W, H4, H4, H4, float32_sub)
+ RVVCALL(OPFVV2, vfsub_vv_d, OP_UUU_D, H8, H8, H8, float64_sub)
+-GEN_VEXT_VV_ENV(vfsub_vv_h, 2, 2, clearh)
+-GEN_VEXT_VV_ENV(vfsub_vv_w, 4, 4, clearl)
+-GEN_VEXT_VV_ENV(vfsub_vv_d, 8, 8, clearq)
++GEN_VEXT_VV_ENV(vfsub_vv_h, 2, 2)
++GEN_VEXT_VV_ENV(vfsub_vv_w, 4, 4)
++GEN_VEXT_VV_ENV(vfsub_vv_d, 8, 8)
+ RVVCALL(OPFVF2, vfsub_vf_h, OP_UUU_H, H2, H2, float16_sub)
+ RVVCALL(OPFVF2, vfsub_vf_w, OP_UUU_W, H4, H4, float32_sub)
+ RVVCALL(OPFVF2, vfsub_vf_d, OP_UUU_D, H8, H8, float64_sub)
+-GEN_VEXT_VF(vfsub_vf_h, 2, 2, clearh)
+-GEN_VEXT_VF(vfsub_vf_w, 4, 4, clearl)
+-GEN_VEXT_VF(vfsub_vf_d, 8, 8, clearq)
++GEN_VEXT_VF(vfsub_vf_h, 2, 2)
++GEN_VEXT_VF(vfsub_vf_w, 4, 4)
++GEN_VEXT_VF(vfsub_vf_d, 8, 8)
+ 
+ static uint16_t float16_rsub(uint16_t a, uint16_t b, float_status *s)
+ {
+@@ -3267,9 +3148,9 @@ static uint64_t float64_rsub(uint64_t a, uint64_t b, float_status *s)
+ RVVCALL(OPFVF2, vfrsub_vf_h, OP_UUU_H, H2, H2, float16_rsub)
+ RVVCALL(OPFVF2, vfrsub_vf_w, OP_UUU_W, H4, H4, float32_rsub)
+ RVVCALL(OPFVF2, vfrsub_vf_d, OP_UUU_D, H8, H8, float64_rsub)
+-GEN_VEXT_VF(vfrsub_vf_h, 2, 2, clearh)
+-GEN_VEXT_VF(vfrsub_vf_w, 4, 4, clearl)
+-GEN_VEXT_VF(vfrsub_vf_d, 8, 8, clearq)
++GEN_VEXT_VF(vfrsub_vf_h, 2, 2)
++GEN_VEXT_VF(vfrsub_vf_w, 4, 4)
++GEN_VEXT_VF(vfrsub_vf_d, 8, 8)
+ 
+ /* Vector Widening Floating-Point Add/Subtract Instructions */
+ static uint32_t vfwadd16(uint16_t a, uint16_t b, float_status *s)
+@@ -3287,12 +3168,12 @@ static uint64_t vfwadd32(uint32_t a, uint32_t b, float_status *s)
+ 
+ RVVCALL(OPFVV2, vfwadd_vv_h, WOP_UUU_H, H4, H2, H2, vfwadd16)
+ RVVCALL(OPFVV2, vfwadd_vv_w, WOP_UUU_W, H8, H4, H4, vfwadd32)
+-GEN_VEXT_VV_ENV(vfwadd_vv_h, 2, 4, clearl)
+-GEN_VEXT_VV_ENV(vfwadd_vv_w, 4, 8, clearq)
++GEN_VEXT_VV_ENV(vfwadd_vv_h, 2, 4)
++GEN_VEXT_VV_ENV(vfwadd_vv_w, 4, 8)
+ RVVCALL(OPFVF2, vfwadd_vf_h, WOP_UUU_H, H4, H2, vfwadd16)
+ RVVCALL(OPFVF2, vfwadd_vf_w, WOP_UUU_W, H8, H4, vfwadd32)
+-GEN_VEXT_VF(vfwadd_vf_h, 2, 4, clearl)
+-GEN_VEXT_VF(vfwadd_vf_w, 4, 8, clearq)
++GEN_VEXT_VF(vfwadd_vf_h, 2, 4)
++GEN_VEXT_VF(vfwadd_vf_w, 4, 8)
+ 
+ static uint32_t vfwsub16(uint16_t a, uint16_t b, float_status *s)
+ {
+@@ -3309,12 +3190,12 @@ static uint64_t vfwsub32(uint32_t a, uint32_t b, float_status *s)
+ 
+ RVVCALL(OPFVV2, vfwsub_vv_h, WOP_UUU_H, H4, H2, H2, vfwsub16)
+ RVVCALL(OPFVV2, vfwsub_vv_w, WOP_UUU_W, H8, H4, H4, vfwsub32)
+-GEN_VEXT_VV_ENV(vfwsub_vv_h, 2, 4, clearl)
+-GEN_VEXT_VV_ENV(vfwsub_vv_w, 4, 8, clearq)
++GEN_VEXT_VV_ENV(vfwsub_vv_h, 2, 4)
++GEN_VEXT_VV_ENV(vfwsub_vv_w, 4, 8)
+ RVVCALL(OPFVF2, vfwsub_vf_h, WOP_UUU_H, H4, H2, vfwsub16)
+ RVVCALL(OPFVF2, vfwsub_vf_w, WOP_UUU_W, H8, H4, vfwsub32)
+-GEN_VEXT_VF(vfwsub_vf_h, 2, 4, clearl)
+-GEN_VEXT_VF(vfwsub_vf_w, 4, 8, clearq)
++GEN_VEXT_VF(vfwsub_vf_h, 2, 4)
++GEN_VEXT_VF(vfwsub_vf_w, 4, 8)
+ 
+ static uint32_t vfwaddw16(uint32_t a, uint16_t b, float_status *s)
+ {
+@@ -3328,12 +3209,12 @@ static uint64_t vfwaddw32(uint64_t a, uint32_t b, float_status *s)
+ 
+ RVVCALL(OPFVV2, vfwadd_wv_h, WOP_WUUU_H, H4, H2, H2, vfwaddw16)
+ RVVCALL(OPFVV2, vfwadd_wv_w, WOP_WUUU_W, H8, H4, H4, vfwaddw32)
+-GEN_VEXT_VV_ENV(vfwadd_wv_h, 2, 4, clearl)
+-GEN_VEXT_VV_ENV(vfwadd_wv_w, 4, 8, clearq)
++GEN_VEXT_VV_ENV(vfwadd_wv_h, 2, 4)
++GEN_VEXT_VV_ENV(vfwadd_wv_w, 4, 8)
+ RVVCALL(OPFVF2, vfwadd_wf_h, WOP_WUUU_H, H4, H2, vfwaddw16)
+ RVVCALL(OPFVF2, vfwadd_wf_w, WOP_WUUU_W, H8, H4, vfwaddw32)
+-GEN_VEXT_VF(vfwadd_wf_h, 2, 4, clearl)
+-GEN_VEXT_VF(vfwadd_wf_w, 4, 8, clearq)
++GEN_VEXT_VF(vfwadd_wf_h, 2, 4)
++GEN_VEXT_VF(vfwadd_wf_w, 4, 8)
+ 
+ static uint32_t vfwsubw16(uint32_t a, uint16_t b, float_status *s)
+ {
+@@ -3347,39 +3228,39 @@ static uint64_t vfwsubw32(uint64_t a, uint32_t b, float_status *s)
+ 
+ RVVCALL(OPFVV2, vfwsub_wv_h, WOP_WUUU_H, H4, H2, H2, vfwsubw16)
+ RVVCALL(OPFVV2, vfwsub_wv_w, WOP_WUUU_W, H8, H4, H4, vfwsubw32)
+-GEN_VEXT_VV_ENV(vfwsub_wv_h, 2, 4, clearl)
+-GEN_VEXT_VV_ENV(vfwsub_wv_w, 4, 8, clearq)
++GEN_VEXT_VV_ENV(vfwsub_wv_h, 2, 4)
++GEN_VEXT_VV_ENV(vfwsub_wv_w, 4, 8)
+ RVVCALL(OPFVF2, vfwsub_wf_h, WOP_WUUU_H, H4, H2, vfwsubw16)
+ RVVCALL(OPFVF2, vfwsub_wf_w, WOP_WUUU_W, H8, H4, vfwsubw32)
+-GEN_VEXT_VF(vfwsub_wf_h, 2, 4, clearl)
+-GEN_VEXT_VF(vfwsub_wf_w, 4, 8, clearq)
++GEN_VEXT_VF(vfwsub_wf_h, 2, 4)
++GEN_VEXT_VF(vfwsub_wf_w, 4, 8)
+ 
+ /* Vector Single-Width Floating-Point Multiply/Divide Instructions */
+ RVVCALL(OPFVV2, vfmul_vv_h, OP_UUU_H, H2, H2, H2, float16_mul)
+ RVVCALL(OPFVV2, vfmul_vv_w, OP_UUU_W, H4, H4, H4, float32_mul)
+ RVVCALL(OPFVV2, vfmul_vv_d, OP_UUU_D, H8, H8, H8, float64_mul)
+-GEN_VEXT_VV_ENV(vfmul_vv_h, 2, 2, clearh)
+-GEN_VEXT_VV_ENV(vfmul_vv_w, 4, 4, clearl)
+-GEN_VEXT_VV_ENV(vfmul_vv_d, 8, 8, clearq)
++GEN_VEXT_VV_ENV(vfmul_vv_h, 2, 2)
++GEN_VEXT_VV_ENV(vfmul_vv_w, 4, 4)
++GEN_VEXT_VV_ENV(vfmul_vv_d, 8, 8)
+ RVVCALL(OPFVF2, vfmul_vf_h, OP_UUU_H, H2, H2, float16_mul)
+ RVVCALL(OPFVF2, vfmul_vf_w, OP_UUU_W, H4, H4, float32_mul)
+ RVVCALL(OPFVF2, vfmul_vf_d, OP_UUU_D, H8, H8, float64_mul)
+-GEN_VEXT_VF(vfmul_vf_h, 2, 2, clearh)
+-GEN_VEXT_VF(vfmul_vf_w, 4, 4, clearl)
+-GEN_VEXT_VF(vfmul_vf_d, 8, 8, clearq)
++GEN_VEXT_VF(vfmul_vf_h, 2, 2)
++GEN_VEXT_VF(vfmul_vf_w, 4, 4)
++GEN_VEXT_VF(vfmul_vf_d, 8, 8)
+ 
+ RVVCALL(OPFVV2, vfdiv_vv_h, OP_UUU_H, H2, H2, H2, float16_div)
+ RVVCALL(OPFVV2, vfdiv_vv_w, OP_UUU_W, H4, H4, H4, float32_div)
+ RVVCALL(OPFVV2, vfdiv_vv_d, OP_UUU_D, H8, H8, H8, float64_div)
+-GEN_VEXT_VV_ENV(vfdiv_vv_h, 2, 2, clearh)
+-GEN_VEXT_VV_ENV(vfdiv_vv_w, 4, 4, clearl)
+-GEN_VEXT_VV_ENV(vfdiv_vv_d, 8, 8, clearq)
++GEN_VEXT_VV_ENV(vfdiv_vv_h, 2, 2)
++GEN_VEXT_VV_ENV(vfdiv_vv_w, 4, 4)
++GEN_VEXT_VV_ENV(vfdiv_vv_d, 8, 8)
+ RVVCALL(OPFVF2, vfdiv_vf_h, OP_UUU_H, H2, H2, float16_div)
+ RVVCALL(OPFVF2, vfdiv_vf_w, OP_UUU_W, H4, H4, float32_div)
+ RVVCALL(OPFVF2, vfdiv_vf_d, OP_UUU_D, H8, H8, float64_div)
+-GEN_VEXT_VF(vfdiv_vf_h, 2, 2, clearh)
+-GEN_VEXT_VF(vfdiv_vf_w, 4, 4, clearl)
+-GEN_VEXT_VF(vfdiv_vf_d, 8, 8, clearq)
++GEN_VEXT_VF(vfdiv_vf_h, 2, 2)
++GEN_VEXT_VF(vfdiv_vf_w, 4, 4)
++GEN_VEXT_VF(vfdiv_vf_d, 8, 8)
+ 
+ static uint16_t float16_rdiv(uint16_t a, uint16_t b, float_status *s)
+ {
+@@ -3399,9 +3280,9 @@ static uint64_t float64_rdiv(uint64_t a, uint64_t b, float_status *s)
+ RVVCALL(OPFVF2, vfrdiv_vf_h, OP_UUU_H, H2, H2, float16_rdiv)
+ RVVCALL(OPFVF2, vfrdiv_vf_w, OP_UUU_W, H4, H4, float32_rdiv)
+ RVVCALL(OPFVF2, vfrdiv_vf_d, OP_UUU_D, H8, H8, float64_rdiv)
+-GEN_VEXT_VF(vfrdiv_vf_h, 2, 2, clearh)
+-GEN_VEXT_VF(vfrdiv_vf_w, 4, 4, clearl)
+-GEN_VEXT_VF(vfrdiv_vf_d, 8, 8, clearq)
++GEN_VEXT_VF(vfrdiv_vf_h, 2, 2)
++GEN_VEXT_VF(vfrdiv_vf_w, 4, 4)
++GEN_VEXT_VF(vfrdiv_vf_d, 8, 8)
+ 
+ /* Vector Widening Floating-Point Multiply */
+ static uint32_t vfwmul16(uint16_t a, uint16_t b, float_status *s)
+@@ -3418,12 +3299,12 @@ static uint64_t vfwmul32(uint32_t a, uint32_t b, float_status *s)
+ }
+ RVVCALL(OPFVV2, vfwmul_vv_h, WOP_UUU_H, H4, H2, H2, vfwmul16)
+ RVVCALL(OPFVV2, vfwmul_vv_w, WOP_UUU_W, H8, H4, H4, vfwmul32)
+-GEN_VEXT_VV_ENV(vfwmul_vv_h, 2, 4, clearl)
+-GEN_VEXT_VV_ENV(vfwmul_vv_w, 4, 8, clearq)
++GEN_VEXT_VV_ENV(vfwmul_vv_h, 2, 4)
++GEN_VEXT_VV_ENV(vfwmul_vv_w, 4, 8)
+ RVVCALL(OPFVF2, vfwmul_vf_h, WOP_UUU_H, H4, H2, vfwmul16)
+ RVVCALL(OPFVF2, vfwmul_vf_w, WOP_UUU_W, H8, H4, vfwmul32)
+-GEN_VEXT_VF(vfwmul_vf_h, 2, 4, clearl)
+-GEN_VEXT_VF(vfwmul_vf_w, 4, 8, clearq)
++GEN_VEXT_VF(vfwmul_vf_h, 2, 4)
++GEN_VEXT_VF(vfwmul_vf_w, 4, 8)
+ 
+ /* Vector Single-Width Floating-Point Fused Multiply-Add Instructions */
+ #define OPFVV3(NAME, TD, T1, T2, TX1, TX2, HD, HS1, HS2, OP)       \
+@@ -3454,9 +3335,9 @@ static uint64_t fmacc64(uint64_t a, uint64_t b, uint64_t d, float_status *s)
+ RVVCALL(OPFVV3, vfmacc_vv_h, OP_UUU_H, H2, H2, H2, fmacc16)
+ RVVCALL(OPFVV3, vfmacc_vv_w, OP_UUU_W, H4, H4, H4, fmacc32)
+ RVVCALL(OPFVV3, vfmacc_vv_d, OP_UUU_D, H8, H8, H8, fmacc64)
+-GEN_VEXT_VV_ENV(vfmacc_vv_h, 2, 2, clearh)
+-GEN_VEXT_VV_ENV(vfmacc_vv_w, 4, 4, clearl)
+-GEN_VEXT_VV_ENV(vfmacc_vv_d, 8, 8, clearq)
++GEN_VEXT_VV_ENV(vfmacc_vv_h, 2, 2)
++GEN_VEXT_VV_ENV(vfmacc_vv_w, 4, 4)
++GEN_VEXT_VV_ENV(vfmacc_vv_d, 8, 8)
+ 
+ #define OPFVF3(NAME, TD, T1, T2, TX1, TX2, HD, HS2, OP)           \
+ static void do_##NAME(void *vd, uint64_t s1, void *vs2, int i,    \
+@@ -3470,9 +3351,9 @@ static void do_##NAME(void *vd, uint64_t s1, void *vs2, int i,    \
+ RVVCALL(OPFVF3, vfmacc_vf_h, OP_UUU_H, H2, H2, fmacc16)
+ RVVCALL(OPFVF3, vfmacc_vf_w, OP_UUU_W, H4, H4, fmacc32)
+ RVVCALL(OPFVF3, vfmacc_vf_d, OP_UUU_D, H8, H8, fmacc64)
+-GEN_VEXT_VF(vfmacc_vf_h, 2, 2, clearh)
+-GEN_VEXT_VF(vfmacc_vf_w, 4, 4, clearl)
+-GEN_VEXT_VF(vfmacc_vf_d, 8, 8, clearq)
++GEN_VEXT_VF(vfmacc_vf_h, 2, 2)
++GEN_VEXT_VF(vfmacc_vf_w, 4, 4)
++GEN_VEXT_VF(vfmacc_vf_d, 8, 8)
+ 
+ static uint16_t fnmacc16(uint16_t a, uint16_t b, uint16_t d, float_status *s)
+ {
+@@ -3495,15 +3376,15 @@ static uint64_t fnmacc64(uint64_t a, uint64_t b, uint64_t d, float_status *s)
+ RVVCALL(OPFVV3, vfnmacc_vv_h, OP_UUU_H, H2, H2, H2, fnmacc16)
+ RVVCALL(OPFVV3, vfnmacc_vv_w, OP_UUU_W, H4, H4, H4, fnmacc32)
+ RVVCALL(OPFVV3, vfnmacc_vv_d, OP_UUU_D, H8, H8, H8, fnmacc64)
+-GEN_VEXT_VV_ENV(vfnmacc_vv_h, 2, 2, clearh)
+-GEN_VEXT_VV_ENV(vfnmacc_vv_w, 4, 4, clearl)
+-GEN_VEXT_VV_ENV(vfnmacc_vv_d, 8, 8, clearq)
++GEN_VEXT_VV_ENV(vfnmacc_vv_h, 2, 2)
++GEN_VEXT_VV_ENV(vfnmacc_vv_w, 4, 4)
++GEN_VEXT_VV_ENV(vfnmacc_vv_d, 8, 8)
+ RVVCALL(OPFVF3, vfnmacc_vf_h, OP_UUU_H, H2, H2, fnmacc16)
+ RVVCALL(OPFVF3, vfnmacc_vf_w, OP_UUU_W, H4, H4, fnmacc32)
+ RVVCALL(OPFVF3, vfnmacc_vf_d, OP_UUU_D, H8, H8, fnmacc64)
+-GEN_VEXT_VF(vfnmacc_vf_h, 2, 2, clearh)
+-GEN_VEXT_VF(vfnmacc_vf_w, 4, 4, clearl)
+-GEN_VEXT_VF(vfnmacc_vf_d, 8, 8, clearq)
++GEN_VEXT_VF(vfnmacc_vf_h, 2, 2)
++GEN_VEXT_VF(vfnmacc_vf_w, 4, 4)
++GEN_VEXT_VF(vfnmacc_vf_d, 8, 8)
+ 
+ static uint16_t fmsac16(uint16_t a, uint16_t b, uint16_t d, float_status *s)
+ {
+@@ -3523,15 +3404,15 @@ static uint64_t fmsac64(uint64_t a, uint64_t b, uint64_t d, float_status *s)
+ RVVCALL(OPFVV3, vfmsac_vv_h, OP_UUU_H, H2, H2, H2, fmsac16)
+ RVVCALL(OPFVV3, vfmsac_vv_w, OP_UUU_W, H4, H4, H4, fmsac32)
+ RVVCALL(OPFVV3, vfmsac_vv_d, OP_UUU_D, H8, H8, H8, fmsac64)
+-GEN_VEXT_VV_ENV(vfmsac_vv_h, 2, 2, clearh)
+-GEN_VEXT_VV_ENV(vfmsac_vv_w, 4, 4, clearl)
+-GEN_VEXT_VV_ENV(vfmsac_vv_d, 8, 8, clearq)
++GEN_VEXT_VV_ENV(vfmsac_vv_h, 2, 2)
++GEN_VEXT_VV_ENV(vfmsac_vv_w, 4, 4)
++GEN_VEXT_VV_ENV(vfmsac_vv_d, 8, 8)
+ RVVCALL(OPFVF3, vfmsac_vf_h, OP_UUU_H, H2, H2, fmsac16)
+ RVVCALL(OPFVF3, vfmsac_vf_w, OP_UUU_W, H4, H4, fmsac32)
+ RVVCALL(OPFVF3, vfmsac_vf_d, OP_UUU_D, H8, H8, fmsac64)
+-GEN_VEXT_VF(vfmsac_vf_h, 2, 2, clearh)
+-GEN_VEXT_VF(vfmsac_vf_w, 4, 4, clearl)
+-GEN_VEXT_VF(vfmsac_vf_d, 8, 8, clearq)
++GEN_VEXT_VF(vfmsac_vf_h, 2, 2)
++GEN_VEXT_VF(vfmsac_vf_w, 4, 4)
++GEN_VEXT_VF(vfmsac_vf_d, 8, 8)
+ 
+ static uint16_t fnmsac16(uint16_t a, uint16_t b, uint16_t d, float_status *s)
+ {
+@@ -3551,15 +3432,15 @@ static uint64_t fnmsac64(uint64_t a, uint64_t b, uint64_t d, float_status *s)
+ RVVCALL(OPFVV3, vfnmsac_vv_h, OP_UUU_H, H2, H2, H2, fnmsac16)
+ RVVCALL(OPFVV3, vfnmsac_vv_w, OP_UUU_W, H4, H4, H4, fnmsac32)
+ RVVCALL(OPFVV3, vfnmsac_vv_d, OP_UUU_D, H8, H8, H8, fnmsac64)
+-GEN_VEXT_VV_ENV(vfnmsac_vv_h, 2, 2, clearh)
+-GEN_VEXT_VV_ENV(vfnmsac_vv_w, 4, 4, clearl)
+-GEN_VEXT_VV_ENV(vfnmsac_vv_d, 8, 8, clearq)
++GEN_VEXT_VV_ENV(vfnmsac_vv_h, 2, 2)
++GEN_VEXT_VV_ENV(vfnmsac_vv_w, 4, 4)
++GEN_VEXT_VV_ENV(vfnmsac_vv_d, 8, 8)
+ RVVCALL(OPFVF3, vfnmsac_vf_h, OP_UUU_H, H2, H2, fnmsac16)
+ RVVCALL(OPFVF3, vfnmsac_vf_w, OP_UUU_W, H4, H4, fnmsac32)
+ RVVCALL(OPFVF3, vfnmsac_vf_d, OP_UUU_D, H8, H8, fnmsac64)
+-GEN_VEXT_VF(vfnmsac_vf_h, 2, 2, clearh)
+-GEN_VEXT_VF(vfnmsac_vf_w, 4, 4, clearl)
+-GEN_VEXT_VF(vfnmsac_vf_d, 8, 8, clearq)
++GEN_VEXT_VF(vfnmsac_vf_h, 2, 2)
++GEN_VEXT_VF(vfnmsac_vf_w, 4, 4)
++GEN_VEXT_VF(vfnmsac_vf_d, 8, 8)
+ 
+ static uint16_t fmadd16(uint16_t a, uint16_t b, uint16_t d, float_status *s)
+ {
+@@ -3579,15 +3460,15 @@ static uint64_t fmadd64(uint64_t a, uint64_t b, uint64_t d, float_status *s)
+ RVVCALL(OPFVV3, vfmadd_vv_h, OP_UUU_H, H2, H2, H2, fmadd16)
+ RVVCALL(OPFVV3, vfmadd_vv_w, OP_UUU_W, H4, H4, H4, fmadd32)
+ RVVCALL(OPFVV3, vfmadd_vv_d, OP_UUU_D, H8, H8, H8, fmadd64)
+-GEN_VEXT_VV_ENV(vfmadd_vv_h, 2, 2, clearh)
+-GEN_VEXT_VV_ENV(vfmadd_vv_w, 4, 4, clearl)
+-GEN_VEXT_VV_ENV(vfmadd_vv_d, 8, 8, clearq)
++GEN_VEXT_VV_ENV(vfmadd_vv_h, 2, 2)
++GEN_VEXT_VV_ENV(vfmadd_vv_w, 4, 4)
++GEN_VEXT_VV_ENV(vfmadd_vv_d, 8, 8)
+ RVVCALL(OPFVF3, vfmadd_vf_h, OP_UUU_H, H2, H2, fmadd16)
+ RVVCALL(OPFVF3, vfmadd_vf_w, OP_UUU_W, H4, H4, fmadd32)
+ RVVCALL(OPFVF3, vfmadd_vf_d, OP_UUU_D, H8, H8, fmadd64)
+-GEN_VEXT_VF(vfmadd_vf_h, 2, 2, clearh)
+-GEN_VEXT_VF(vfmadd_vf_w, 4, 4, clearl)
+-GEN_VEXT_VF(vfmadd_vf_d, 8, 8, clearq)
++GEN_VEXT_VF(vfmadd_vf_h, 2, 2)
++GEN_VEXT_VF(vfmadd_vf_w, 4, 4)
++GEN_VEXT_VF(vfmadd_vf_d, 8, 8)
+ 
+ static uint16_t fnmadd16(uint16_t a, uint16_t b, uint16_t d, float_status *s)
+ {
+@@ -3610,15 +3491,15 @@ static uint64_t fnmadd64(uint64_t a, uint64_t b, uint64_t d, float_status *s)
+ RVVCALL(OPFVV3, vfnmadd_vv_h, OP_UUU_H, H2, H2, H2, fnmadd16)
+ RVVCALL(OPFVV3, vfnmadd_vv_w, OP_UUU_W, H4, H4, H4, fnmadd32)
+ RVVCALL(OPFVV3, vfnmadd_vv_d, OP_UUU_D, H8, H8, H8, fnmadd64)
+-GEN_VEXT_VV_ENV(vfnmadd_vv_h, 2, 2, clearh)
+-GEN_VEXT_VV_ENV(vfnmadd_vv_w, 4, 4, clearl)
+-GEN_VEXT_VV_ENV(vfnmadd_vv_d, 8, 8, clearq)
++GEN_VEXT_VV_ENV(vfnmadd_vv_h, 2, 2)
++GEN_VEXT_VV_ENV(vfnmadd_vv_w, 4, 4)
++GEN_VEXT_VV_ENV(vfnmadd_vv_d, 8, 8)
+ RVVCALL(OPFVF3, vfnmadd_vf_h, OP_UUU_H, H2, H2, fnmadd16)
+ RVVCALL(OPFVF3, vfnmadd_vf_w, OP_UUU_W, H4, H4, fnmadd32)
+ RVVCALL(OPFVF3, vfnmadd_vf_d, OP_UUU_D, H8, H8, fnmadd64)
+-GEN_VEXT_VF(vfnmadd_vf_h, 2, 2, clearh)
+-GEN_VEXT_VF(vfnmadd_vf_w, 4, 4, clearl)
+-GEN_VEXT_VF(vfnmadd_vf_d, 8, 8, clearq)
++GEN_VEXT_VF(vfnmadd_vf_h, 2, 2)
++GEN_VEXT_VF(vfnmadd_vf_w, 4, 4)
++GEN_VEXT_VF(vfnmadd_vf_d, 8, 8)
+ 
+ static uint16_t fmsub16(uint16_t a, uint16_t b, uint16_t d, float_status *s)
+ {
+@@ -3638,15 +3519,15 @@ static uint64_t fmsub64(uint64_t a, uint64_t b, uint64_t d, float_status *s)
+ RVVCALL(OPFVV3, vfmsub_vv_h, OP_UUU_H, H2, H2, H2, fmsub16)
+ RVVCALL(OPFVV3, vfmsub_vv_w, OP_UUU_W, H4, H4, H4, fmsub32)
+ RVVCALL(OPFVV3, vfmsub_vv_d, OP_UUU_D, H8, H8, H8, fmsub64)
+-GEN_VEXT_VV_ENV(vfmsub_vv_h, 2, 2, clearh)
+-GEN_VEXT_VV_ENV(vfmsub_vv_w, 4, 4, clearl)
+-GEN_VEXT_VV_ENV(vfmsub_vv_d, 8, 8, clearq)
++GEN_VEXT_VV_ENV(vfmsub_vv_h, 2, 2)
++GEN_VEXT_VV_ENV(vfmsub_vv_w, 4, 4)
++GEN_VEXT_VV_ENV(vfmsub_vv_d, 8, 8)
+ RVVCALL(OPFVF3, vfmsub_vf_h, OP_UUU_H, H2, H2, fmsub16)
+ RVVCALL(OPFVF3, vfmsub_vf_w, OP_UUU_W, H4, H4, fmsub32)
+ RVVCALL(OPFVF3, vfmsub_vf_d, OP_UUU_D, H8, H8, fmsub64)
+-GEN_VEXT_VF(vfmsub_vf_h, 2, 2, clearh)
+-GEN_VEXT_VF(vfmsub_vf_w, 4, 4, clearl)
+-GEN_VEXT_VF(vfmsub_vf_d, 8, 8, clearq)
++GEN_VEXT_VF(vfmsub_vf_h, 2, 2)
++GEN_VEXT_VF(vfmsub_vf_w, 4, 4)
++GEN_VEXT_VF(vfmsub_vf_d, 8, 8)
+ 
+ static uint16_t fnmsub16(uint16_t a, uint16_t b, uint16_t d, float_status *s)
+ {
+@@ -3666,15 +3547,15 @@ static uint64_t fnmsub64(uint64_t a, uint64_t b, uint64_t d, float_status *s)
+ RVVCALL(OPFVV3, vfnmsub_vv_h, OP_UUU_H, H2, H2, H2, fnmsub16)
+ RVVCALL(OPFVV3, vfnmsub_vv_w, OP_UUU_W, H4, H4, H4, fnmsub32)
+ RVVCALL(OPFVV3, vfnmsub_vv_d, OP_UUU_D, H8, H8, H8, fnmsub64)
+-GEN_VEXT_VV_ENV(vfnmsub_vv_h, 2, 2, clearh)
+-GEN_VEXT_VV_ENV(vfnmsub_vv_w, 4, 4, clearl)
+-GEN_VEXT_VV_ENV(vfnmsub_vv_d, 8, 8, clearq)
++GEN_VEXT_VV_ENV(vfnmsub_vv_h, 2, 2)
++GEN_VEXT_VV_ENV(vfnmsub_vv_w, 4, 4)
++GEN_VEXT_VV_ENV(vfnmsub_vv_d, 8, 8)
+ RVVCALL(OPFVF3, vfnmsub_vf_h, OP_UUU_H, H2, H2, fnmsub16)
+ RVVCALL(OPFVF3, vfnmsub_vf_w, OP_UUU_W, H4, H4, fnmsub32)
+ RVVCALL(OPFVF3, vfnmsub_vf_d, OP_UUU_D, H8, H8, fnmsub64)
+-GEN_VEXT_VF(vfnmsub_vf_h, 2, 2, clearh)
+-GEN_VEXT_VF(vfnmsub_vf_w, 4, 4, clearl)
+-GEN_VEXT_VF(vfnmsub_vf_d, 8, 8, clearq)
++GEN_VEXT_VF(vfnmsub_vf_h, 2, 2)
++GEN_VEXT_VF(vfnmsub_vf_w, 4, 4)
++GEN_VEXT_VF(vfnmsub_vf_d, 8, 8)
+ 
+ /* Vector Widening Floating-Point Fused Multiply-Add Instructions */
+ static uint32_t fwmacc16(uint16_t a, uint16_t b, uint32_t d, float_status *s)
+@@ -3691,12 +3572,12 @@ static uint64_t fwmacc32(uint32_t a, uint32_t b, uint64_t d, float_status *s)
+ 
+ RVVCALL(OPFVV3, vfwmacc_vv_h, WOP_UUU_H, H4, H2, H2, fwmacc16)
+ RVVCALL(OPFVV3, vfwmacc_vv_w, WOP_UUU_W, H8, H4, H4, fwmacc32)
+-GEN_VEXT_VV_ENV(vfwmacc_vv_h, 2, 4, clearl)
+-GEN_VEXT_VV_ENV(vfwmacc_vv_w, 4, 8, clearq)
++GEN_VEXT_VV_ENV(vfwmacc_vv_h, 2, 4)
++GEN_VEXT_VV_ENV(vfwmacc_vv_w, 4, 8)
+ RVVCALL(OPFVF3, vfwmacc_vf_h, WOP_UUU_H, H4, H2, fwmacc16)
+ RVVCALL(OPFVF3, vfwmacc_vf_w, WOP_UUU_W, H8, H4, fwmacc32)
+-GEN_VEXT_VF(vfwmacc_vf_h, 2, 4, clearl)
+-GEN_VEXT_VF(vfwmacc_vf_w, 4, 8, clearq)
++GEN_VEXT_VF(vfwmacc_vf_h, 2, 4)
++GEN_VEXT_VF(vfwmacc_vf_w, 4, 8)
+ 
+ static uint32_t fwnmacc16(uint16_t a, uint16_t b, uint32_t d, float_status *s)
+ {
+@@ -3714,12 +3595,12 @@ static uint64_t fwnmacc32(uint32_t a, uint32_t b, uint64_t d, float_status *s)
+ 
+ RVVCALL(OPFVV3, vfwnmacc_vv_h, WOP_UUU_H, H4, H2, H2, fwnmacc16)
+ RVVCALL(OPFVV3, vfwnmacc_vv_w, WOP_UUU_W, H8, H4, H4, fwnmacc32)
+-GEN_VEXT_VV_ENV(vfwnmacc_vv_h, 2, 4, clearl)
+-GEN_VEXT_VV_ENV(vfwnmacc_vv_w, 4, 8, clearq)
++GEN_VEXT_VV_ENV(vfwnmacc_vv_h, 2, 4)
++GEN_VEXT_VV_ENV(vfwnmacc_vv_w, 4, 8)
+ RVVCALL(OPFVF3, vfwnmacc_vf_h, WOP_UUU_H, H4, H2, fwnmacc16)
+ RVVCALL(OPFVF3, vfwnmacc_vf_w, WOP_UUU_W, H8, H4, fwnmacc32)
+-GEN_VEXT_VF(vfwnmacc_vf_h, 2, 4, clearl)
+-GEN_VEXT_VF(vfwnmacc_vf_w, 4, 8, clearq)
++GEN_VEXT_VF(vfwnmacc_vf_h, 2, 4)
++GEN_VEXT_VF(vfwnmacc_vf_w, 4, 8)
+ 
+ static uint32_t fwmsac16(uint16_t a, uint16_t b, uint32_t d, float_status *s)
+ {
+@@ -3737,12 +3618,12 @@ static uint64_t fwmsac32(uint32_t a, uint32_t b, uint64_t d, float_status *s)
+ 
+ RVVCALL(OPFVV3, vfwmsac_vv_h, WOP_UUU_H, H4, H2, H2, fwmsac16)
+ RVVCALL(OPFVV3, vfwmsac_vv_w, WOP_UUU_W, H8, H4, H4, fwmsac32)
+-GEN_VEXT_VV_ENV(vfwmsac_vv_h, 2, 4, clearl)
+-GEN_VEXT_VV_ENV(vfwmsac_vv_w, 4, 8, clearq)
++GEN_VEXT_VV_ENV(vfwmsac_vv_h, 2, 4)
++GEN_VEXT_VV_ENV(vfwmsac_vv_w, 4, 8)
+ RVVCALL(OPFVF3, vfwmsac_vf_h, WOP_UUU_H, H4, H2, fwmsac16)
+ RVVCALL(OPFVF3, vfwmsac_vf_w, WOP_UUU_W, H8, H4, fwmsac32)
+-GEN_VEXT_VF(vfwmsac_vf_h, 2, 4, clearl)
+-GEN_VEXT_VF(vfwmsac_vf_w, 4, 8, clearq)
++GEN_VEXT_VF(vfwmsac_vf_h, 2, 4)
++GEN_VEXT_VF(vfwmsac_vf_w, 4, 8)
+ 
+ static uint32_t fwnmsac16(uint16_t a, uint16_t b, uint32_t d, float_status *s)
+ {
+@@ -3760,12 +3641,12 @@ static uint64_t fwnmsac32(uint32_t a, uint32_t b, uint64_t d, float_status *s)
+ 
+ RVVCALL(OPFVV3, vfwnmsac_vv_h, WOP_UUU_H, H4, H2, H2, fwnmsac16)
+ RVVCALL(OPFVV3, vfwnmsac_vv_w, WOP_UUU_W, H8, H4, H4, fwnmsac32)
+-GEN_VEXT_VV_ENV(vfwnmsac_vv_h, 2, 4, clearl)
+-GEN_VEXT_VV_ENV(vfwnmsac_vv_w, 4, 8, clearq)
++GEN_VEXT_VV_ENV(vfwnmsac_vv_h, 2, 4)
++GEN_VEXT_VV_ENV(vfwnmsac_vv_w, 4, 8)
+ RVVCALL(OPFVF3, vfwnmsac_vf_h, WOP_UUU_H, H4, H2, fwnmsac16)
+ RVVCALL(OPFVF3, vfwnmsac_vf_w, WOP_UUU_W, H8, H4, fwnmsac32)
+-GEN_VEXT_VF(vfwnmsac_vf_h, 2, 4, clearl)
+-GEN_VEXT_VF(vfwnmsac_vf_w, 4, 8, clearq)
++GEN_VEXT_VF(vfwnmsac_vf_h, 2, 4)
++GEN_VEXT_VF(vfwnmsac_vf_w, 4, 8)
+ 
+ /* Vector Floating-Point Square-Root Instruction */
+ /* (TD, T2, TX2) */
+@@ -3781,11 +3662,10 @@ static void do_##NAME(void *vd, void *vs2, int i,      \
+     *((TD *)vd + HD(i)) = OP(s2, &env->fp_status);     \
+ }
+ 
+-#define GEN_VEXT_V_ENV(NAME, ESZ, DSZ, CLEAR_FN)       \
++#define GEN_VEXT_V_ENV(NAME, ESZ, DSZ)                 \
+ void HELPER(NAME)(void *vd, void *v0, void *vs2,       \
+         CPURISCVState *env, uint32_t desc)             \
+ {                                                      \
+-    uint32_t vlmax = vext_maxsz(desc) / ESZ;           \
+     uint32_t vm = vext_vm(desc);                       \
+     uint32_t vl = env->vl;                             \
+     uint32_t i;                                        \
+@@ -3799,42 +3679,41 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2,       \
+         }                                              \
+         do_##NAME(vd, vs2, i, env);                    \
+     }                                                  \
+-    CLEAR_FN(vd, vl, vl * DSZ,  vlmax * DSZ);          \
+ }
+ 
+ RVVCALL(OPFVV1, vfsqrt_v_h, OP_UU_H, H2, H2, float16_sqrt)
+ RVVCALL(OPFVV1, vfsqrt_v_w, OP_UU_W, H4, H4, float32_sqrt)
+ RVVCALL(OPFVV1, vfsqrt_v_d, OP_UU_D, H8, H8, float64_sqrt)
+-GEN_VEXT_V_ENV(vfsqrt_v_h, 2, 2, clearh)
+-GEN_VEXT_V_ENV(vfsqrt_v_w, 4, 4, clearl)
+-GEN_VEXT_V_ENV(vfsqrt_v_d, 8, 8, clearq)
++GEN_VEXT_V_ENV(vfsqrt_v_h, 2, 2)
++GEN_VEXT_V_ENV(vfsqrt_v_w, 4, 4)
++GEN_VEXT_V_ENV(vfsqrt_v_d, 8, 8)
+ 
+ /* Vector Floating-Point MIN/MAX Instructions */
+ RVVCALL(OPFVV2, vfmin_vv_h, OP_UUU_H, H2, H2, H2, float16_minnum)
+ RVVCALL(OPFVV2, vfmin_vv_w, OP_UUU_W, H4, H4, H4, float32_minnum)
+ RVVCALL(OPFVV2, vfmin_vv_d, OP_UUU_D, H8, H8, H8, float64_minnum)
+-GEN_VEXT_VV_ENV(vfmin_vv_h, 2, 2, clearh)
+-GEN_VEXT_VV_ENV(vfmin_vv_w, 4, 4, clearl)
+-GEN_VEXT_VV_ENV(vfmin_vv_d, 8, 8, clearq)
++GEN_VEXT_VV_ENV(vfmin_vv_h, 2, 2)
++GEN_VEXT_VV_ENV(vfmin_vv_w, 4, 4)
++GEN_VEXT_VV_ENV(vfmin_vv_d, 8, 8)
+ RVVCALL(OPFVF2, vfmin_vf_h, OP_UUU_H, H2, H2, float16_minnum)
+ RVVCALL(OPFVF2, vfmin_vf_w, OP_UUU_W, H4, H4, float32_minnum)
+ RVVCALL(OPFVF2, vfmin_vf_d, OP_UUU_D, H8, H8, float64_minnum)
+-GEN_VEXT_VF(vfmin_vf_h, 2, 2, clearh)
+-GEN_VEXT_VF(vfmin_vf_w, 4, 4, clearl)
+-GEN_VEXT_VF(vfmin_vf_d, 8, 8, clearq)
++GEN_VEXT_VF(vfmin_vf_h, 2, 2)
++GEN_VEXT_VF(vfmin_vf_w, 4, 4)
++GEN_VEXT_VF(vfmin_vf_d, 8, 8)
+ 
+ RVVCALL(OPFVV2, vfmax_vv_h, OP_UUU_H, H2, H2, H2, float16_maxnum)
+ RVVCALL(OPFVV2, vfmax_vv_w, OP_UUU_W, H4, H4, H4, float32_maxnum)
+ RVVCALL(OPFVV2, vfmax_vv_d, OP_UUU_D, H8, H8, H8, float64_maxnum)
+-GEN_VEXT_VV_ENV(vfmax_vv_h, 2, 2, clearh)
+-GEN_VEXT_VV_ENV(vfmax_vv_w, 4, 4, clearl)
+-GEN_VEXT_VV_ENV(vfmax_vv_d, 8, 8, clearq)
++GEN_VEXT_VV_ENV(vfmax_vv_h, 2, 2)
++GEN_VEXT_VV_ENV(vfmax_vv_w, 4, 4)
++GEN_VEXT_VV_ENV(vfmax_vv_d, 8, 8)
+ RVVCALL(OPFVF2, vfmax_vf_h, OP_UUU_H, H2, H2, float16_maxnum)
+ RVVCALL(OPFVF2, vfmax_vf_w, OP_UUU_W, H4, H4, float32_maxnum)
+ RVVCALL(OPFVF2, vfmax_vf_d, OP_UUU_D, H8, H8, float64_maxnum)
+-GEN_VEXT_VF(vfmax_vf_h, 2, 2, clearh)
+-GEN_VEXT_VF(vfmax_vf_w, 4, 4, clearl)
+-GEN_VEXT_VF(vfmax_vf_d, 8, 8, clearq)
++GEN_VEXT_VF(vfmax_vf_h, 2, 2)
++GEN_VEXT_VF(vfmax_vf_w, 4, 4)
++GEN_VEXT_VF(vfmax_vf_d, 8, 8)
+ 
+ /* Vector Floating-Point Sign-Injection Instructions */
+ static uint16_t fsgnj16(uint16_t a, uint16_t b, float_status *s)
+@@ -3855,15 +3734,15 @@ static uint64_t fsgnj64(uint64_t a, uint64_t b, float_status *s)
+ RVVCALL(OPFVV2, vfsgnj_vv_h, OP_UUU_H, H2, H2, H2, fsgnj16)
+ RVVCALL(OPFVV2, vfsgnj_vv_w, OP_UUU_W, H4, H4, H4, fsgnj32)
+ RVVCALL(OPFVV2, vfsgnj_vv_d, OP_UUU_D, H8, H8, H8, fsgnj64)
+-GEN_VEXT_VV_ENV(vfsgnj_vv_h, 2, 2, clearh)
+-GEN_VEXT_VV_ENV(vfsgnj_vv_w, 4, 4, clearl)
+-GEN_VEXT_VV_ENV(vfsgnj_vv_d, 8, 8, clearq)
++GEN_VEXT_VV_ENV(vfsgnj_vv_h, 2, 2)
++GEN_VEXT_VV_ENV(vfsgnj_vv_w, 4, 4)
++GEN_VEXT_VV_ENV(vfsgnj_vv_d, 8, 8)
+ RVVCALL(OPFVF2, vfsgnj_vf_h, OP_UUU_H, H2, H2, fsgnj16)
+ RVVCALL(OPFVF2, vfsgnj_vf_w, OP_UUU_W, H4, H4, fsgnj32)
+ RVVCALL(OPFVF2, vfsgnj_vf_d, OP_UUU_D, H8, H8, fsgnj64)
+-GEN_VEXT_VF(vfsgnj_vf_h, 2, 2, clearh)
+-GEN_VEXT_VF(vfsgnj_vf_w, 4, 4, clearl)
+-GEN_VEXT_VF(vfsgnj_vf_d, 8, 8, clearq)
++GEN_VEXT_VF(vfsgnj_vf_h, 2, 2)
++GEN_VEXT_VF(vfsgnj_vf_w, 4, 4)
++GEN_VEXT_VF(vfsgnj_vf_d, 8, 8)
+ 
+ static uint16_t fsgnjn16(uint16_t a, uint16_t b, float_status *s)
+ {
+@@ -3883,15 +3762,15 @@ static uint64_t fsgnjn64(uint64_t a, uint64_t b, float_status *s)
+ RVVCALL(OPFVV2, vfsgnjn_vv_h, OP_UUU_H, H2, H2, H2, fsgnjn16)
+ RVVCALL(OPFVV2, vfsgnjn_vv_w, OP_UUU_W, H4, H4, H4, fsgnjn32)
+ RVVCALL(OPFVV2, vfsgnjn_vv_d, OP_UUU_D, H8, H8, H8, fsgnjn64)
+-GEN_VEXT_VV_ENV(vfsgnjn_vv_h, 2, 2, clearh)
+-GEN_VEXT_VV_ENV(vfsgnjn_vv_w, 4, 4, clearl)
+-GEN_VEXT_VV_ENV(vfsgnjn_vv_d, 8, 8, clearq)
++GEN_VEXT_VV_ENV(vfsgnjn_vv_h, 2, 2)
++GEN_VEXT_VV_ENV(vfsgnjn_vv_w, 4, 4)
++GEN_VEXT_VV_ENV(vfsgnjn_vv_d, 8, 8)
+ RVVCALL(OPFVF2, vfsgnjn_vf_h, OP_UUU_H, H2, H2, fsgnjn16)
+ RVVCALL(OPFVF2, vfsgnjn_vf_w, OP_UUU_W, H4, H4, fsgnjn32)
+ RVVCALL(OPFVF2, vfsgnjn_vf_d, OP_UUU_D, H8, H8, fsgnjn64)
+-GEN_VEXT_VF(vfsgnjn_vf_h, 2, 2, clearh)
+-GEN_VEXT_VF(vfsgnjn_vf_w, 4, 4, clearl)
+-GEN_VEXT_VF(vfsgnjn_vf_d, 8, 8, clearq)
++GEN_VEXT_VF(vfsgnjn_vf_h, 2, 2)
++GEN_VEXT_VF(vfsgnjn_vf_w, 4, 4)
++GEN_VEXT_VF(vfsgnjn_vf_d, 8, 8)
+ 
+ static uint16_t fsgnjx16(uint16_t a, uint16_t b, float_status *s)
+ {
+@@ -3911,15 +3790,15 @@ static uint64_t fsgnjx64(uint64_t a, uint64_t b, float_status *s)
+ RVVCALL(OPFVV2, vfsgnjx_vv_h, OP_UUU_H, H2, H2, H2, fsgnjx16)
+ RVVCALL(OPFVV2, vfsgnjx_vv_w, OP_UUU_W, H4, H4, H4, fsgnjx32)
+ RVVCALL(OPFVV2, vfsgnjx_vv_d, OP_UUU_D, H8, H8, H8, fsgnjx64)
+-GEN_VEXT_VV_ENV(vfsgnjx_vv_h, 2, 2, clearh)
+-GEN_VEXT_VV_ENV(vfsgnjx_vv_w, 4, 4, clearl)
+-GEN_VEXT_VV_ENV(vfsgnjx_vv_d, 8, 8, clearq)
++GEN_VEXT_VV_ENV(vfsgnjx_vv_h, 2, 2)
++GEN_VEXT_VV_ENV(vfsgnjx_vv_w, 4, 4)
++GEN_VEXT_VV_ENV(vfsgnjx_vv_d, 8, 8)
+ RVVCALL(OPFVF2, vfsgnjx_vf_h, OP_UUU_H, H2, H2, fsgnjx16)
+ RVVCALL(OPFVF2, vfsgnjx_vf_w, OP_UUU_W, H4, H4, fsgnjx32)
+ RVVCALL(OPFVF2, vfsgnjx_vf_d, OP_UUU_D, H8, H8, fsgnjx64)
+-GEN_VEXT_VF(vfsgnjx_vf_h, 2, 2, clearh)
+-GEN_VEXT_VF(vfsgnjx_vf_w, 4, 4, clearl)
+-GEN_VEXT_VF(vfsgnjx_vf_d, 8, 8, clearq)
++GEN_VEXT_VF(vfsgnjx_vf_h, 2, 2)
++GEN_VEXT_VF(vfsgnjx_vf_w, 4, 4)
++GEN_VEXT_VF(vfsgnjx_vf_d, 8, 8)
+ 
+ /* Vector Floating-Point Compare Instructions */
+ #define GEN_VEXT_CMP_VV_ENV(NAME, ETYPE, H, DO_OP)            \
+@@ -4076,11 +3955,10 @@ static void do_##NAME(void *vd, void *vs2, int i)      \
+     *((TD *)vd + HD(i)) = OP(s2);                      \
+ }
+ 
+-#define GEN_VEXT_V(NAME, ESZ, DSZ, CLEAR_FN)           \
++#define GEN_VEXT_V(NAME, ESZ, DSZ)                     \
+ void HELPER(NAME)(void *vd, void *v0, void *vs2,       \
+                   CPURISCVState *env, uint32_t desc)   \
+ {                                                      \
+-    uint32_t vlmax = vext_maxsz(desc) / ESZ;           \
+     uint32_t vm = vext_vm(desc);                       \
+     uint32_t vl = env->vl;                             \
+     uint32_t i;                                        \
+@@ -4091,7 +3969,6 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2,       \
+         }                                              \
+         do_##NAME(vd, vs2, i);                         \
+     }                                                  \
+-    CLEAR_FN(vd, vl, vl * DSZ,  vlmax * DSZ);          \
+ }
+ 
+ target_ulong fclass_h(uint64_t frs1)
+@@ -4154,19 +4031,17 @@ target_ulong fclass_d(uint64_t frs1)
+ RVVCALL(OPIVV1, vfclass_v_h, OP_UU_H, H2, H2, fclass_h)
+ RVVCALL(OPIVV1, vfclass_v_w, OP_UU_W, H4, H4, fclass_s)
+ RVVCALL(OPIVV1, vfclass_v_d, OP_UU_D, H8, H8, fclass_d)
+-GEN_VEXT_V(vfclass_v_h, 2, 2, clearh)
+-GEN_VEXT_V(vfclass_v_w, 4, 4, clearl)
+-GEN_VEXT_V(vfclass_v_d, 8, 8, clearq)
++GEN_VEXT_V(vfclass_v_h, 2, 2)
++GEN_VEXT_V(vfclass_v_w, 4, 4)
++GEN_VEXT_V(vfclass_v_d, 8, 8)
+ 
+ /* Vector Floating-Point Merge Instruction */
+-#define GEN_VFMERGE_VF(NAME, ETYPE, H, CLEAR_FN)              \
++#define GEN_VFMERGE_VF(NAME, ETYPE, H)                        \
+ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \
+                   CPURISCVState *env, uint32_t desc)          \
+ {                                                             \
+     uint32_t vm = vext_vm(desc);                              \
+     uint32_t vl = env->vl;                                    \
+-    uint32_t esz = sizeof(ETYPE);                             \
+-    uint32_t vlmax = vext_maxsz(desc) / esz;                  \
+     uint32_t i;                                               \
+                                                               \
+     for (i = 0; i < vl; i++) {                                \
+@@ -4174,45 +4049,44 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \
+         *((ETYPE *)vd + H(i))                                 \
+           = (!vm && !vext_elem_mask(v0, i) ? s2 : s1);        \
+     }                                                         \
+-    CLEAR_FN(vd, vl, vl * esz, vlmax * esz);                  \
+ }
+ 
+-GEN_VFMERGE_VF(vfmerge_vfm_h, int16_t, H2, clearh)
+-GEN_VFMERGE_VF(vfmerge_vfm_w, int32_t, H4, clearl)
+-GEN_VFMERGE_VF(vfmerge_vfm_d, int64_t, H8, clearq)
++GEN_VFMERGE_VF(vfmerge_vfm_h, int16_t, H2)
++GEN_VFMERGE_VF(vfmerge_vfm_w, int32_t, H4)
++GEN_VFMERGE_VF(vfmerge_vfm_d, int64_t, H8)
+ 
+ /* Single-Width Floating-Point/Integer Type-Convert Instructions */
+ /* vfcvt.xu.f.v vd, vs2, vm # Convert float to unsigned integer. */
+ RVVCALL(OPFVV1, vfcvt_xu_f_v_h, OP_UU_H, H2, H2, float16_to_uint16)
+ RVVCALL(OPFVV1, vfcvt_xu_f_v_w, OP_UU_W, H4, H4, float32_to_uint32)
+ RVVCALL(OPFVV1, vfcvt_xu_f_v_d, OP_UU_D, H8, H8, float64_to_uint64)
+-GEN_VEXT_V_ENV(vfcvt_xu_f_v_h, 2, 2, clearh)
+-GEN_VEXT_V_ENV(vfcvt_xu_f_v_w, 4, 4, clearl)
+-GEN_VEXT_V_ENV(vfcvt_xu_f_v_d, 8, 8, clearq)
++GEN_VEXT_V_ENV(vfcvt_xu_f_v_h, 2, 2)
++GEN_VEXT_V_ENV(vfcvt_xu_f_v_w, 4, 4)
++GEN_VEXT_V_ENV(vfcvt_xu_f_v_d, 8, 8)
+ 
+ /* vfcvt.x.f.v vd, vs2, vm # Convert float to signed integer. */
+ RVVCALL(OPFVV1, vfcvt_x_f_v_h, OP_UU_H, H2, H2, float16_to_int16)
+ RVVCALL(OPFVV1, vfcvt_x_f_v_w, OP_UU_W, H4, H4, float32_to_int32)
+ RVVCALL(OPFVV1, vfcvt_x_f_v_d, OP_UU_D, H8, H8, float64_to_int64)
+-GEN_VEXT_V_ENV(vfcvt_x_f_v_h, 2, 2, clearh)
+-GEN_VEXT_V_ENV(vfcvt_x_f_v_w, 4, 4, clearl)
+-GEN_VEXT_V_ENV(vfcvt_x_f_v_d, 8, 8, clearq)
++GEN_VEXT_V_ENV(vfcvt_x_f_v_h, 2, 2)
++GEN_VEXT_V_ENV(vfcvt_x_f_v_w, 4, 4)
++GEN_VEXT_V_ENV(vfcvt_x_f_v_d, 8, 8)
+ 
+ /* vfcvt.f.xu.v vd, vs2, vm # Convert unsigned integer to float. */
+ RVVCALL(OPFVV1, vfcvt_f_xu_v_h, OP_UU_H, H2, H2, uint16_to_float16)
+ RVVCALL(OPFVV1, vfcvt_f_xu_v_w, OP_UU_W, H4, H4, uint32_to_float32)
+ RVVCALL(OPFVV1, vfcvt_f_xu_v_d, OP_UU_D, H8, H8, uint64_to_float64)
+-GEN_VEXT_V_ENV(vfcvt_f_xu_v_h, 2, 2, clearh)
+-GEN_VEXT_V_ENV(vfcvt_f_xu_v_w, 4, 4, clearl)
+-GEN_VEXT_V_ENV(vfcvt_f_xu_v_d, 8, 8, clearq)
++GEN_VEXT_V_ENV(vfcvt_f_xu_v_h, 2, 2)
++GEN_VEXT_V_ENV(vfcvt_f_xu_v_w, 4, 4)
++GEN_VEXT_V_ENV(vfcvt_f_xu_v_d, 8, 8)
+ 
+ /* vfcvt.f.x.v vd, vs2, vm # Convert integer to float. */
+ RVVCALL(OPFVV1, vfcvt_f_x_v_h, OP_UU_H, H2, H2, int16_to_float16)
+ RVVCALL(OPFVV1, vfcvt_f_x_v_w, OP_UU_W, H4, H4, int32_to_float32)
+ RVVCALL(OPFVV1, vfcvt_f_x_v_d, OP_UU_D, H8, H8, int64_to_float64)
+-GEN_VEXT_V_ENV(vfcvt_f_x_v_h, 2, 2, clearh)
+-GEN_VEXT_V_ENV(vfcvt_f_x_v_w, 4, 4, clearl)
+-GEN_VEXT_V_ENV(vfcvt_f_x_v_d, 8, 8, clearq)
++GEN_VEXT_V_ENV(vfcvt_f_x_v_h, 2, 2)
++GEN_VEXT_V_ENV(vfcvt_f_x_v_w, 4, 4)
++GEN_VEXT_V_ENV(vfcvt_f_x_v_d, 8, 8)
+ 
+ /* Widening Floating-Point/Integer Type-Convert Instructions */
+ /* (TD, T2, TX2) */
+@@ -4221,26 +4095,26 @@ GEN_VEXT_V_ENV(vfcvt_f_x_v_d, 8, 8, clearq)
+ /* vfwcvt.xu.f.v vd, vs2, vm # Convert float to double-width unsigned integer.*/
+ RVVCALL(OPFVV1, vfwcvt_xu_f_v_h, WOP_UU_H, H4, H2, float16_to_uint32)
+ RVVCALL(OPFVV1, vfwcvt_xu_f_v_w, WOP_UU_W, H8, H4, float32_to_uint64)
+-GEN_VEXT_V_ENV(vfwcvt_xu_f_v_h, 2, 4, clearl)
+-GEN_VEXT_V_ENV(vfwcvt_xu_f_v_w, 4, 8, clearq)
++GEN_VEXT_V_ENV(vfwcvt_xu_f_v_h, 2, 4)
++GEN_VEXT_V_ENV(vfwcvt_xu_f_v_w, 4, 8)
+ 
+ /* vfwcvt.x.f.v vd, vs2, vm # Convert float to double-width signed integer. */
+ RVVCALL(OPFVV1, vfwcvt_x_f_v_h, WOP_UU_H, H4, H2, float16_to_int32)
+ RVVCALL(OPFVV1, vfwcvt_x_f_v_w, WOP_UU_W, H8, H4, float32_to_int64)
+-GEN_VEXT_V_ENV(vfwcvt_x_f_v_h, 2, 4, clearl)
+-GEN_VEXT_V_ENV(vfwcvt_x_f_v_w, 4, 8, clearq)
++GEN_VEXT_V_ENV(vfwcvt_x_f_v_h, 2, 4)
++GEN_VEXT_V_ENV(vfwcvt_x_f_v_w, 4, 8)
+ 
+ /* vfwcvt.f.xu.v vd, vs2, vm # Convert unsigned integer to double-width float */
+ RVVCALL(OPFVV1, vfwcvt_f_xu_v_h, WOP_UU_H, H4, H2, uint16_to_float32)
+ RVVCALL(OPFVV1, vfwcvt_f_xu_v_w, WOP_UU_W, H8, H4, uint32_to_float64)
+-GEN_VEXT_V_ENV(vfwcvt_f_xu_v_h, 2, 4, clearl)
+-GEN_VEXT_V_ENV(vfwcvt_f_xu_v_w, 4, 8, clearq)
++GEN_VEXT_V_ENV(vfwcvt_f_xu_v_h, 2, 4)
++GEN_VEXT_V_ENV(vfwcvt_f_xu_v_w, 4, 8)
+ 
+ /* vfwcvt.f.x.v vd, vs2, vm # Convert integer to double-width float. */
+ RVVCALL(OPFVV1, vfwcvt_f_x_v_h, WOP_UU_H, H4, H2, int16_to_float32)
+ RVVCALL(OPFVV1, vfwcvt_f_x_v_w, WOP_UU_W, H8, H4, int32_to_float64)
+-GEN_VEXT_V_ENV(vfwcvt_f_x_v_h, 2, 4, clearl)
+-GEN_VEXT_V_ENV(vfwcvt_f_x_v_w, 4, 8, clearq)
++GEN_VEXT_V_ENV(vfwcvt_f_x_v_h, 2, 4)
++GEN_VEXT_V_ENV(vfwcvt_f_x_v_w, 4, 8)
+ 
+ /*
+  * vfwcvt.f.f.v vd, vs2, vm #
+@@ -4253,8 +4127,8 @@ static uint32_t vfwcvtffv16(uint16_t a, float_status *s)
+ 
+ RVVCALL(OPFVV1, vfwcvt_f_f_v_h, WOP_UU_H, H4, H2, vfwcvtffv16)
+ RVVCALL(OPFVV1, vfwcvt_f_f_v_w, WOP_UU_W, H8, H4, float32_to_float64)
+-GEN_VEXT_V_ENV(vfwcvt_f_f_v_h, 2, 4, clearl)
+-GEN_VEXT_V_ENV(vfwcvt_f_f_v_w, 4, 8, clearq)
++GEN_VEXT_V_ENV(vfwcvt_f_f_v_h, 2, 4)
++GEN_VEXT_V_ENV(vfwcvt_f_f_v_w, 4, 8)
+ 
+ /* Narrowing Floating-Point/Integer Type-Convert Instructions */
+ /* (TD, T2, TX2) */
+@@ -4263,26 +4137,26 @@ GEN_VEXT_V_ENV(vfwcvt_f_f_v_w, 4, 8, clearq)
+ /* vfncvt.xu.f.v vd, vs2, vm # Convert float to unsigned integer. */
+ RVVCALL(OPFVV1, vfncvt_xu_f_v_h, NOP_UU_H, H2, H4, float32_to_uint16)
+ RVVCALL(OPFVV1, vfncvt_xu_f_v_w, NOP_UU_W, H4, H8, float64_to_uint32)
+-GEN_VEXT_V_ENV(vfncvt_xu_f_v_h, 2, 2, clearh)
+-GEN_VEXT_V_ENV(vfncvt_xu_f_v_w, 4, 4, clearl)
++GEN_VEXT_V_ENV(vfncvt_xu_f_v_h, 2, 2)
++GEN_VEXT_V_ENV(vfncvt_xu_f_v_w, 4, 4)
+ 
+ /* vfncvt.x.f.v vd, vs2, vm # Convert double-width float to signed integer. */
+ RVVCALL(OPFVV1, vfncvt_x_f_v_h, NOP_UU_H, H2, H4, float32_to_int16)
+ RVVCALL(OPFVV1, vfncvt_x_f_v_w, NOP_UU_W, H4, H8, float64_to_int32)
+-GEN_VEXT_V_ENV(vfncvt_x_f_v_h, 2, 2, clearh)
+-GEN_VEXT_V_ENV(vfncvt_x_f_v_w, 4, 4, clearl)
++GEN_VEXT_V_ENV(vfncvt_x_f_v_h, 2, 2)
++GEN_VEXT_V_ENV(vfncvt_x_f_v_w, 4, 4)
+ 
+ /* vfncvt.f.xu.v vd, vs2, vm # Convert double-width unsigned integer to float */
+ RVVCALL(OPFVV1, vfncvt_f_xu_v_h, NOP_UU_H, H2, H4, uint32_to_float16)
+ RVVCALL(OPFVV1, vfncvt_f_xu_v_w, NOP_UU_W, H4, H8, uint64_to_float32)
+-GEN_VEXT_V_ENV(vfncvt_f_xu_v_h, 2, 2, clearh)
+-GEN_VEXT_V_ENV(vfncvt_f_xu_v_w, 4, 4, clearl)
++GEN_VEXT_V_ENV(vfncvt_f_xu_v_h, 2, 2)
++GEN_VEXT_V_ENV(vfncvt_f_xu_v_w, 4, 4)
+ 
+ /* vfncvt.f.x.v vd, vs2, vm # Convert double-width integer to float. */
+ RVVCALL(OPFVV1, vfncvt_f_x_v_h, NOP_UU_H, H2, H4, int32_to_float16)
+ RVVCALL(OPFVV1, vfncvt_f_x_v_w, NOP_UU_W, H4, H8, int64_to_float32)
+-GEN_VEXT_V_ENV(vfncvt_f_x_v_h, 2, 2, clearh)
+-GEN_VEXT_V_ENV(vfncvt_f_x_v_w, 4, 4, clearl)
++GEN_VEXT_V_ENV(vfncvt_f_x_v_h, 2, 2)
++GEN_VEXT_V_ENV(vfncvt_f_x_v_w, 4, 4)
+ 
+ /* vfncvt.f.f.v vd, vs2, vm # Convert double float to single-width float. */
+ static uint16_t vfncvtffv16(uint32_t a, float_status *s)
+@@ -4292,21 +4166,20 @@ static uint16_t vfncvtffv16(uint32_t a, float_status *s)
+ 
+ RVVCALL(OPFVV1, vfncvt_f_f_v_h, NOP_UU_H, H2, H4, vfncvtffv16)
+ RVVCALL(OPFVV1, vfncvt_f_f_v_w, NOP_UU_W, H4, H8, float64_to_float32)
+-GEN_VEXT_V_ENV(vfncvt_f_f_v_h, 2, 2, clearh)
+-GEN_VEXT_V_ENV(vfncvt_f_f_v_w, 4, 4, clearl)
++GEN_VEXT_V_ENV(vfncvt_f_f_v_h, 2, 2)
++GEN_VEXT_V_ENV(vfncvt_f_f_v_w, 4, 4)
+ 
+ /*
+  *** Vector Reduction Operations
+  */
+ /* Vector Single-Width Integer Reduction Instructions */
+-#define GEN_VEXT_RED(NAME, TD, TS2, HD, HS2, OP, CLEAR_FN)\
++#define GEN_VEXT_RED(NAME, TD, TS2, HD, HS2, OP)          \
+ void HELPER(NAME)(void *vd, void *v0, void *vs1,          \
+         void *vs2, CPURISCVState *env, uint32_t desc)     \
+ {                                                         \
+     uint32_t vm = vext_vm(desc);                          \
+     uint32_t vl = env->vl;                                \
+     uint32_t i;                                           \
+-    uint32_t tot = env_archcpu(env)->cfg.vlen / 8;        \
+     TD s1 =  *((TD *)vs1 + HD(0));                        \
+                                                           \
+     for (i = 0; i < vl; i++) {                            \
+@@ -4317,70 +4190,69 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1,          \
+         s1 = OP(s1, (TD)s2);                              \
+     }                                                     \
+     *((TD *)vd + HD(0)) = s1;                             \
+-    CLEAR_FN(vd, 1, sizeof(TD), tot);                     \
+ }
+ 
+ /* vd[0] = sum(vs1[0], vs2[*]) */
+-GEN_VEXT_RED(vredsum_vs_b, int8_t, int8_t, H1, H1, DO_ADD, clearb)
+-GEN_VEXT_RED(vredsum_vs_h, int16_t, int16_t, H2, H2, DO_ADD, clearh)
+-GEN_VEXT_RED(vredsum_vs_w, int32_t, int32_t, H4, H4, DO_ADD, clearl)
+-GEN_VEXT_RED(vredsum_vs_d, int64_t, int64_t, H8, H8, DO_ADD, clearq)
++GEN_VEXT_RED(vredsum_vs_b, int8_t,  int8_t,  H1, H1, DO_ADD)
++GEN_VEXT_RED(vredsum_vs_h, int16_t, int16_t, H2, H2, DO_ADD)
++GEN_VEXT_RED(vredsum_vs_w, int32_t, int32_t, H4, H4, DO_ADD)
++GEN_VEXT_RED(vredsum_vs_d, int64_t, int64_t, H8, H8, DO_ADD)
+ 
+ /* vd[0] = maxu(vs1[0], vs2[*]) */
+-GEN_VEXT_RED(vredmaxu_vs_b, uint8_t, uint8_t, H1, H1, DO_MAX, clearb)
+-GEN_VEXT_RED(vredmaxu_vs_h, uint16_t, uint16_t, H2, H2, DO_MAX, clearh)
+-GEN_VEXT_RED(vredmaxu_vs_w, uint32_t, uint32_t, H4, H4, DO_MAX, clearl)
+-GEN_VEXT_RED(vredmaxu_vs_d, uint64_t, uint64_t, H8, H8, DO_MAX, clearq)
++GEN_VEXT_RED(vredmaxu_vs_b, uint8_t,  uint8_t,  H1, H1, DO_MAX)
++GEN_VEXT_RED(vredmaxu_vs_h, uint16_t, uint16_t, H2, H2, DO_MAX)
++GEN_VEXT_RED(vredmaxu_vs_w, uint32_t, uint32_t, H4, H4, DO_MAX)
++GEN_VEXT_RED(vredmaxu_vs_d, uint64_t, uint64_t, H8, H8, DO_MAX)
+ 
+ /* vd[0] = max(vs1[0], vs2[*]) */
+-GEN_VEXT_RED(vredmax_vs_b, int8_t, int8_t, H1, H1, DO_MAX, clearb)
+-GEN_VEXT_RED(vredmax_vs_h, int16_t, int16_t, H2, H2, DO_MAX, clearh)
+-GEN_VEXT_RED(vredmax_vs_w, int32_t, int32_t, H4, H4, DO_MAX, clearl)
+-GEN_VEXT_RED(vredmax_vs_d, int64_t, int64_t, H8, H8, DO_MAX, clearq)
++GEN_VEXT_RED(vredmax_vs_b, int8_t,  int8_t,  H1, H1, DO_MAX)
++GEN_VEXT_RED(vredmax_vs_h, int16_t, int16_t, H2, H2, DO_MAX)
++GEN_VEXT_RED(vredmax_vs_w, int32_t, int32_t, H4, H4, DO_MAX)
++GEN_VEXT_RED(vredmax_vs_d, int64_t, int64_t, H8, H8, DO_MAX)
+ 
+ /* vd[0] = minu(vs1[0], vs2[*]) */
+-GEN_VEXT_RED(vredminu_vs_b, uint8_t, uint8_t, H1, H1, DO_MIN, clearb)
+-GEN_VEXT_RED(vredminu_vs_h, uint16_t, uint16_t, H2, H2, DO_MIN, clearh)
+-GEN_VEXT_RED(vredminu_vs_w, uint32_t, uint32_t, H4, H4, DO_MIN, clearl)
+-GEN_VEXT_RED(vredminu_vs_d, uint64_t, uint64_t, H8, H8, DO_MIN, clearq)
++GEN_VEXT_RED(vredminu_vs_b, uint8_t,  uint8_t,  H1, H1, DO_MIN)
++GEN_VEXT_RED(vredminu_vs_h, uint16_t, uint16_t, H2, H2, DO_MIN)
++GEN_VEXT_RED(vredminu_vs_w, uint32_t, uint32_t, H4, H4, DO_MIN)
++GEN_VEXT_RED(vredminu_vs_d, uint64_t, uint64_t, H8, H8, DO_MIN)
+ 
+ /* vd[0] = min(vs1[0], vs2[*]) */
+-GEN_VEXT_RED(vredmin_vs_b, int8_t, int8_t, H1, H1, DO_MIN, clearb)
+-GEN_VEXT_RED(vredmin_vs_h, int16_t, int16_t, H2, H2, DO_MIN, clearh)
+-GEN_VEXT_RED(vredmin_vs_w, int32_t, int32_t, H4, H4, DO_MIN, clearl)
+-GEN_VEXT_RED(vredmin_vs_d, int64_t, int64_t, H8, H8, DO_MIN, clearq)
++GEN_VEXT_RED(vredmin_vs_b, int8_t,  int8_t,  H1, H1, DO_MIN)
++GEN_VEXT_RED(vredmin_vs_h, int16_t, int16_t, H2, H2, DO_MIN)
++GEN_VEXT_RED(vredmin_vs_w, int32_t, int32_t, H4, H4, DO_MIN)
++GEN_VEXT_RED(vredmin_vs_d, int64_t, int64_t, H8, H8, DO_MIN)
+ 
+ /* vd[0] = and(vs1[0], vs2[*]) */
+-GEN_VEXT_RED(vredand_vs_b, int8_t, int8_t, H1, H1, DO_AND, clearb)
+-GEN_VEXT_RED(vredand_vs_h, int16_t, int16_t, H2, H2, DO_AND, clearh)
+-GEN_VEXT_RED(vredand_vs_w, int32_t, int32_t, H4, H4, DO_AND, clearl)
+-GEN_VEXT_RED(vredand_vs_d, int64_t, int64_t, H8, H8, DO_AND, clearq)
++GEN_VEXT_RED(vredand_vs_b, int8_t,  int8_t,  H1, H1, DO_AND)
++GEN_VEXT_RED(vredand_vs_h, int16_t, int16_t, H2, H2, DO_AND)
++GEN_VEXT_RED(vredand_vs_w, int32_t, int32_t, H4, H4, DO_AND)
++GEN_VEXT_RED(vredand_vs_d, int64_t, int64_t, H8, H8, DO_AND)
+ 
+ /* vd[0] = or(vs1[0], vs2[*]) */
+-GEN_VEXT_RED(vredor_vs_b, int8_t, int8_t, H1, H1, DO_OR, clearb)
+-GEN_VEXT_RED(vredor_vs_h, int16_t, int16_t, H2, H2, DO_OR, clearh)
+-GEN_VEXT_RED(vredor_vs_w, int32_t, int32_t, H4, H4, DO_OR, clearl)
+-GEN_VEXT_RED(vredor_vs_d, int64_t, int64_t, H8, H8, DO_OR, clearq)
++GEN_VEXT_RED(vredor_vs_b, int8_t,  int8_t,  H1, H1, DO_OR)
++GEN_VEXT_RED(vredor_vs_h, int16_t, int16_t, H2, H2, DO_OR)
++GEN_VEXT_RED(vredor_vs_w, int32_t, int32_t, H4, H4, DO_OR)
++GEN_VEXT_RED(vredor_vs_d, int64_t, int64_t, H8, H8, DO_OR)
+ 
+ /* vd[0] = xor(vs1[0], vs2[*]) */
+-GEN_VEXT_RED(vredxor_vs_b, int8_t, int8_t, H1, H1, DO_XOR, clearb)
+-GEN_VEXT_RED(vredxor_vs_h, int16_t, int16_t, H2, H2, DO_XOR, clearh)
+-GEN_VEXT_RED(vredxor_vs_w, int32_t, int32_t, H4, H4, DO_XOR, clearl)
+-GEN_VEXT_RED(vredxor_vs_d, int64_t, int64_t, H8, H8, DO_XOR, clearq)
++GEN_VEXT_RED(vredxor_vs_b, int8_t,  int8_t,  H1, H1, DO_XOR)
++GEN_VEXT_RED(vredxor_vs_h, int16_t, int16_t, H2, H2, DO_XOR)
++GEN_VEXT_RED(vredxor_vs_w, int32_t, int32_t, H4, H4, DO_XOR)
++GEN_VEXT_RED(vredxor_vs_d, int64_t, int64_t, H8, H8, DO_XOR)
+ 
+ /* Vector Widening Integer Reduction Instructions */
+ /* signed sum reduction into double-width accumulator */
+-GEN_VEXT_RED(vwredsum_vs_b, int16_t, int8_t, H2, H1, DO_ADD, clearh)
+-GEN_VEXT_RED(vwredsum_vs_h, int32_t, int16_t, H4, H2, DO_ADD, clearl)
+-GEN_VEXT_RED(vwredsum_vs_w, int64_t, int32_t, H8, H4, DO_ADD, clearq)
++GEN_VEXT_RED(vwredsum_vs_b, int16_t, int8_t,  H2, H1, DO_ADD)
++GEN_VEXT_RED(vwredsum_vs_h, int32_t, int16_t, H4, H2, DO_ADD)
++GEN_VEXT_RED(vwredsum_vs_w, int64_t, int32_t, H8, H4, DO_ADD)
+ 
+ /* Unsigned sum reduction into double-width accumulator */
+-GEN_VEXT_RED(vwredsumu_vs_b, uint16_t, uint8_t, H2, H1, DO_ADD, clearh)
+-GEN_VEXT_RED(vwredsumu_vs_h, uint32_t, uint16_t, H4, H2, DO_ADD, clearl)
+-GEN_VEXT_RED(vwredsumu_vs_w, uint64_t, uint32_t, H8, H4, DO_ADD, clearq)
++GEN_VEXT_RED(vwredsumu_vs_b, uint16_t, uint8_t,  H2, H1, DO_ADD)
++GEN_VEXT_RED(vwredsumu_vs_h, uint32_t, uint16_t, H4, H2, DO_ADD)
++GEN_VEXT_RED(vwredsumu_vs_w, uint64_t, uint32_t, H8, H4, DO_ADD)
+ 
+ /* Vector Single-Width Floating-Point Reduction Instructions */
+-#define GEN_VEXT_FRED(NAME, TD, TS2, HD, HS2, OP, CLEAR_FN)\
++#define GEN_VEXT_FRED(NAME, TD, TS2, HD, HS2, OP)          \
+ void HELPER(NAME)(void *vd, void *v0, void *vs1,           \
+                   void *vs2, CPURISCVState *env,           \
+                   uint32_t desc)                           \
+@@ -4388,7 +4260,6 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1,           \
+     uint32_t vm = vext_vm(desc);                           \
+     uint32_t vl = env->vl;                                 \
+     uint32_t i;                                            \
+-    uint32_t tot = env_archcpu(env)->cfg.vlen / 8;         \
+     TD s1 =  *((TD *)vs1 + HD(0));                         \
+                                                            \
+     for (i = 0; i < vl; i++) {                             \
+@@ -4399,23 +4270,22 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1,           \
+         s1 = OP(s1, (TD)s2, &env->fp_status);              \
+     }                                                      \
+     *((TD *)vd + HD(0)) = s1;                              \
+-    CLEAR_FN(vd, 1, sizeof(TD), tot);                      \
+ }
+ 
+ /* Unordered sum */
+-GEN_VEXT_FRED(vfredsum_vs_h, uint16_t, uint16_t, H2, H2, float16_add, clearh)
+-GEN_VEXT_FRED(vfredsum_vs_w, uint32_t, uint32_t, H4, H4, float32_add, clearl)
+-GEN_VEXT_FRED(vfredsum_vs_d, uint64_t, uint64_t, H8, H8, float64_add, clearq)
++GEN_VEXT_FRED(vfredsum_vs_h, uint16_t, uint16_t, H2, H2, float16_add)
++GEN_VEXT_FRED(vfredsum_vs_w, uint32_t, uint32_t, H4, H4, float32_add)
++GEN_VEXT_FRED(vfredsum_vs_d, uint64_t, uint64_t, H8, H8, float64_add)
+ 
+ /* Maximum value */
+-GEN_VEXT_FRED(vfredmax_vs_h, uint16_t, uint16_t, H2, H2, float16_maxnum, clearh)
+-GEN_VEXT_FRED(vfredmax_vs_w, uint32_t, uint32_t, H4, H4, float32_maxnum, clearl)
+-GEN_VEXT_FRED(vfredmax_vs_d, uint64_t, uint64_t, H8, H8, float64_maxnum, clearq)
++GEN_VEXT_FRED(vfredmax_vs_h, uint16_t, uint16_t, H2, H2, float16_maxnum)
++GEN_VEXT_FRED(vfredmax_vs_w, uint32_t, uint32_t, H4, H4, float32_maxnum)
++GEN_VEXT_FRED(vfredmax_vs_d, uint64_t, uint64_t, H8, H8, float64_maxnum)
+ 
+ /* Minimum value */
+-GEN_VEXT_FRED(vfredmin_vs_h, uint16_t, uint16_t, H2, H2, float16_minnum, clearh)
+-GEN_VEXT_FRED(vfredmin_vs_w, uint32_t, uint32_t, H4, H4, float32_minnum, clearl)
+-GEN_VEXT_FRED(vfredmin_vs_d, uint64_t, uint64_t, H8, H8, float64_minnum, clearq)
++GEN_VEXT_FRED(vfredmin_vs_h, uint16_t, uint16_t, H2, H2, float16_minnum)
++GEN_VEXT_FRED(vfredmin_vs_w, uint32_t, uint32_t, H4, H4, float32_minnum)
++GEN_VEXT_FRED(vfredmin_vs_d, uint64_t, uint64_t, H8, H8, float64_minnum)
+ 
+ /* Vector Widening Floating-Point Reduction Instructions */
+ /* Unordered reduce 2*SEW = 2*SEW + sum(promote(SEW)) */
+@@ -4425,7 +4295,6 @@ void HELPER(vfwredsum_vs_h)(void *vd, void *v0, void *vs1,
+     uint32_t vm = vext_vm(desc);
+     uint32_t vl = env->vl;
+     uint32_t i;
+-    uint32_t tot = env_archcpu(env)->cfg.vlen / 8;
+     uint32_t s1 =  *((uint32_t *)vs1 + H4(0));
+ 
+     for (i = 0; i < vl; i++) {
+@@ -4437,7 +4306,6 @@ void HELPER(vfwredsum_vs_h)(void *vd, void *v0, void *vs1,
+                          &env->fp_status);
+     }
+     *((uint32_t *)vd + H4(0)) = s1;
+-    clearl(vd, 1, sizeof(uint32_t), tot);
+ }
+ 
+ void HELPER(vfwredsum_vs_w)(void *vd, void *v0, void *vs1,
+@@ -4446,7 +4314,6 @@ void HELPER(vfwredsum_vs_w)(void *vd, void *v0, void *vs1,
+     uint32_t vm = vext_vm(desc);
+     uint32_t vl = env->vl;
+     uint32_t i;
+-    uint32_t tot = env_archcpu(env)->cfg.vlen / 8;
+     uint64_t s1 =  *((uint64_t *)vs1);
+ 
+     for (i = 0; i < vl; i++) {
+@@ -4458,7 +4325,6 @@ void HELPER(vfwredsum_vs_w)(void *vd, void *v0, void *vs1,
+                          &env->fp_status);
+     }
+     *((uint64_t *)vd) = s1;
+-    clearq(vd, 1, sizeof(uint64_t), tot);
+ }
+ 
+ /*
+@@ -4600,11 +4466,10 @@ void HELPER(vmsof_m)(void *vd, void *v0, void *vs2, CPURISCVState *env,
+ }
+ 
+ /* Vector Iota Instruction */
+-#define GEN_VEXT_VIOTA_M(NAME, ETYPE, H, CLEAR_FN)                        \
++#define GEN_VEXT_VIOTA_M(NAME, ETYPE, H)                                  \
+ void HELPER(NAME)(void *vd, void *v0, void *vs2, CPURISCVState *env,      \
+                   uint32_t desc)                                          \
+ {                                                                         \
+-    uint32_t vlmax = env_archcpu(env)->cfg.vlen;                          \
+     uint32_t vm = vext_vm(desc);                                          \
+     uint32_t vl = env->vl;                                                \
+     uint32_t sum = 0;                                                     \
+@@ -4619,19 +4484,17 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, CPURISCVState *env,      \
+             sum++;                                                        \
+         }                                                                 \
+     }                                                                     \
+-    CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE));          \
+ }
+ 
+-GEN_VEXT_VIOTA_M(viota_m_b, uint8_t, H1, clearb)
+-GEN_VEXT_VIOTA_M(viota_m_h, uint16_t, H2, clearh)
+-GEN_VEXT_VIOTA_M(viota_m_w, uint32_t, H4, clearl)
+-GEN_VEXT_VIOTA_M(viota_m_d, uint64_t, H8, clearq)
++GEN_VEXT_VIOTA_M(viota_m_b, uint8_t,  H1)
++GEN_VEXT_VIOTA_M(viota_m_h, uint16_t, H2)
++GEN_VEXT_VIOTA_M(viota_m_w, uint32_t, H4)
++GEN_VEXT_VIOTA_M(viota_m_d, uint64_t, H8)
+ 
+ /* Vector Element Index Instruction */
+-#define GEN_VEXT_VID_V(NAME, ETYPE, H, CLEAR_FN)                          \
++#define GEN_VEXT_VID_V(NAME, ETYPE, H)                                    \
+ void HELPER(NAME)(void *vd, void *v0, CPURISCVState *env, uint32_t desc)  \
+ {                                                                         \
+-    uint32_t vlmax = env_archcpu(env)->cfg.vlen;                          \
+     uint32_t vm = vext_vm(desc);                                          \
+     uint32_t vl = env->vl;                                                \
+     int i;                                                                \
+@@ -4642,24 +4505,22 @@ void HELPER(NAME)(void *vd, void *v0, CPURISCVState *env, uint32_t desc)  \
+         }                                                                 \
+         *((ETYPE *)vd + H(i)) = i;                                        \
+     }                                                                     \
+-    CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE));          \
+ }
+ 
+-GEN_VEXT_VID_V(vid_v_b, uint8_t, H1, clearb)
+-GEN_VEXT_VID_V(vid_v_h, uint16_t, H2, clearh)
+-GEN_VEXT_VID_V(vid_v_w, uint32_t, H4, clearl)
+-GEN_VEXT_VID_V(vid_v_d, uint64_t, H8, clearq)
++GEN_VEXT_VID_V(vid_v_b, uint8_t,  H1)
++GEN_VEXT_VID_V(vid_v_h, uint16_t, H2)
++GEN_VEXT_VID_V(vid_v_w, uint32_t, H4)
++GEN_VEXT_VID_V(vid_v_d, uint64_t, H8)
+ 
+ /*
+  *** Vector Permutation Instructions
+  */
+ 
+ /* Vector Slide Instructions */
+-#define GEN_VEXT_VSLIDEUP_VX(NAME, ETYPE, H, CLEAR_FN)                    \
++#define GEN_VEXT_VSLIDEUP_VX(NAME, ETYPE, H)                              \
+ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2,         \
+                   CPURISCVState *env, uint32_t desc)                      \
+ {                                                                         \
+-    uint32_t vlmax = env_archcpu(env)->cfg.vlen;                          \
+     uint32_t vm = vext_vm(desc);                                          \
+     uint32_t vl = env->vl;                                                \
+     target_ulong offset = s1, i;                                          \
+@@ -4670,16 +4531,15 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2,         \
+         }                                                                 \
+         *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i - offset));          \
+     }                                                                     \
+-    CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE));          \
+ }
+ 
+ /* vslideup.vx vd, vs2, rs1, vm # vd[i+rs1] = vs2[i] */
+-GEN_VEXT_VSLIDEUP_VX(vslideup_vx_b, uint8_t, H1, clearb)
+-GEN_VEXT_VSLIDEUP_VX(vslideup_vx_h, uint16_t, H2, clearh)
+-GEN_VEXT_VSLIDEUP_VX(vslideup_vx_w, uint32_t, H4, clearl)
+-GEN_VEXT_VSLIDEUP_VX(vslideup_vx_d, uint64_t, H8, clearq)
++GEN_VEXT_VSLIDEUP_VX(vslideup_vx_b, uint8_t,  H1)
++GEN_VEXT_VSLIDEUP_VX(vslideup_vx_h, uint16_t, H2)
++GEN_VEXT_VSLIDEUP_VX(vslideup_vx_w, uint32_t, H4)
++GEN_VEXT_VSLIDEUP_VX(vslideup_vx_d, uint64_t, H8)
+ 
+-#define GEN_VEXT_VSLIDEDOWN_VX(NAME, ETYPE, H, CLEAR_FN)                  \
++#define GEN_VEXT_VSLIDEDOWN_VX(NAME, ETYPE, H)                            \
+ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2,         \
+                   CPURISCVState *env, uint32_t desc)                      \
+ {                                                                         \
+@@ -4695,20 +4555,18 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2,         \
+         }                                                                 \
+         *((ETYPE *)vd + H(i)) = j >= vlmax ? 0 : *((ETYPE *)vs2 + H(j));  \
+     }                                                                     \
+-    CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE));          \
+ }
+ 
+ /* vslidedown.vx vd, vs2, rs1, vm # vd[i] = vs2[i+rs1] */
+-GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_b, uint8_t, H1, clearb)
+-GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_h, uint16_t, H2, clearh)
+-GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_w, uint32_t, H4, clearl)
+-GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_d, uint64_t, H8, clearq)
++GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_b, uint8_t,  H1)
++GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_h, uint16_t, H2)
++GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_w, uint32_t, H4)
++GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_d, uint64_t, H8)
+ 
+-#define GEN_VEXT_VSLIDE1UP_VX(NAME, ETYPE, H, CLEAR_FN)                   \
++#define GEN_VEXT_VSLIDE1UP_VX(NAME, ETYPE, H)                             \
+ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2,         \
+                   CPURISCVState *env, uint32_t desc)                      \
+ {                                                                         \
+-    uint32_t vlmax = env_archcpu(env)->cfg.vlen;                          \
+     uint32_t vm = vext_vm(desc);                                          \
+     uint32_t vl = env->vl;                                                \
+     uint32_t i;                                                           \
+@@ -4723,20 +4581,18 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2,         \
+             *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i - 1));           \
+         }                                                                 \
+     }                                                                     \
+-    CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE));          \
+ }
+ 
+ /* vslide1up.vx vd, vs2, rs1, vm # vd[0]=x[rs1], vd[i+1] = vs2[i] */
+-GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_b, uint8_t, H1, clearb)
+-GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_h, uint16_t, H2, clearh)
+-GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_w, uint32_t, H4, clearl)
+-GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_d, uint64_t, H8, clearq)
++GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_b, uint8_t,  H1)
++GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_h, uint16_t, H2)
++GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_w, uint32_t, H4)
++GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_d, uint64_t, H8)
+ 
+-#define GEN_VEXT_VSLIDE1DOWN_VX(NAME, ETYPE, H, CLEAR_FN)                 \
++#define GEN_VEXT_VSLIDE1DOWN_VX(NAME, ETYPE, H)                           \
+ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2,         \
+                   CPURISCVState *env, uint32_t desc)                      \
+ {                                                                         \
+-    uint32_t vlmax = env_archcpu(env)->cfg.vlen;                          \
+     uint32_t vm = vext_vm(desc);                                          \
+     uint32_t vl = env->vl;                                                \
+     uint32_t i;                                                           \
+@@ -4751,17 +4607,16 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2,         \
+             *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i + 1));           \
+         }                                                                 \
+     }                                                                     \
+-    CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE));          \
+ }
+ 
+ /* vslide1down.vx vd, vs2, rs1, vm # vd[i] = vs2[i+1], vd[vl-1]=x[rs1] */
+-GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_b, uint8_t, H1, clearb)
+-GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_h, uint16_t, H2, clearh)
+-GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_w, uint32_t, H4, clearl)
+-GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_d, uint64_t, H8, clearq)
++GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_b, uint8_t,  H1)
++GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_h, uint16_t, H2)
++GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_w, uint32_t, H4)
++GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_d, uint64_t, H8)
+ 
+ /* Vector Register Gather Instruction */
+-#define GEN_VEXT_VRGATHER_VV(NAME, ETYPE, H, CLEAR_FN)                    \
++#define GEN_VEXT_VRGATHER_VV(NAME, ETYPE, H)                              \
+ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2,               \
+                   CPURISCVState *env, uint32_t desc)                      \
+ {                                                                         \
+@@ -4782,16 +4637,15 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2,               \
+             *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(index));           \
+         }                                                                 \
+     }                                                                     \
+-    CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE));          \
+ }
+ 
+ /* vd[i] = (vs1[i] >= VLMAX) ? 0 : vs2[vs1[i]]; */
+-GEN_VEXT_VRGATHER_VV(vrgather_vv_b, uint8_t, H1, clearb)
+-GEN_VEXT_VRGATHER_VV(vrgather_vv_h, uint16_t, H2, clearh)
+-GEN_VEXT_VRGATHER_VV(vrgather_vv_w, uint32_t, H4, clearl)
+-GEN_VEXT_VRGATHER_VV(vrgather_vv_d, uint64_t, H8, clearq)
++GEN_VEXT_VRGATHER_VV(vrgather_vv_b, uint8_t,  H1)
++GEN_VEXT_VRGATHER_VV(vrgather_vv_h, uint16_t, H2)
++GEN_VEXT_VRGATHER_VV(vrgather_vv_w, uint32_t, H4)
++GEN_VEXT_VRGATHER_VV(vrgather_vv_d, uint64_t, H8)
+ 
+-#define GEN_VEXT_VRGATHER_VX(NAME, ETYPE, H, CLEAR_FN)                    \
++#define GEN_VEXT_VRGATHER_VX(NAME, ETYPE, H)                              \
+ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2,         \
+                   CPURISCVState *env, uint32_t desc)                      \
+ {                                                                         \
+@@ -4811,21 +4665,19 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2,         \
+             *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(index));           \
+         }                                                                 \
+     }                                                                     \
+-    CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE));          \
+ }
+ 
+ /* vd[i] = (x[rs1] >= VLMAX) ? 0 : vs2[rs1] */
+-GEN_VEXT_VRGATHER_VX(vrgather_vx_b, uint8_t, H1, clearb)
+-GEN_VEXT_VRGATHER_VX(vrgather_vx_h, uint16_t, H2, clearh)
+-GEN_VEXT_VRGATHER_VX(vrgather_vx_w, uint32_t, H4, clearl)
+-GEN_VEXT_VRGATHER_VX(vrgather_vx_d, uint64_t, H8, clearq)
++GEN_VEXT_VRGATHER_VX(vrgather_vx_b, uint8_t,  H1)
++GEN_VEXT_VRGATHER_VX(vrgather_vx_h, uint16_t, H2)
++GEN_VEXT_VRGATHER_VX(vrgather_vx_w, uint32_t, H4)
++GEN_VEXT_VRGATHER_VX(vrgather_vx_d, uint64_t, H8)
+ 
+ /* Vector Compress Instruction */
+-#define GEN_VEXT_VCOMPRESS_VM(NAME, ETYPE, H, CLEAR_FN)                   \
++#define GEN_VEXT_VCOMPRESS_VM(NAME, ETYPE, H)                             \
+ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2,               \
+                   CPURISCVState *env, uint32_t desc)                      \
+ {                                                                         \
+-    uint32_t vlmax = env_archcpu(env)->cfg.vlen;                          \
+     uint32_t vl = env->vl;                                                \
+     uint32_t num = 0, i;                                                  \
+                                                                           \
+@@ -4836,11 +4688,10 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2,               \
+         *((ETYPE *)vd + H(num)) = *((ETYPE *)vs2 + H(i));                 \
+         num++;                                                            \
+     }                                                                     \
+-    CLEAR_FN(vd, num, num * sizeof(ETYPE), vlmax * sizeof(ETYPE));        \
+ }
+ 
+ /* Compress into vd elements of vs2 where vs1 is enabled */
+-GEN_VEXT_VCOMPRESS_VM(vcompress_vm_b, uint8_t, H1, clearb)
+-GEN_VEXT_VCOMPRESS_VM(vcompress_vm_h, uint16_t, H2, clearh)
+-GEN_VEXT_VCOMPRESS_VM(vcompress_vm_w, uint32_t, H4, clearl)
+-GEN_VEXT_VCOMPRESS_VM(vcompress_vm_d, uint64_t, H8, clearq)
++GEN_VEXT_VCOMPRESS_VM(vcompress_vm_b, uint8_t,  H1)
++GEN_VEXT_VCOMPRESS_VM(vcompress_vm_h, uint16_t, H2)
++GEN_VEXT_VCOMPRESS_VM(vcompress_vm_w, uint32_t, H4)
++GEN_VEXT_VCOMPRESS_VM(vcompress_vm_d, uint64_t, H8)
+-- 
+2.33.1
+
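The hunks above (end of patch 0021) drop the trailing CLEAR_FN(...) call and its clearb/clearh/clearl/clearq arguments from the slide, gather and compress helpers, so elements past vl are left untouched instead of being zeroed. This matches the RVV 1.0 tail-agnostic policy, which allows the destination tail to keep its previous values. A minimal standalone sketch of the behavioural difference, using a hypothetical byte-element loop rather than the QEMU helper macros:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define VLMAX 8   /* hypothetical number of byte elements in a register group */

/* rvv-0.7.1 style helper: the tail vd[vl..VLMAX) is zeroed afterwards,
 * which is what the removed CLEAR_FN(vd, vl, ...) calls used to do. */
static void vslideup_with_clear(uint8_t *vd, const uint8_t *vs2,
                                uint32_t offset, uint32_t vl)
{
    for (uint32_t i = offset; i < vl; i++) {
        vd[i] = vs2[i - offset];
    }
    memset(vd + vl, 0, VLMAX - vl);   /* old tail-zeroing behaviour */
}

/* rvv-1.0 tail-agnostic style: no clearing, the tail keeps its old contents. */
static void vslideup_tail_agnostic(uint8_t *vd, const uint8_t *vs2,
                                   uint32_t offset, uint32_t vl)
{
    for (uint32_t i = offset; i < vl; i++) {
        vd[i] = vs2[i - offset];
    }
}

int main(void)
{
    uint8_t vs2[VLMAX] = {1, 2, 3, 4, 5, 6, 7, 8};
    uint8_t a[VLMAX], b[VLMAX];

    memset(a, 0xaa, sizeof(a));       /* pre-existing destination contents */
    memset(b, 0xaa, sizeof(b));

    vslideup_with_clear(a, vs2, 2, 6);
    vslideup_tail_agnostic(b, vs2, 2, 6);

    for (int i = 0; i < VLMAX; i++) {
        printf("i=%d  cleared=0x%02x  tail-agnostic=0x%02x\n", i, a[i], b[i]);
    }
    return 0;
}

With offset = 2 and vl = 6, both variants write elements 2..5; the difference is only in the tail, where indices 6 and 7 stay 0xaa under the tail-agnostic loop but are forced to zero by the old clearing helper.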

+ 1088 - 0
recipes-devtools/qemu/qemu/0022-target-riscv-rvv-1.0-update-check-functions.patch

@@ -0,0 +1,1088 @@
+From 6a82af7b913410fe993671d033d1ee4402d10d4a Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Mon, 28 Sep 2020 16:23:30 +0800
+Subject: [PATCH 022/107] target/riscv: rvv-1.0: update check functions
+
+Update check functions with RVV 1.0 rules.
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
+---
+ target/riscv/insn_trans/trans_rvv.c.inc | 739 ++++++++++++++++--------
+ 1 file changed, 505 insertions(+), 234 deletions(-)
+
+diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
+index 46e18a62b5..f666c64bbe 100644
+--- a/target/riscv/insn_trans/trans_rvv.c.inc
++++ b/target/riscv/insn_trans/trans_rvv.c.inc
+@@ -19,11 +19,124 @@
+ #include "tcg/tcg-gvec-desc.h"
+ #include "internals.h"
+ 
++static inline bool is_overlapped(const int8_t astart, int8_t asize,
++                                 const int8_t bstart, int8_t bsize)
++{
++    const int8_t aend = astart + asize;
++    const int8_t bend = bstart + bsize;
++
++    return MAX(aend, bend) - MIN(astart, bstart) < asize + bsize;
++}
++
++static bool require_rvv(DisasContext *s)
++{
++    return s->mstatus_vs != 0;
++}
++
++static bool require_rvf(DisasContext *s)
++{
++    if (s->mstatus_fs == 0) {
++        return false;
++    }
++
++    switch (s->sew) {
++    case MO_16:
++    case MO_32:
++        return has_ext(s, RVF);
++    case MO_64:
++        return has_ext(s, RVD);
++    default:
++        return false;
++    }
++}
++
++static bool require_scale_rvf(DisasContext *s)
++{
++    if (s->mstatus_fs == 0) {
++        return false;
++    }
++
++    switch (s->sew) {
++    case MO_8:
++    case MO_16:
++        return has_ext(s, RVF);
++    case MO_32:
++        return has_ext(s, RVD);
++    default:
++        return false;
++    }
++}
++
++/* Destination vector register group cannot overlap source mask register. */
++static bool require_vm(int vm, int vd)
++{
++    return (vm != 0 || vd != 0);
++}
++
++static bool require_nf(int vd, int nf, int lmul)
++{
++    int size = nf << MAX(lmul, 0);
++    return size <= 8 && vd + size <= 32;
++}
++
++/*
++ * Vector register should aligned with the passed-in LMUL (EMUL).
++ * If LMUL < 0, i.e. fractional LMUL, any vector register is allowed.
++ */
++static bool require_align(const int8_t val, const int8_t lmul)
++{
++    return lmul <= 0 || extract32(val, 0, lmul) == 0;
++}
++
++/*
++ * A destination vector register group can overlap a source vector
++ * register group only if one of the following holds:
++ *  1. The destination EEW equals the source EEW.
++ *  2. The destination EEW is smaller than the source EEW and the overlap
++ *     is in the lowest-numbered part of the source register group.
++ *  3. The destination EEW is greater than the source EEW, the source EMUL
++ *     is at least 1, and the overlap is in the highest-numbered part of
++ *     the destination register group.
++ * (Section 5.2)
++ *
++ * This function returns true if one of the following holds:
++ *  * Destination vector register group does not overlap a source vector
++ *    register group.
++ *  * Rule 3 met.
++ * For rule 1, overlap is allowed so this function doesn't need to be called.
++ * For rule 2, (vd == vs). Caller has to check whether: (vd != vs) before
++ * calling this function.
++ */
++static bool require_noover(const int8_t dst, const int8_t dst_lmul,
++                           const int8_t src, const int8_t src_lmul)
++{
++    int8_t dst_size = dst_lmul <= 0 ? 1 : 1 << dst_lmul;
++    int8_t src_size = src_lmul <= 0 ? 1 : 1 << src_lmul;
++
++    /* Destination EEW is greater than the source EEW, check rule 3. */
++    if (dst_size > src_size) {
++        if (dst < src &&
++            src_lmul >= 0 &&
++            is_overlapped(dst, dst_size, src, src_size) &&
++            !is_overlapped(dst, dst_size, src + src_size, src_size)) {
++            return true;
++        }
++    }
++
++    return !is_overlapped(dst, dst_size, src, src_size);
++}
++
++static bool require_noover_seg(const int8_t dst, const int8_t nf,
++                               const int8_t src)
++{
++    return !is_overlapped(dst, nf, src, 1);
++}
++
+ static bool trans_vsetvl(DisasContext *ctx, arg_vsetvl *a)
+ {
+     TCGv s1, s2, dst;
+ 
+-    if (!has_ext(ctx, RVV)) {
++    if (!require_rvv(ctx) || !has_ext(ctx, RVV)) {
+         return false;
+     }
+ 
+@@ -56,7 +169,7 @@ static bool trans_vsetvli(DisasContext *ctx, arg_vsetvli *a)
+ {
+     TCGv s1, s2, dst;
+ 
+-    if (!has_ext(ctx, RVV)) {
++    if (!require_rvv(ctx) || !has_ext(ctx, RVV)) {
+         return false;
+     }
+ 
+@@ -100,54 +213,246 @@ static bool vext_check_isa_ill(DisasContext *s)
+     return !s->vill;
+ }
+ 
++static bool vext_check_ss(DisasContext *s, int vd, int vs, int vm)
++{
++    return require_vm(vm, vd) &&
++        require_align(vd, s->lmul) &&
++        require_align(vs, s->lmul);
++}
++
+ /*
+- * There are two rules check here.
++ * Check function for vector instruction with format:
++ * single-width result and single-width sources (SEW = SEW op SEW)
+  *
+- * 1. Vector register numbers are multiples of LMUL. (Section 3.2)
++ * Rules to be checked here:
++ *   1. Destination vector register group for a masked vector
++ *      instruction cannot overlap the source mask register (v0).
++ *      (Section 5.3)
++ *   2. Destination vector register number is multiples of LMUL.
++ *      (Section 3.3.2)
++ *   3. Source (vs2, vs1) vector register number are multiples of LMUL.
++ *      (Section 3.3.2)
++ */
++static bool vext_check_sss(DisasContext *s, int vd, int vs1, int vs2, int vm)
++{
++    return vext_check_ss(s, vd, vs2, vm) &&
++        require_align(vs1, s->lmul);
++}
++
++static bool vext_check_ms(DisasContext *s, int vd, int vs)
++{
++    bool ret = require_align(vs, s->lmul);
++    if (vd != vs) {
++        ret &= require_noover(vd, 0, vs, s->lmul);
++    }
++    return ret;
++}
++
++/*
++ * Check function for maskable vector instruction with format:
++ * single-width result and single-width sources (SEW = SEW op SEW)
+  *
+- * 2. For all widening instructions, the destination LMUL value must also be
+- *    a supported LMUL value. (Section 11.2)
++ * Rules to be checked here:
++ *   1. Source (vs2, vs1) vector register number are multiples of LMUL.
++ *      (Section 3.3.2)
++ *   2. Destination vector register cannot overlap a source vector
++ *      register (vs2, vs1) group.
++ *      (Section 5.2)
++ *   3. The destination vector register group for a masked vector
++ *      instruction cannot overlap the source mask register (v0),
++ *      unless the destination vector register is being written
++ *      with a mask value (e.g., comparisons) or the scalar result
++ *      of a reduction. (Section 5.3)
+  */
+-static bool vext_check_reg(DisasContext *s, uint32_t reg, bool widen)
++static bool vext_check_mss(DisasContext *s, int vd, int vs1, int vs2)
+ {
+-    /*
+-     * The destination vector register group results are arranged as if both
+-     * SEW and LMUL were at twice their current settings. (Section 11.2).
+-     */
+-    int legal = widen ? 2 << s->lmul : 1 << s->lmul;
++    bool ret = vext_check_ms(s, vd, vs2) &&
++        require_align(vs1, s->lmul);
++    if (vd != vs1) {
++        ret &= require_noover(vd, 0, vs1, s->lmul);
++    }
++    return ret;
++}
+ 
+-    return !((s->lmul == 0x3 && widen) || (reg % legal));
++/*
++ * Common check function for vector widening instructions
++ * of double-width result (2*SEW).
++ *
++ * Rules to be checked here:
++ *   1. The largest vector register group used by an instruction
++ *      can not be greater than 8 vector registers (Section 5.2):
++ *      => LMUL < 8.
++ *      => SEW < 64.
++ *   2. Destination vector register number is multiples of 2 * LMUL.
++ *      (Section 3.3.2, 11.2)
++ *   3. Destination vector register group for a masked vector
++ *      instruction cannot overlap the source mask register (v0).
++ *      (Section 5.3)
++ */
++static bool vext_wide_check_common(DisasContext *s, int vd, int vm)
++{
++    return (s->lmul <= 2) &&
++           (s->sew < MO_64) &&
++           require_align(vd, s->lmul + 1) &&
++           require_vm(vm, vd);
+ }
+ 
+ /*
+- * There are two rules check here.
++ * Common check function for vector narrowing instructions
++ * of single-width result (SEW) and double-width source (2*SEW).
++ *
++ * Rules to be checked here:
++ *   1. The largest vector register group used by an instruction
++ *      can not be greater than 8 vector registers (Section 5.2):
++ *      => LMUL < 8.
++ *      => SEW < 64.
++ *   2. Source vector register number is multiples of 2 * LMUL.
++ *      (Section 3.3.2, 11.3)
++ *   3. Destination vector register number is multiples of LMUL.
++ *      (Section 3.3.2, 11.3)
++ *   4. Destination vector register group for a masked vector
++ *      instruction cannot overlap the source mask register (v0).
++ *      (Section 5.3)
++ */
++static bool vext_narrow_check_common(DisasContext *s, int vd, int vs2,
++                                     int vm)
++{
++    return (s->lmul <= 2) &&
++           (s->sew < MO_64) &&
++           require_align(vs2, s->lmul + 1) &&
++           require_align(vd, s->lmul) &&
++           require_vm(vm, vd);
++}
++
++static bool vext_check_ds(DisasContext *s, int vd, int vs, int vm)
++{
++    return vext_wide_check_common(s, vd, vm) &&
++        require_align(vs, s->lmul) &&
++        require_noover(vd, s->lmul + 1, vs, s->lmul);
++}
++
++static bool vext_check_dd(DisasContext *s, int vd, int vs, int vm)
++{
++    return vext_wide_check_common(s, vd, vm) &&
++        require_align(vs, s->lmul + 1);
++}
++
++/*
++ * Check function for vector instruction with format:
++ * double-width result and single-width sources (2*SEW = SEW op SEW)
+  *
+- * 1. The destination vector register group for a masked vector instruction can
+- *    only overlap the source mask register (v0) when LMUL=1. (Section 5.3)
++ * Rules to be checked here:
++ *   1. All rules in defined in widen common rules are applied.
++ *   2. Source (vs2, vs1) vector register number are multiples of LMUL.
++ *      (Section 3.3.2)
++ *   3. Destination vector register cannot overlap a source vector
++ *      register (vs2, vs1) group.
++ *      (Section 5.2)
++ */
++static bool vext_check_dss(DisasContext *s, int vd, int vs1, int vs2, int vm)
++{
++    return vext_check_ds(s, vd, vs2, vm) &&
++        require_align(vs1, s->lmul) &&
++        require_noover(vd, s->lmul + 1, vs1, s->lmul);
++}
++
++/*
++ * Check function for vector instruction with format:
++ * double-width result and double-width source1 and single-width
++ * source2 (2*SEW = 2*SEW op SEW)
+  *
+- * 2. In widen instructions and some other insturctions, like vslideup.vx,
+- *    there is no need to check whether LMUL=1.
++ * Rules to be checked here:
++ *   1. All rules in defined in widen common rules are applied.
++ *   2. Source 1 (vs2) vector register number is multiples of 2 * LMUL.
++ *      (Section 3.3.2)
++ *   3. Source 2 (vs1) vector register number is multiples of LMUL.
++ *      (Section 3.3.2)
++ *   4. Destination vector register cannot overlap a source vector
++ *      register (vs1) group.
++ *      (Section 5.2)
+  */
+-static bool vext_check_overlap_mask(DisasContext *s, uint32_t vd, bool vm,
+-    bool force)
++static bool vext_check_dds(DisasContext *s, int vd, int vs1, int vs2, int vm)
+ {
+-    return (vm != 0 || vd != 0) || (!force && (s->lmul == 0));
++    return vext_check_ds(s, vd, vs1, vm) &&
++        require_align(vs2, s->lmul + 1);
+ }
+ 
+-/* The LMUL setting must be such that LMUL * NFIELDS <= 8. (Section 7.8) */
+-static bool vext_check_nf(DisasContext *s, uint32_t nf)
++static bool vext_check_sd(DisasContext *s, int vd, int vs, int vm)
+ {
+-    return (1 << s->lmul) * nf <= 8;
++    bool ret = vext_narrow_check_common(s, vd, vs, vm);
++    if (vd != vs) {
++        ret &= require_noover(vd, s->lmul, vs, s->lmul + 1);
++    }
++    return ret;
+ }
+ 
+ /*
+- * The destination vector register group cannot overlap a source vector register
+- * group of a different element width. (Section 11.2)
++ * Check function for vector instruction with format:
++ * single-width result and double-width source 1 and single-width
++ * source 2 (SEW = 2*SEW op SEW)
++ *
++ * Rules to be checked here:
++ *   1. All rules in defined in narrow common rules are applied.
++ *   2. Destination vector register cannot overlap a source vector
++ *      register (vs2) group.
++ *      (Section 5.2)
++ *   3. Source 2 (vs1) vector register number is multiples of LMUL.
++ *      (Section 3.3.2)
+  */
+-static inline bool vext_check_overlap_group(int rd, int dlen, int rs, int slen)
++static bool vext_check_sds(DisasContext *s, int vd, int vs1, int vs2, int vm)
+ {
+-    return ((rd >= rs + slen) || (rs >= rd + dlen));
++    return vext_check_sd(s, vd, vs2, vm) &&
++        require_align(vs1, s->lmul);
+ }
++
++/*
++ * Check function for vector reduction instructions.
++ *
++ * Rules to be checked here:
++ *   1. Source 1 (vs2) vector register number is multiples of LMUL.
++ *      (Section 3.3.2)
++ */
++static bool vext_check_reduction(DisasContext *s, int vs2)
++{
++    return require_align(vs2, s->lmul) && (s->vstart == 0);
++}
++
++/*
++ * Check function for vector slide instructions.
++ *
++ * Rules to be checked here:
++ *   1. Source 1 (vs2) vector register number is multiples of LMUL.
++ *      (Section 3.3.2)
++ *   2. Destination vector register number is multiples of LMUL.
++ *      (Section 3.3.2)
++ *   3. Destination vector register group for a masked vector
++ *      instruction cannot overlap the source mask register (v0).
++ *      (Section 5.3)
++ *   4. The destination vector register group for vslideup, vslide1up,
++ *      vfslide1up, cannot overlap the source vector register (vs2) group.
++ *      (Section 5.2, 17.3.1, 17.3.3)
++ */
++static bool vext_check_slide(DisasContext *s, int vd, int vs2,
++                             int vm, bool is_over)
++{
++    bool ret = require_align(vs2, s->lmul) &&
++               require_align(vd, s->lmul) &&
++               require_vm(vm, vd);
++    if (is_over) {
++        ret &= (vd != vs2);
++    }
++    return ret;
++}
++
++/*
++ * In cpu_get_tb_cpu_state(), set VILL if RVV was not present.
++ * So RVV is also be checked in this function.
++ */
++static bool vext_check_isa_ill(DisasContext *s)
++{
++    return !s->vill;
++}
++
+ /* common translation macro */
+ #define GEN_VEXT_TRANS(NAME, SEQ, ARGTYPE, OP, CHECK)      \
+ static bool trans_##NAME(DisasContext *s, arg_##ARGTYPE *a)\
+@@ -803,11 +1108,9 @@ GEN_VEXT_TRANS(vamomaxud_v, 17, rwdvm, amo_op, amo_check)
+ 
+ static bool opivv_check(DisasContext *s, arg_rmrr *a)
+ {
+-    return (vext_check_isa_ill(s) &&
+-            vext_check_overlap_mask(s, a->rd, a->vm, false) &&
+-            vext_check_reg(s, a->rd, false) &&
+-            vext_check_reg(s, a->rs2, false) &&
+-            vext_check_reg(s, a->rs1, false));
++    return require_rvv(s) &&
++           vext_check_isa_ill(s) &&
++           vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm);
+ }
+ 
+ typedef void GVecGen3Fn(unsigned, uint32_t, uint32_t,
+@@ -898,10 +1201,9 @@ static bool opivx_trans(uint32_t vd, uint32_t rs1, uint32_t vs2, uint32_t vm,
+ 
+ static bool opivx_check(DisasContext *s, arg_rmrr *a)
+ {
+-    return (vext_check_isa_ill(s) &&
+-            vext_check_overlap_mask(s, a->rd, a->vm, false) &&
+-            vext_check_reg(s, a->rd, false) &&
+-            vext_check_reg(s, a->rs2, false));
++    return require_rvv(s) &&
++           vext_check_isa_ill(s) &&
++           vext_check_ss(s, a->rd, a->rs2, a->vm);
+ }
+ 
+ typedef void GVecGen2sFn(unsigned, uint32_t, uint32_t, TCGv_i64,
+@@ -1098,16 +1400,9 @@ GEN_OPIVI_GVEC_TRANS(vrsub_vi, 0, vrsub_vx, rsubi)
+ /* OPIVV with WIDEN */
+ static bool opivv_widen_check(DisasContext *s, arg_rmrr *a)
+ {
+-    return (vext_check_isa_ill(s) &&
+-            vext_check_overlap_mask(s, a->rd, a->vm, true) &&
+-            vext_check_reg(s, a->rd, true) &&
+-            vext_check_reg(s, a->rs2, false) &&
+-            vext_check_reg(s, a->rs1, false) &&
+-            vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs2,
+-                                     1 << s->lmul) &&
+-            vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs1,
+-                                     1 << s->lmul) &&
+-            (s->lmul < 0x3) && (s->sew < 0x3));
++    return require_rvv(s) &&
++           vext_check_isa_ill(s) &&
++           vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm);
+ }
+ 
+ static bool do_opivv_widen(DisasContext *s, arg_rmrr *a,
+@@ -1152,13 +1447,9 @@ GEN_OPIVV_WIDEN_TRANS(vwsub_vv, opivv_widen_check)
+ /* OPIVX with WIDEN */
+ static bool opivx_widen_check(DisasContext *s, arg_rmrr *a)
+ {
+-    return (vext_check_isa_ill(s) &&
+-            vext_check_overlap_mask(s, a->rd, a->vm, true) &&
+-            vext_check_reg(s, a->rd, true) &&
+-            vext_check_reg(s, a->rs2, false) &&
+-            vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs2,
+-                                     1 << s->lmul) &&
+-            (s->lmul < 0x3) && (s->sew < 0x3));
++    return require_rvv(s) &&
++           vext_check_isa_ill(s) &&
++           vext_check_ds(s, a->rd, a->rs2, a->vm);
+ }
+ 
+ static bool do_opivx_widen(DisasContext *s, arg_rmrr *a,
+@@ -1189,14 +1480,9 @@ GEN_OPIVX_WIDEN_TRANS(vwsub_vx)
+ /* WIDEN OPIVV with WIDEN */
+ static bool opiwv_widen_check(DisasContext *s, arg_rmrr *a)
+ {
+-    return (vext_check_isa_ill(s) &&
+-            vext_check_overlap_mask(s, a->rd, a->vm, true) &&
+-            vext_check_reg(s, a->rd, true) &&
+-            vext_check_reg(s, a->rs2, true) &&
+-            vext_check_reg(s, a->rs1, false) &&
+-            vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs1,
+-                                     1 << s->lmul) &&
+-            (s->lmul < 0x3) && (s->sew < 0x3));
++    return require_rvv(s) &&
++           vext_check_isa_ill(s) &&
++           vext_check_dds(s, a->rd, a->rs1, a->rs2, a->vm);
+ }
+ 
+ static bool do_opiwv_widen(DisasContext *s, arg_rmrr *a,
+@@ -1239,11 +1525,9 @@ GEN_OPIWV_WIDEN_TRANS(vwsub_wv)
+ /* WIDEN OPIVX with WIDEN */
+ static bool opiwx_widen_check(DisasContext *s, arg_rmrr *a)
+ {
+-    return (vext_check_isa_ill(s) &&
+-            vext_check_overlap_mask(s, a->rd, a->vm, true) &&
+-            vext_check_reg(s, a->rd, true) &&
+-            vext_check_reg(s, a->rs2, true) &&
+-            (s->lmul < 0x3) && (s->sew < 0x3));
++    return require_rvv(s) &&
++           vext_check_isa_ill(s) &&
++           vext_check_dd(s, a->rd, a->rs2, a->vm);
+ }
+ 
+ static bool do_opiwx_widen(DisasContext *s, arg_rmrr *a,
+@@ -1304,11 +1588,10 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a)             \
+  */
+ static bool opivv_vadc_check(DisasContext *s, arg_rmrr *a)
+ {
+-    return (vext_check_isa_ill(s) &&
+-            vext_check_reg(s, a->rd, false) &&
+-            vext_check_reg(s, a->rs2, false) &&
+-            vext_check_reg(s, a->rs1, false) &&
+-            ((a->rd != 0) || (s->lmul == 0)));
++    return require_rvv(s) &&
++           vext_check_isa_ill(s) &&
++           (a->rd != 0) &&
++           vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm);
+ }
+ 
+ GEN_OPIVV_TRANS(vadc_vvm, opivv_vadc_check)
+@@ -1320,11 +1603,9 @@ GEN_OPIVV_TRANS(vsbc_vvm, opivv_vadc_check)
+  */
+ static bool opivv_vmadc_check(DisasContext *s, arg_rmrr *a)
+ {
+-    return (vext_check_isa_ill(s) &&
+-            vext_check_reg(s, a->rs2, false) &&
+-            vext_check_reg(s, a->rs1, false) &&
+-            vext_check_overlap_group(a->rd, 1, a->rs1, 1 << s->lmul) &&
+-            vext_check_overlap_group(a->rd, 1, a->rs2, 1 << s->lmul));
++    return require_rvv(s) &&
++           vext_check_isa_ill(s) &&
++           vext_check_mss(s, a->rd, a->rs1, a->rs2);
+ }
+ 
+ GEN_OPIVV_TRANS(vmadc_vvm, opivv_vmadc_check)
+@@ -1332,10 +1613,10 @@ GEN_OPIVV_TRANS(vmsbc_vvm, opivv_vmadc_check)
+ 
+ static bool opivx_vadc_check(DisasContext *s, arg_rmrr *a)
+ {
+-    return (vext_check_isa_ill(s) &&
+-            vext_check_reg(s, a->rd, false) &&
+-            vext_check_reg(s, a->rs2, false) &&
+-            ((a->rd != 0) || (s->lmul == 0)));
++    return require_rvv(s) &&
++           vext_check_isa_ill(s) &&
++           (a->rd != 0) &&
++           vext_check_ss(s, a->rd, a->rs2, a->vm);
+ }
+ 
+ /* OPIVX without GVEC IR */
+@@ -1358,9 +1639,9 @@ GEN_OPIVX_TRANS(vsbc_vxm, opivx_vadc_check)
+ 
+ static bool opivx_vmadc_check(DisasContext *s, arg_rmrr *a)
+ {
+-    return (vext_check_isa_ill(s) &&
+-            vext_check_reg(s, a->rs2, false) &&
+-            vext_check_overlap_group(a->rd, 1, a->rs2, 1 << s->lmul));
++    return require_rvv(s) &&
++           vext_check_isa_ill(s) &&
++           vext_check_ms(s, a->rd, a->rs2);
+ }
+ 
+ GEN_OPIVX_TRANS(vmadc_vxm, opivx_vmadc_check)
+@@ -1451,14 +1732,9 @@ GEN_OPIVI_GVEC_TRANS(vsra_vi, 1, vsra_vx,  sari)
+ /* Vector Narrowing Integer Right Shift Instructions */
+ static bool opivv_narrow_check(DisasContext *s, arg_rmrr *a)
+ {
+-    return (vext_check_isa_ill(s) &&
+-            vext_check_overlap_mask(s, a->rd, a->vm, false) &&
+-            vext_check_reg(s, a->rd, false) &&
+-            vext_check_reg(s, a->rs2, true) &&
+-            vext_check_reg(s, a->rs1, false) &&
+-            vext_check_overlap_group(a->rd, 1 << s->lmul, a->rs2,
+-                2 << s->lmul) &&
+-            (s->lmul < 0x3) && (s->sew < 0x3));
++    return require_rvv(s) &&
++           vext_check_isa_ill(s) &&
++           vext_check_sds(s, a->rd, a->rs1, a->rs2, a->vm);
+ }
+ 
+ /* OPIVV with NARROW */
+@@ -1492,13 +1768,9 @@ GEN_OPIVV_NARROW_TRANS(vnsrl_vv)
+ 
+ static bool opivx_narrow_check(DisasContext *s, arg_rmrr *a)
+ {
+-    return (vext_check_isa_ill(s) &&
+-            vext_check_overlap_mask(s, a->rd, a->vm, false) &&
+-            vext_check_reg(s, a->rd, false) &&
+-            vext_check_reg(s, a->rs2, true) &&
+-            vext_check_overlap_group(a->rd, 1 << s->lmul, a->rs2,
+-                2 << s->lmul) &&
+-            (s->lmul < 0x3) && (s->sew < 0x3));
++    return require_rvv(s) &&
++           vext_check_isa_ill(s) &&
++           vext_check_sd(s, a->rd, a->rs2, a->vm);
+ }
+ 
+ /* OPIVX with NARROW */
+@@ -1546,13 +1818,11 @@ GEN_OPIVI_NARROW_TRANS(vnsrl_vi, 1, vnsrl_vx)
+  */
+ static bool opivv_cmp_check(DisasContext *s, arg_rmrr *a)
+ {
+-    return (vext_check_isa_ill(s) &&
+-            vext_check_reg(s, a->rs2, false) &&
+-            vext_check_reg(s, a->rs1, false) &&
+-            ((vext_check_overlap_group(a->rd, 1, a->rs1, 1 << s->lmul) &&
+-              vext_check_overlap_group(a->rd, 1, a->rs2, 1 << s->lmul)) ||
+-             (s->lmul == 0)));
++    return require_rvv(s) &&
++           vext_check_isa_ill(s) &&
++           vext_check_mss(s, a->rd, a->rs1, a->rs2);
+ }
++
+ GEN_OPIVV_TRANS(vmseq_vv, opivv_cmp_check)
+ GEN_OPIVV_TRANS(vmsne_vv, opivv_cmp_check)
+ GEN_OPIVV_TRANS(vmsltu_vv, opivv_cmp_check)
+@@ -1562,10 +1832,9 @@ GEN_OPIVV_TRANS(vmsle_vv, opivv_cmp_check)
+ 
+ static bool opivx_cmp_check(DisasContext *s, arg_rmrr *a)
+ {
+-    return (vext_check_isa_ill(s) &&
+-            vext_check_reg(s, a->rs2, false) &&
+-            (vext_check_overlap_group(a->rd, 1, a->rs2, 1 << s->lmul) ||
+-             (s->lmul == 0)));
++    return require_rvv(s) &&
++           vext_check_isa_ill(s) &&
++           vext_check_ms(s, a->rd, a->rs2);
+ }
+ 
+ GEN_OPIVX_TRANS(vmseq_vx, opivx_cmp_check)
+@@ -1644,10 +1913,10 @@ GEN_OPIVX_WIDEN_TRANS(vwmaccus_vx)
+ /* Vector Integer Merge and Move Instructions */
+ static bool trans_vmv_v_v(DisasContext *s, arg_vmv_v_v *a)
+ {
+-    if (vext_check_isa_ill(s) &&
+-        vext_check_reg(s, a->rd, false) &&
+-        vext_check_reg(s, a->rs1, false)) {
+-
++    if (require_rvv(s) &&
++        vext_check_isa_ill(s) &&
++        /* vmv.v.v has rs2 = 0 and vm = 1 */
++        vext_check_sss(s, a->rd, a->rs1, 0, 1)) {
+         if (s->vl_eq_vlmax) {
+             tcg_gen_gvec_mov(s->sew, vreg_ofs(s, a->rd),
+                              vreg_ofs(s, a->rs1),
+@@ -1674,9 +1943,10 @@ static bool trans_vmv_v_v(DisasContext *s, arg_vmv_v_v *a)
+ typedef void gen_helper_vmv_vx(TCGv_ptr, TCGv_i64, TCGv_env, TCGv_i32);
+ static bool trans_vmv_v_x(DisasContext *s, arg_vmv_v_x *a)
+ {
+-    if (vext_check_isa_ill(s) &&
+-        vext_check_reg(s, a->rd, false)) {
+-
++    if (require_rvv(s) &&
++        vext_check_isa_ill(s) &&
++        /* vmv.v.x has rs2 = 0 and vm = 1 */
++        vext_check_ss(s, a->rd, 0, 1)) {
+         TCGv s1;
+         TCGLabel *over = gen_new_label();
+         tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
+@@ -1717,9 +1987,10 @@ static bool trans_vmv_v_x(DisasContext *s, arg_vmv_v_x *a)
+ 
+ static bool trans_vmv_v_i(DisasContext *s, arg_vmv_v_i *a)
+ {
+-    if (vext_check_isa_ill(s) &&
+-        vext_check_reg(s, a->rd, false)) {
+-
++    if (require_rvv(s) &&
++        vext_check_isa_ill(s) &&
++        /* vmv.v.i has rs2 = 0 and vm = 1 */
++        vext_check_ss(s, a->rd, 0, 1)) {
+         int64_t simm = sextract64(a->rs1, 0, 5);
+         if (s->vl_eq_vlmax) {
+             tcg_gen_gvec_dup_imm(s->sew, vreg_ofs(s, a->rd),
+@@ -1821,12 +2092,10 @@ GEN_OPIVI_NARROW_TRANS(vnclip_vi, 1, vnclip_vx)
+  */
+ static bool opfvv_check(DisasContext *s, arg_rmrr *a)
+ {
+-    return (vext_check_isa_ill(s) &&
+-            vext_check_overlap_mask(s, a->rd, a->vm, false) &&
+-            vext_check_reg(s, a->rd, false) &&
+-            vext_check_reg(s, a->rs2, false) &&
+-            vext_check_reg(s, a->rs1, false) &&
+-            (s->sew != 0));
++    return require_rvv(s) &&
++           require_rvf(s) &&
++           vext_check_isa_ill(s) &&
++           vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm);
+ }
+ 
+ /* OPFVV without GVEC IR */
+@@ -1891,17 +2160,16 @@ static bool opfvf_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
+     return true;
+ }
+ 
+-static bool opfvf_check(DisasContext *s, arg_rmrr *a)
+-{
+ /*
+  * If the current SEW does not correspond to a supported IEEE floating-point
+  * type, an illegal instruction exception is raised
+  */
+-    return (vext_check_isa_ill(s) &&
+-            vext_check_overlap_mask(s, a->rd, a->vm, false) &&
+-            vext_check_reg(s, a->rd, false) &&
+-            vext_check_reg(s, a->rs2, false) &&
+-            (s->sew != 0));
++static bool opfvf_check(DisasContext *s, arg_rmrr *a)
++{
++    return require_rvv(s) &&
++           require_rvf(s) &&
++           vext_check_isa_ill(s) &&
++           vext_check_ss(s, a->rd, a->rs2, a->vm);
+ }
+ 
+ /* OPFVF without GVEC IR */
+@@ -1931,16 +2199,10 @@ GEN_OPFVF_TRANS(vfrsub_vf,  opfvf_check)
+ /* Vector Widening Floating-Point Add/Subtract Instructions */
+ static bool opfvv_widen_check(DisasContext *s, arg_rmrr *a)
+ {
+-    return (vext_check_isa_ill(s) &&
+-            vext_check_overlap_mask(s, a->rd, a->vm, true) &&
+-            vext_check_reg(s, a->rd, true) &&
+-            vext_check_reg(s, a->rs2, false) &&
+-            vext_check_reg(s, a->rs1, false) &&
+-            vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs2,
+-                                     1 << s->lmul) &&
+-            vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs1,
+-                                     1 << s->lmul) &&
+-            (s->lmul < 0x3) && (s->sew < 0x3) && (s->sew != 0));
++    return require_rvv(s) &&
++           require_rvf(s) &&
++           vext_check_isa_ill(s) &&
++           vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm);
+ }
+ 
+ /* OPFVV with WIDEN */
+@@ -1974,13 +2236,10 @@ GEN_OPFVV_WIDEN_TRANS(vfwsub_vv, opfvv_widen_check)
+ 
+ static bool opfvf_widen_check(DisasContext *s, arg_rmrr *a)
+ {
+-    return (vext_check_isa_ill(s) &&
+-            vext_check_overlap_mask(s, a->rd, a->vm, true) &&
+-            vext_check_reg(s, a->rd, true) &&
+-            vext_check_reg(s, a->rs2, false) &&
+-            vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs2,
+-                                     1 << s->lmul) &&
+-            (s->lmul < 0x3) && (s->sew < 0x3) && (s->sew != 0));
++    return require_rvv(s) &&
++           require_rvf(s) &&
++           vext_check_isa_ill(s) &&
++           vext_check_ds(s, a->rd, a->rs2, a->vm);
+ }
+ 
+ /* OPFVF with WIDEN */
+@@ -2006,14 +2265,10 @@ GEN_OPFVF_WIDEN_TRANS(vfwsub_vf)
+ 
+ static bool opfwv_widen_check(DisasContext *s, arg_rmrr *a)
+ {
+-    return (vext_check_isa_ill(s) &&
+-            vext_check_overlap_mask(s, a->rd, a->vm, true) &&
+-            vext_check_reg(s, a->rd, true) &&
+-            vext_check_reg(s, a->rs2, true) &&
+-            vext_check_reg(s, a->rs1, false) &&
+-            vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs1,
+-                                     1 << s->lmul) &&
+-            (s->lmul < 0x3) && (s->sew < 0x3) && (s->sew != 0));
++    return require_rvv(s) &&
++           require_rvf(s) &&
++           vext_check_isa_ill(s) &&
++           vext_check_dds(s, a->rd, a->rs1, a->rs2, a->vm);
+ }
+ 
+ /* WIDEN OPFVV with WIDEN */
+@@ -2047,11 +2302,10 @@ GEN_OPFWV_WIDEN_TRANS(vfwsub_wv)
+ 
+ static bool opfwf_widen_check(DisasContext *s, arg_rmrr *a)
+ {
+-    return (vext_check_isa_ill(s) &&
+-            vext_check_overlap_mask(s, a->rd, a->vm, true) &&
+-            vext_check_reg(s, a->rd, true) &&
+-            vext_check_reg(s, a->rs2, true) &&
+-            (s->lmul < 0x3) && (s->sew < 0x3) && (s->sew != 0));
++    return require_rvv(s) &&
++           require_rvf(s) &&
++           vext_check_isa_ill(s) &&
++           vext_check_dd(s, a->rd, a->rs2, a->vm);
+ }
+ 
+ /* WIDEN OPFVF with WIDEN */
+@@ -2122,11 +2376,11 @@ GEN_OPFVF_WIDEN_TRANS(vfwnmsac_vf)
+  */
+ static bool opfv_check(DisasContext *s, arg_rmr *a)
+ {
+-   return (vext_check_isa_ill(s) &&
+-            vext_check_overlap_mask(s, a->rd, a->vm, false) &&
+-            vext_check_reg(s, a->rd, false) &&
+-            vext_check_reg(s, a->rs2, false) &&
+-            (s->sew != 0));
++    return require_rvv(s) &&
++           require_rvf(s) &&
++           vext_check_isa_ill(s) &&
++           /* OPFV instructions ignore vs1 check */
++           vext_check_ss(s, a->rd, a->rs2, a->vm);
+ }
+ 
+ #define GEN_OPFV_TRANS(NAME, CHECK)                                \
+@@ -2174,13 +2428,10 @@ GEN_OPFVF_TRANS(vfsgnjx_vf, opfvf_check)
+ /* Vector Floating-Point Compare Instructions */
+ static bool opfvv_cmp_check(DisasContext *s, arg_rmrr *a)
+ {
+-    return (vext_check_isa_ill(s) &&
+-            vext_check_reg(s, a->rs2, false) &&
+-            vext_check_reg(s, a->rs1, false) &&
+-            (s->sew != 0) &&
+-            ((vext_check_overlap_group(a->rd, 1, a->rs1, 1 << s->lmul) &&
+-              vext_check_overlap_group(a->rd, 1, a->rs2, 1 << s->lmul)) ||
+-             (s->lmul == 0)));
++    return require_rvv(s) &&
++           require_rvf(s) &&
++           vext_check_isa_ill(s) &&
++           vext_check_mss(s, a->rd, a->rs1, a->rs2);
+ }
+ 
+ GEN_OPFVV_TRANS(vmfeq_vv, opfvv_cmp_check)
+@@ -2191,11 +2442,10 @@ GEN_OPFVV_TRANS(vmford_vv, opfvv_cmp_check)
+ 
+ static bool opfvf_cmp_check(DisasContext *s, arg_rmrr *a)
+ {
+-    return (vext_check_isa_ill(s) &&
+-            vext_check_reg(s, a->rs2, false) &&
+-            (s->sew != 0) &&
+-            (vext_check_overlap_group(a->rd, 1, a->rs2, 1 << s->lmul) ||
+-             (s->lmul == 0)));
++    return require_rvv(s) &&
++           require_rvf(s) &&
++           vext_check_isa_ill(s) &&
++           vext_check_ms(s, a->rd, a->rs2);
+ }
+ 
+ GEN_OPFVF_TRANS(vmfeq_vf, opfvf_cmp_check)
+@@ -2214,10 +2464,10 @@ GEN_OPFVF_TRANS(vfmerge_vfm,  opfvf_check)
+ 
+ static bool trans_vfmv_v_f(DisasContext *s, arg_vfmv_v_f *a)
+ {
+-    if (vext_check_isa_ill(s) &&
+-        vext_check_reg(s, a->rd, false) &&
+-        (s->sew != 0)) {
+-
++    if (require_rvv(s) &&
++        require_rvf(s) &&
++        vext_check_isa_ill(s) &&
++        require_align(a->rd, s->lmul)) {
+         if (s->vl_eq_vlmax) {
+             tcg_gen_gvec_dup_i64(s->sew, vreg_ofs(s, a->rd),
+                                  MAXSZ(s), MAXSZ(s), cpu_fpr[a->rs1]);
+@@ -2263,13 +2513,11 @@ GEN_OPFV_TRANS(vfcvt_f_x_v, opfv_check)
+  */
+ static bool opfv_widen_check(DisasContext *s, arg_rmr *a)
+ {
+-    return (vext_check_isa_ill(s) &&
+-            vext_check_overlap_mask(s, a->rd, a->vm, true) &&
+-            vext_check_reg(s, a->rd, true) &&
+-            vext_check_reg(s, a->rs2, false) &&
+-            vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs2,
+-                                     1 << s->lmul) &&
+-            (s->lmul < 0x3) && (s->sew < 0x3) && (s->sew != 0));
++    return require_rvv(s) &&
++           require_scale_rvf(s) &&
++           (s->sew != MO_8) &&
++           vext_check_isa_ill(s) &&
++           vext_check_ds(s, a->rd, a->rs2, a->vm);
+ }
+ 
+ #define GEN_OPFV_WIDEN_TRANS(NAME)                                 \
+@@ -2311,13 +2559,12 @@ GEN_OPFV_WIDEN_TRANS(vfwcvt_f_f_v)
+  */
+ static bool opfv_narrow_check(DisasContext *s, arg_rmr *a)
+ {
+-    return (vext_check_isa_ill(s) &&
+-            vext_check_overlap_mask(s, a->rd, a->vm, false) &&
+-            vext_check_reg(s, a->rd, false) &&
+-            vext_check_reg(s, a->rs2, true) &&
+-            vext_check_overlap_group(a->rd, 1 << s->lmul, a->rs2,
+-                                     2 << s->lmul) &&
+-            (s->lmul < 0x3) && (s->sew < 0x3) && (s->sew != 0));
++    return require_rvv(s) &&
++           require_rvf(s) &&
++           (s->sew != MO_64) &&
++           vext_check_isa_ill(s) &&
++           /* OPFV narrowing instructions ignore vs1 check */
++           vext_check_sd(s, a->rd, a->rs2, a->vm);
+ }
+ 
+ #define GEN_OPFV_NARROW_TRANS(NAME)                                \
+@@ -2357,7 +2604,9 @@ GEN_OPFV_NARROW_TRANS(vfncvt_f_f_v)
+ /* Vector Single-Width Integer Reduction Instructions */
+ static bool reduction_check(DisasContext *s, arg_rmrr *a)
+ {
+-    return vext_check_isa_ill(s) && vext_check_reg(s, a->rs2, false);
++    return require_rvv(s) &&
++           vext_check_isa_ill(s) &&
++           vext_check_reduction(s, a->rs2);
+ }
+ 
+ GEN_OPIVV_TRANS(vredsum_vs, reduction_check)
+@@ -2370,8 +2619,13 @@ GEN_OPIVV_TRANS(vredor_vs, reduction_check)
+ GEN_OPIVV_TRANS(vredxor_vs, reduction_check)
+ 
+ /* Vector Widening Integer Reduction Instructions */
+-GEN_OPIVV_WIDEN_TRANS(vwredsum_vs, reduction_check)
+-GEN_OPIVV_WIDEN_TRANS(vwredsumu_vs, reduction_check)
++static bool reduction_widen_check(DisasContext *s, arg_rmrr *a)
++{
++    return reduction_check(s, a) && (s->sew < MO_64);
++}
++
++GEN_OPIVV_WIDEN_TRANS(vwredsum_vs, reduction_widen_check)
++GEN_OPIVV_WIDEN_TRANS(vwredsumu_vs, reduction_widen_check)
+ 
+ /* Vector Single-Width Floating-Point Reduction Instructions */
+ GEN_OPFVV_TRANS(vfredsum_vs, reduction_check)
+@@ -2419,7 +2673,8 @@ GEN_MM_TRANS(vmxnor_mm)
+ /* Vector mask population count vmpopc */
+ static bool trans_vmpopc_m(DisasContext *s, arg_rmr *a)
+ {
+-    if (vext_check_isa_ill(s)) {
++    if (require_rvv(s) &&
++        vext_check_isa_ill(s)) {
+         TCGv_ptr src2, mask;
+         TCGv dst;
+         TCGv_i32 desc;
+@@ -2450,7 +2705,8 @@ static bool trans_vmpopc_m(DisasContext *s, arg_rmr *a)
+ /* vmfirst find-first-set mask bit */
+ static bool trans_vmfirst_m(DisasContext *s, arg_rmr *a)
+ {
+-    if (vext_check_isa_ill(s)) {
++    if (require_rvv(s) &&
++        vext_check_isa_ill(s)) {
+         TCGv_ptr src2, mask;
+         TCGv dst;
+         TCGv_i32 desc;
+@@ -2509,10 +2765,11 @@ GEN_M_TRANS(vmsof_m)
+ /* Vector Iota Instruction */
+ static bool trans_viota_m(DisasContext *s, arg_viota_m *a)
+ {
+-    if (vext_check_isa_ill(s) &&
+-        vext_check_reg(s, a->rd, false) &&
+-        vext_check_overlap_group(a->rd, 1 << s->lmul, a->rs2, 1) &&
+-        (a->vm != 0 || a->rd != 0)) {
++    if (require_rvv(s) &&
++        vext_check_isa_ill(s) &&
++        require_noover(a->rd, s->lmul, a->rs2, 0) &&
++        require_vm(a->vm, a->rd) &&
++        require_align(a->rd, s->lmul)) {
+         uint32_t data = 0;
+         TCGLabel *over = gen_new_label();
+         tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
+@@ -2536,9 +2793,10 @@ static bool trans_viota_m(DisasContext *s, arg_viota_m *a)
+ /* Vector Element Index Instruction */
+ static bool trans_vid_v(DisasContext *s, arg_vid_v *a)
+ {
+-    if (vext_check_isa_ill(s) &&
+-        vext_check_reg(s, a->rd, false) &&
+-        vext_check_overlap_mask(s, a->rd, a->vm, false)) {
++    if (require_rvv(s) &&
++        vext_check_isa_ill(s) &&
++        require_align(a->rd, s->lmul) &&
++        require_vm(a->vm, a->rd)) {
+         uint32_t data = 0;
+         TCGLabel *over = gen_new_label();
+         tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
+@@ -2788,41 +3046,48 @@ static bool trans_vfmv_s_f(DisasContext *s, arg_vfmv_s_f *a)
+ /* Vector Slide Instructions */
+ static bool slideup_check(DisasContext *s, arg_rmrr *a)
+ {
+-    return (vext_check_isa_ill(s) &&
+-            vext_check_overlap_mask(s, a->rd, a->vm, true) &&
+-            vext_check_reg(s, a->rd, false) &&
+-            vext_check_reg(s, a->rs2, false) &&
+-            (a->rd != a->rs2));
++    return require_rvv(s) &&
++           vext_check_isa_ill(s) &&
++           vext_check_slide(s, a->rd, a->rs2, a->vm, true);
+ }
+ 
+ GEN_OPIVX_TRANS(vslideup_vx, slideup_check)
+ GEN_OPIVX_TRANS(vslide1up_vx, slideup_check)
+ GEN_OPIVI_TRANS(vslideup_vi, 1, vslideup_vx, slideup_check)
+ 
+-GEN_OPIVX_TRANS(vslidedown_vx, opivx_check)
+-GEN_OPIVX_TRANS(vslide1down_vx, opivx_check)
+-GEN_OPIVI_TRANS(vslidedown_vi, 1, vslidedown_vx, opivx_check)
++static bool slidedown_check(DisasContext *s, arg_rmrr *a)
++{
++    return require_rvv(s) &&
++           vext_check_isa_ill(s) &&
++           vext_check_slide(s, a->rd, a->rs2, a->vm, false);
++}
++
++GEN_OPIVX_TRANS(vslidedown_vx, slidedown_check)
++GEN_OPIVX_TRANS(vslide1down_vx, slidedown_check)
++GEN_OPIVI_TRANS(vslidedown_vi, 1, vslidedown_vx, slidedown_check)
+ 
+ /* Vector Register Gather Instruction */
+ static bool vrgather_vv_check(DisasContext *s, arg_rmrr *a)
+ {
+-    return (vext_check_isa_ill(s) &&
+-            vext_check_overlap_mask(s, a->rd, a->vm, true) &&
+-            vext_check_reg(s, a->rd, false) &&
+-            vext_check_reg(s, a->rs1, false) &&
+-            vext_check_reg(s, a->rs2, false) &&
+-            (a->rd != a->rs2) && (a->rd != a->rs1));
++    return require_rvv(s) &&
++           vext_check_isa_ill(s) &&
++           require_align(a->rd, s->lmul) &&
++           require_align(a->rs1, s->lmul) &&
++           require_align(a->rs2, s->lmul) &&
++           (a->rd != a->rs2 && a->rd != a->rs1) &&
++           require_vm(a->vm, a->rd);
+ }
+ 
+ GEN_OPIVV_TRANS(vrgather_vv, vrgather_vv_check)
+ 
+ static bool vrgather_vx_check(DisasContext *s, arg_rmrr *a)
+ {
+-    return (vext_check_isa_ill(s) &&
+-            vext_check_overlap_mask(s, a->rd, a->vm, true) &&
+-            vext_check_reg(s, a->rd, false) &&
+-            vext_check_reg(s, a->rs2, false) &&
+-            (a->rd != a->rs2));
++    return require_rvv(s) &&
++           vext_check_isa_ill(s) &&
++           require_align(a->rd, s->lmul) &&
++           require_align(a->rs2, s->lmul) &&
++           (a->rd != a->rs2) &&
++           require_vm(a->vm, a->rd);
+ }
+ 
+ /* vrgather.vx vd, vs2, rs1, vm # vd[i] = (x[rs1] >= VLMAX) ? 0 : vs2[rs1] */
+@@ -2883,14 +3148,20 @@ static bool trans_vrgather_vi(DisasContext *s, arg_rmrr *a)
+     return true;
+ }
+ 
+-/* Vector Compress Instruction */
++/*
++ * Vector Compress Instruction
++ *
++ * The destination vector register group cannot overlap the
++ * source vector register group or the source mask register.
++ */
+ static bool vcompress_vm_check(DisasContext *s, arg_r *a)
+ {
+-    return (vext_check_isa_ill(s) &&
+-            vext_check_reg(s, a->rd, false) &&
+-            vext_check_reg(s, a->rs2, false) &&
+-            vext_check_overlap_group(a->rd, 1 << s->lmul, a->rs1, 1) &&
+-            (a->rd != a->rs2));
++    return require_rvv(s) &&
++           vext_check_isa_ill(s) &&
++           require_align(a->rd, s->lmul) &&
++           require_align(a->rs2, s->lmul) &&
++           (a->rd != a->rs2) &&
++           !is_overlapped(a->rd, 1 << MAX(s->lmul, 0), a->rs1, 1);         \
+ }
+ 
+ static bool trans_vcompress_vm(DisasContext *s, arg_r *a)
+-- 
+2.33.1
+
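Patch 0022 above replaces the coarse vext_check_reg()/vext_check_overlap_group() predicates with per-format checks built from require_align(), require_vm(), require_noover() and is_overlapped(). A small self-contained sketch of the two low-level predicates, reusing the same formulas as the hunks (the driver values below are made up for illustration):

#include <stdbool.h>
#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Same formula as the patch: groups [astart, astart+asize) and
 * [bstart, bstart+bsize) overlap iff their combined span is shorter
 * than the sum of their lengths. */
static bool is_overlapped(int astart, int asize, int bstart, int bsize)
{
    int aend = astart + asize;
    int bend = bstart + bsize;
    return MAX(aend, bend) - MIN(astart, bstart) < asize + bsize;
}

/* A register number is a legal group base when its low lmul bits are
 * zero (equivalent to extract32(val, 0, lmul) == 0 in the patch);
 * fractional LMUL (lmul <= 0) places no alignment requirement. */
static bool require_align(int val, int lmul)
{
    return lmul <= 0 || (val & ((1 << lmul) - 1)) == 0;
}

int main(void)
{
    /* LMUL = 2 (groups of 4 registers): v4 is a legal base, v6 is not. */
    printf("require_align(4, 2) = %d\n", require_align(4, 2));  /* 1 */
    printf("require_align(6, 2) = %d\n", require_align(6, 2));  /* 0 */

    /* v8..v11 overlaps v10..v11, but not v12..v13. */
    printf("is_overlapped(8, 4, 10, 2) = %d\n", is_overlapped(8, 4, 10, 2)); /* 1 */
    printf("is_overlapped(8, 4, 12, 2) = %d\n", is_overlapped(8, 4, 12, 2)); /* 0 */
    return 0;
}

For example, with LMUL = 2 a widening destination must satisfy require_align(vd, s->lmul + 1), i.e. be a multiple of 8, which is exactly what vext_wide_check_common() in the hunks above enforces.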

+ 299 - 0
recipes-devtools/qemu/qemu/0023-target-riscv-introduce-more-imm-value-modes-in-trans.patch

@@ -0,0 +1,299 @@
+From 305f1792e72938914801e9d20b1b2f2d16d57cc7 Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Fri, 7 Aug 2020 15:28:58 +0800
+Subject: [PATCH 023/107] target/riscv: introduce more imm value modes in
+ translator functions
+
+Immediate value in translator function is extended not only
+zero-extended and sign-extended but with more modes to be applicable
+with multiple formats of vector instructions.
+
+* IMM_ZX:         Zero-extended
+* IMM_SX:         Sign-extended
+* IMM_TRUNC_SEW:  Truncate to log(SEW) bit
+* IMM_TRUNC_2SEW: Truncate to log(2*SEW) bit
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
+---
+ target/riscv/insn_trans/trans_rvv.c.inc | 115 ++++++++++++++----------
+ 1 file changed, 66 insertions(+), 49 deletions(-)
+
+diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
+index f666c64bbe..99d14caa14 100644
+--- a/target/riscv/insn_trans/trans_rvv.c.inc
++++ b/target/riscv/insn_trans/trans_rvv.c.inc
+@@ -1307,8 +1307,32 @@ static void tcg_gen_gvec_rsubs(unsigned vece, uint32_t dofs, uint32_t aofs,
+ 
+ GEN_OPIVX_GVEC_TRANS(vrsub_vx, rsubs)
+ 
++typedef enum {
++    IMM_ZX,         /* Zero-extended */
++    IMM_SX,         /* Sign-extended */
++    IMM_TRUNC_SEW,  /* Truncate to log(SEW) bits */
++    IMM_TRUNC_2SEW, /* Truncate to log(2*SEW) bits */
++} imm_mode_t;
++
++static int64_t extract_imm(DisasContext *s, uint32_t imm, imm_mode_t imm_mode)
++{
++    switch (imm_mode) {
++    case IMM_ZX:
++        return extract64(imm, 0, 5);
++    case IMM_SX:
++        return sextract64(imm, 0, 5);
++    case IMM_TRUNC_SEW:
++        return extract64(imm, 0, s->sew + 3);
++    case IMM_TRUNC_2SEW:
++        return extract64(imm, 0, s->sew + 4);
++    default:
++        g_assert_not_reached();
++    }
++}
++
+ static bool opivi_trans(uint32_t vd, uint32_t imm, uint32_t vs2, uint32_t vm,
+-                        gen_helper_opivx *fn, DisasContext *s, int zx)
++                        gen_helper_opivx *fn, DisasContext *s,
++                        imm_mode_t imm_mode)
+ {
+     TCGv_ptr dest, src2, mask;
+     TCGv src1;
+@@ -1321,11 +1345,8 @@ static bool opivi_trans(uint32_t vd, uint32_t imm, uint32_t vs2, uint32_t vm,
+     dest = tcg_temp_new_ptr();
+     mask = tcg_temp_new_ptr();
+     src2 = tcg_temp_new_ptr();
+-    if (zx) {
+-        src1 = tcg_const_tl(imm);
+-    } else {
+-        src1 = tcg_const_tl(sextract64(imm, 0, 5));
+-    }
++    src1 = tcg_const_tl(extract_imm(s, imm, imm_mode));
++
+     data = FIELD_DP32(data, VDATA, VM, vm);
+     data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+     desc = tcg_const_i32(simd_desc(0, s->vlen / 8, data));
+@@ -1351,28 +1372,23 @@ typedef void GVecGen2iFn(unsigned, uint32_t, uint32_t, int64_t,
+ 
+ static inline bool
+ do_opivi_gvec(DisasContext *s, arg_rmrr *a, GVecGen2iFn *gvec_fn,
+-              gen_helper_opivx *fn, int zx)
++              gen_helper_opivx *fn, imm_mode_t imm_mode)
+ {
+     if (!opivx_check(s, a)) {
+         return false;
+     }
+ 
+     if (a->vm && s->vl_eq_vlmax) {
+-        if (zx) {
+-            gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2),
+-                    extract64(a->rs1, 0, 5), MAXSZ(s), MAXSZ(s));
+-        } else {
+-            gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2),
+-                    sextract64(a->rs1, 0, 5), MAXSZ(s), MAXSZ(s));
+-        }
++        gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2),
++                extract_imm(s, a->rs1, imm_mode), MAXSZ(s), MAXSZ(s));
+         mark_vs_dirty(s);
+         return true;
+     }
+-    return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s, zx);
++    return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s, imm_mode);
+ }
+ 
+ /* OPIVI with GVEC IR */
+-#define GEN_OPIVI_GVEC_TRANS(NAME, ZX, OPIVX, SUF) \
++#define GEN_OPIVI_GVEC_TRANS(NAME, IMM_MODE, OPIVX, SUF) \
+ static bool trans_##NAME(DisasContext *s, arg_rmrr *a)             \
+ {                                                                  \
+     static gen_helper_opivx * const fns[4] = {                     \
+@@ -1380,10 +1396,10 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a)             \
+         gen_helper_##OPIVX##_w, gen_helper_##OPIVX##_d,            \
+     };                                                             \
+     return do_opivi_gvec(s, a, tcg_gen_gvec_##SUF,                 \
+-                         fns[s->sew], ZX);                         \
++                         fns[s->sew], IMM_MODE);                   \
+ }
+ 
+-GEN_OPIVI_GVEC_TRANS(vadd_vi, 0, vadd_vx, addi)
++GEN_OPIVI_GVEC_TRANS(vadd_vi, IMM_SX, vadd_vx, addi)
+ 
+ static void tcg_gen_gvec_rsubi(unsigned vece, uint32_t dofs, uint32_t aofs,
+                                int64_t c, uint32_t oprsz, uint32_t maxsz)
+@@ -1393,7 +1409,7 @@ static void tcg_gen_gvec_rsubi(unsigned vece, uint32_t dofs, uint32_t aofs,
+     tcg_temp_free_i64(tmp);
+ }
+ 
+-GEN_OPIVI_GVEC_TRANS(vrsub_vi, 0, vrsub_vx, rsubi)
++GEN_OPIVI_GVEC_TRANS(vrsub_vi, IMM_SX, vrsub_vx, rsubi)
+ 
+ /* Vector Widening Integer Add/Subtract */
+ 
+@@ -1648,7 +1664,7 @@ GEN_OPIVX_TRANS(vmadc_vxm, opivx_vmadc_check)
+ GEN_OPIVX_TRANS(vmsbc_vxm, opivx_vmadc_check)
+ 
+ /* OPIVI without GVEC IR */
+-#define GEN_OPIVI_TRANS(NAME, ZX, OPIVX, CHECK)                          \
++#define GEN_OPIVI_TRANS(NAME, IMM_MODE, OPIVX, CHECK)                    \
+ static bool trans_##NAME(DisasContext *s, arg_rmrr *a)                   \
+ {                                                                        \
+     if (CHECK(s, a)) {                                                   \
+@@ -1657,13 +1673,13 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a)                   \
+             gen_helper_##OPIVX##_w, gen_helper_##OPIVX##_d,              \
+         };                                                               \
+         return opivi_trans(a->rd, a->rs1, a->rs2, a->vm,                 \
+-                           fns[s->sew], s, ZX);                          \
++                           fns[s->sew], s, IMM_MODE);                    \
+     }                                                                    \
+     return false;                                                        \
+ }
+ 
+-GEN_OPIVI_TRANS(vadc_vim, 0, vadc_vxm, opivx_vadc_check)
+-GEN_OPIVI_TRANS(vmadc_vim, 0, vmadc_vxm, opivx_vmadc_check)
++GEN_OPIVI_TRANS(vadc_vim, IMM_SX, vadc_vxm, opivx_vadc_check)
++GEN_OPIVI_TRANS(vmadc_vim, IMM_SX, vmadc_vxm, opivx_vmadc_check)
+ 
+ /* Vector Bitwise Logical Instructions */
+ GEN_OPIVV_GVEC_TRANS(vand_vv, and)
+@@ -1672,9 +1688,9 @@ GEN_OPIVV_GVEC_TRANS(vxor_vv, xor)
+ GEN_OPIVX_GVEC_TRANS(vand_vx, ands)
+ GEN_OPIVX_GVEC_TRANS(vor_vx,  ors)
+ GEN_OPIVX_GVEC_TRANS(vxor_vx, xors)
+-GEN_OPIVI_GVEC_TRANS(vand_vi, 0, vand_vx, andi)
+-GEN_OPIVI_GVEC_TRANS(vor_vi, 0, vor_vx,  ori)
+-GEN_OPIVI_GVEC_TRANS(vxor_vi, 0, vxor_vx, xori)
++GEN_OPIVI_GVEC_TRANS(vand_vi, IMM_SX, vand_vx, andi)
++GEN_OPIVI_GVEC_TRANS(vor_vi, IMM_SX, vor_vx,  ori)
++GEN_OPIVI_GVEC_TRANS(vxor_vi, IMM_SX, vxor_vx, xori)
+ 
+ /* Vector Single-Width Bit Shift Instructions */
+ GEN_OPIVV_GVEC_TRANS(vsll_vv,  shlv)
+@@ -1725,9 +1741,9 @@ GEN_OPIVX_GVEC_SHIFT_TRANS(vsll_vx,  shls)
+ GEN_OPIVX_GVEC_SHIFT_TRANS(vsrl_vx,  shrs)
+ GEN_OPIVX_GVEC_SHIFT_TRANS(vsra_vx,  sars)
+ 
+-GEN_OPIVI_GVEC_TRANS(vsll_vi, 1, vsll_vx,  shli)
+-GEN_OPIVI_GVEC_TRANS(vsrl_vi, 1, vsrl_vx,  shri)
+-GEN_OPIVI_GVEC_TRANS(vsra_vi, 1, vsra_vx,  sari)
++GEN_OPIVI_GVEC_TRANS(vsll_vi, IMM_ZX, vsll_vx, shli)
++GEN_OPIVI_GVEC_TRANS(vsrl_vi, IMM_ZX, vsrl_vx, shri)
++GEN_OPIVI_GVEC_TRANS(vsra_vi, IMM_ZX, vsra_vx, sari)
+ 
+ /* Vector Narrowing Integer Right Shift Instructions */
+ static bool opivv_narrow_check(DisasContext *s, arg_rmrr *a)
+@@ -1792,7 +1808,7 @@ GEN_OPIVX_NARROW_TRANS(vnsra_vx)
+ GEN_OPIVX_NARROW_TRANS(vnsrl_vx)
+ 
+ /* OPIVI with NARROW */
+-#define GEN_OPIVI_NARROW_TRANS(NAME, ZX, OPIVX)                          \
++#define GEN_OPIVI_NARROW_TRANS(NAME, IMM_MODE, OPIVX)                    \
+ static bool trans_##NAME(DisasContext *s, arg_rmrr *a)                   \
+ {                                                                        \
+     if (opivx_narrow_check(s, a)) {                                      \
+@@ -1802,13 +1818,13 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a)                   \
+             gen_helper_##OPIVX##_w,                                      \
+         };                                                               \
+         return opivi_trans(a->rd, a->rs1, a->rs2, a->vm,                 \
+-                           fns[s->sew], s, ZX);                          \
++                           fns[s->sew], s, IMM_MODE);                    \
+     }                                                                    \
+     return false;                                                        \
+ }
+ 
+-GEN_OPIVI_NARROW_TRANS(vnsra_vi, 1, vnsra_vx)
+-GEN_OPIVI_NARROW_TRANS(vnsrl_vi, 1, vnsrl_vx)
++GEN_OPIVI_NARROW_TRANS(vnsra_vi, IMM_ZX, vnsra_vx)
++GEN_OPIVI_NARROW_TRANS(vnsrl_vi, IMM_ZX, vnsrl_vx)
+ 
+ /* Vector Integer Comparison Instructions */
+ /*
+@@ -1846,12 +1862,12 @@ GEN_OPIVX_TRANS(vmsle_vx, opivx_cmp_check)
+ GEN_OPIVX_TRANS(vmsgtu_vx, opivx_cmp_check)
+ GEN_OPIVX_TRANS(vmsgt_vx, opivx_cmp_check)
+ 
+-GEN_OPIVI_TRANS(vmseq_vi, 0, vmseq_vx, opivx_cmp_check)
+-GEN_OPIVI_TRANS(vmsne_vi, 0, vmsne_vx, opivx_cmp_check)
+-GEN_OPIVI_TRANS(vmsleu_vi, 1, vmsleu_vx, opivx_cmp_check)
+-GEN_OPIVI_TRANS(vmsle_vi, 0, vmsle_vx, opivx_cmp_check)
+-GEN_OPIVI_TRANS(vmsgtu_vi, 1, vmsgtu_vx, opivx_cmp_check)
+-GEN_OPIVI_TRANS(vmsgt_vi, 0, vmsgt_vx, opivx_cmp_check)
++GEN_OPIVI_TRANS(vmseq_vi, IMM_SX, vmseq_vx, opivx_cmp_check)
++GEN_OPIVI_TRANS(vmsne_vi, IMM_SX, vmsne_vx, opivx_cmp_check)
++GEN_OPIVI_TRANS(vmsleu_vi, IMM_ZX, vmsleu_vx, opivx_cmp_check)
++GEN_OPIVI_TRANS(vmsle_vi, IMM_SX, vmsle_vx, opivx_cmp_check)
++GEN_OPIVI_TRANS(vmsgtu_vi, IMM_ZX, vmsgtu_vx, opivx_cmp_check)
++GEN_OPIVI_TRANS(vmsgt_vi, IMM_SX, vmsgt_vx, opivx_cmp_check)
+ 
+ /* Vector Integer Min/Max Instructions */
+ GEN_OPIVV_GVEC_TRANS(vminu_vv, umin)
+@@ -2027,7 +2043,7 @@ static bool trans_vmv_v_i(DisasContext *s, arg_vmv_v_i *a)
+ 
+ GEN_OPIVV_TRANS(vmerge_vvm, opivv_vadc_check)
+ GEN_OPIVX_TRANS(vmerge_vxm, opivx_vadc_check)
+-GEN_OPIVI_TRANS(vmerge_vim, 0, vmerge_vxm, opivx_vadc_check)
++GEN_OPIVI_TRANS(vmerge_vim, IMM_SX, vmerge_vxm, opivx_vadc_check)
+ 
+ /*
+  *** Vector Fixed-Point Arithmetic Instructions
+@@ -2042,8 +2058,8 @@ GEN_OPIVX_TRANS(vsaddu_vx,  opivx_check)
+ GEN_OPIVX_TRANS(vsadd_vx,  opivx_check)
+ GEN_OPIVX_TRANS(vssubu_vx,  opivx_check)
+ GEN_OPIVX_TRANS(vssub_vx,  opivx_check)
+-GEN_OPIVI_TRANS(vsaddu_vi, 1, vsaddu_vx, opivx_check)
+-GEN_OPIVI_TRANS(vsadd_vi, 0, vsadd_vx, opivx_check)
++GEN_OPIVI_TRANS(vsaddu_vi, IMM_ZX, vsaddu_vx, opivx_check)
++GEN_OPIVI_TRANS(vsadd_vi, IMM_SX, vsadd_vx, opivx_check)
+ 
+ /* Vector Single-Width Averaging Add and Subtract */
+ GEN_OPIVV_TRANS(vaadd_vv, opivv_check)
+@@ -2070,16 +2086,16 @@ GEN_OPIVV_TRANS(vssrl_vv, opivv_check)
+ GEN_OPIVV_TRANS(vssra_vv, opivv_check)
+ GEN_OPIVX_TRANS(vssrl_vx,  opivx_check)
+ GEN_OPIVX_TRANS(vssra_vx,  opivx_check)
+-GEN_OPIVI_TRANS(vssrl_vi, 1, vssrl_vx, opivx_check)
+-GEN_OPIVI_TRANS(vssra_vi, 0, vssra_vx, opivx_check)
++GEN_OPIVI_TRANS(vssrl_vi, IMM_ZX, vssrl_vx, opivx_check)
++GEN_OPIVI_TRANS(vssra_vi, IMM_SX, vssra_vx, opivx_check)
+ 
+ /* Vector Narrowing Fixed-Point Clip Instructions */
+ GEN_OPIVV_NARROW_TRANS(vnclipu_vv)
+ GEN_OPIVV_NARROW_TRANS(vnclip_vv)
+ GEN_OPIVX_NARROW_TRANS(vnclipu_vx)
+ GEN_OPIVX_NARROW_TRANS(vnclip_vx)
+-GEN_OPIVI_NARROW_TRANS(vnclipu_vi, 1, vnclipu_vx)
+-GEN_OPIVI_NARROW_TRANS(vnclip_vi, 1, vnclip_vx)
++GEN_OPIVI_NARROW_TRANS(vnclipu_vi, IMM_ZX, vnclipu_vx)
++GEN_OPIVI_NARROW_TRANS(vnclip_vi, IMM_ZX, vnclip_vx)
+ 
+ /*
+  *** Vector Float Point Arithmetic Instructions
+@@ -3053,7 +3069,7 @@ static bool slideup_check(DisasContext *s, arg_rmrr *a)
+ 
+ GEN_OPIVX_TRANS(vslideup_vx, slideup_check)
+ GEN_OPIVX_TRANS(vslide1up_vx, slideup_check)
+-GEN_OPIVI_TRANS(vslideup_vi, 1, vslideup_vx, slideup_check)
++GEN_OPIVI_TRANS(vslideup_vi, IMM_ZX, vslideup_vx, slideup_check)
+ 
+ static bool slidedown_check(DisasContext *s, arg_rmrr *a)
+ {
+@@ -3064,7 +3080,7 @@ static bool slidedown_check(DisasContext *s, arg_rmrr *a)
+ 
+ GEN_OPIVX_TRANS(vslidedown_vx, slidedown_check)
+ GEN_OPIVX_TRANS(vslide1down_vx, slidedown_check)
+-GEN_OPIVI_TRANS(vslidedown_vi, 1, vslidedown_vx, slidedown_check)
++GEN_OPIVI_TRANS(vslidedown_vi, IMM_ZX, vslidedown_vx, slidedown_check)
+ 
+ /* Vector Register Gather Instruction */
+ static bool vrgather_vv_check(DisasContext *s, arg_rmrr *a)
+@@ -3143,7 +3159,8 @@ static bool trans_vrgather_vi(DisasContext *s, arg_rmrr *a)
+             gen_helper_vrgather_vx_b, gen_helper_vrgather_vx_h,
+             gen_helper_vrgather_vx_w, gen_helper_vrgather_vx_d
+         };
+-        return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s, 1);
++        return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew],
++                           s, IMM_ZX);
+     }
+     return true;
+ }
+-- 
+2.33.1
+
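Note on the hunks above: the bare 0/1 zero-extend flag passed to opivi_trans() is replaced with the named modes IMM_SX and IMM_ZX, so each OPIVI translator states explicitly how its 5-bit immediate is extended. The standalone C sketch below is not part of the patch; extract_imm5() and the values in main() are invented purely for illustration of the two modes.

```c
#include <stdint.h>
#include <stdio.h>

typedef enum { IMM_ZX, IMM_SX } imm_mode_t;   /* mode names taken from the patch */

/* extract_imm5() is a hypothetical helper, not a QEMU function. */
static int64_t extract_imm5(uint32_t rs1_field, imm_mode_t mode)
{
    uint32_t imm = rs1_field & 0x1f;          /* the 5-bit immediate field */
    if (mode == IMM_ZX) {
        return imm;                           /* zero-extended: 0 .. 31 */
    }
    /* IMM_SX: sign-extend from bit 4, giving -16 .. 15 */
    return (int32_t)(imm << 27) >> 27;
}

int main(void)
{
    /* the same bit pattern reads as 31 under IMM_ZX and -1 under IMM_SX */
    printf("0x1f as IMM_ZX = %lld, as IMM_SX = %lld\n",
           (long long)extract_imm5(0x1f, IMM_ZX),
           (long long)extract_imm5(0x1f, IMM_SX));
    return 0;
}
```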

+ 87 - 0
recipes-devtools/qemu/qemu/0024-target-riscv-rvv-1.0-add-translation-time-nan-box-he.patch

@@ -0,0 +1,87 @@
+From d05effb256790ae2f68ed1bd9fe769bd4108fac5 Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Wed, 5 Aug 2020 11:36:39 +0800
+Subject: [PATCH 024/107] target/riscv: rvv:1.0: add translation-time nan-box
+ helper function
+
+* Add fp16 nan-box check generator function: if a 16-bit input is not
+  properly NaN-boxed, the input is replaced with the default qNaN.
+* Add do_nanbox() helper function to utilize gen_check_nanbox_X() to
+  generate the NaN-boxed floating-point values based on SEW setting.
+* Apply nanbox helper in opfvf_trans().
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
+---
+ target/riscv/insn_trans/trans_rvv.c.inc | 35 ++++++++++++++++++++++++-
+ 1 file changed, 34 insertions(+), 1 deletion(-)
+
+diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
+index 99d14caa14..dcffc14909 100644
+--- a/target/riscv/insn_trans/trans_rvv.c.inc
++++ b/target/riscv/insn_trans/trans_rvv.c.inc
+@@ -2100,6 +2100,33 @@ GEN_OPIVI_NARROW_TRANS(vnclip_vi, IMM_ZX, vnclip_vx)
+ /*
+  *** Vector Float Point Arithmetic Instructions
+  */
++
++/*
++ * As RVF-only cpus always have values NaN-boxed to 64-bits,
++ * RVF and RVD can be treated equally.
++ * We don't have to deal with the cases of: SEW > FLEN.
++ *
++ * If SEW < FLEN, check whether input fp register is a valid
++ * NaN-boxed value, in which case the least-significant SEW bits
+ * of the f register are used, else the canonical NaN value is used.
++ */
++static void do_nanbox(DisasContext *s, TCGv_i64 out, TCGv_i64 in)
++{
++    switch (s->sew) {
++    case 1:
++        gen_check_nanbox_h(out, in);
++        break;
++    case 2:
++        gen_check_nanbox_s(out, in);
++        break;
++    case 3:
++        tcg_gen_mov_i64(out, in);
++        break;
++    default:
++        g_assert_not_reached();
++    }
++}
++
+ /* Vector Single-Width Floating-Point Add/Subtract Instructions */
+ 
+ /*
+@@ -2152,6 +2179,7 @@ static bool opfvf_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
+ {
+     TCGv_ptr dest, src2, mask;
+     TCGv_i32 desc;
++    TCGv_i64 t1;
+ 
+     TCGLabel *over = gen_new_label();
+     tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
+@@ -2165,12 +2193,17 @@ static bool opfvf_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
+     tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, vs2));
+     tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
+ 
+-    fn(dest, mask, cpu_fpr[rs1], src2, cpu_env, desc);
++    /* NaN-box f[rs1] */
++    t1 = tcg_temp_new_i64();
++    do_nanbox(s, t1, cpu_fpr[rs1]);
++
++    fn(dest, mask, t1, src2, cpu_env, desc);
+ 
+     tcg_temp_free_ptr(dest);
+     tcg_temp_free_ptr(mask);
+     tcg_temp_free_ptr(src2);
+     tcg_temp_free_i32(desc);
++    tcg_temp_free_i64(t1);
+     mark_vs_dirty(s);
+     gen_set_label(over);
+     return true;
+-- 
+2.33.1
+
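As a rough illustration of the NaN-boxing rule that do_nanbox() applies above, the sketch below performs the single-precision check on plain integers rather than TCG temporaries; check_nanbox_s() and main() are invented for the example, and the qNaN constant used is the standard RISC-V canonical single-precision one.

```c
#include <stdint.h>
#include <stdio.h>

/*
 * A 32-bit float stored in a 64-bit f register is a valid NaN-boxed
 * value only if the upper 32 bits are all ones; otherwise the canonical
 * single-precision qNaN (0x7fc00000), itself NaN-boxed, is used instead.
 */
static uint64_t check_nanbox_s(uint64_t in)
{
    const uint64_t mask = 0xffffffff00000000ull;
    if ((in & mask) == mask) {
        return in;                        /* properly NaN-boxed: pass through */
    }
    return mask | 0x7fc00000ull;          /* substitute the canonical qNaN */
}

int main(void)
{
    uint64_t ok  = 0xffffffff3f800000ull;   /* NaN-boxed 1.0f */
    uint64_t bad = 0x000000003f800000ull;   /* upper bits not all ones */
    printf("%016llx -> %016llx\n", (unsigned long long)ok,
           (unsigned long long)check_nanbox_s(ok));
    printf("%016llx -> %016llx\n", (unsigned long long)bad,
           (unsigned long long)check_nanbox_s(bad));
    return 0;
}
```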

+ 133 - 0
recipes-devtools/qemu/qemu/0025-target-riscv-rvv-1.0-configure-instructions.patch

@@ -0,0 +1,133 @@
+From 371b8815c98cf4e3f8e2048c3b2ee1c2830e537f Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Thu, 30 Jul 2020 18:40:37 +0800
+Subject: [PATCH 025/107] target/riscv: rvv-1.0: configure instructions
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
+---
+ target/riscv/insn_trans/trans_rvv.c.inc | 53 +++++++++----------------
+ target/riscv/vector_helper.c            | 14 ++++++-
+ 2 files changed, 31 insertions(+), 36 deletions(-)
+
+diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
+index dcffc14909..fcb01d1b5f 100644
+--- a/target/riscv/insn_trans/trans_rvv.c.inc
++++ b/target/riscv/insn_trans/trans_rvv.c.inc
+@@ -132,28 +132,29 @@ static bool require_noover_seg(const int8_t dst, const int8_t nf,
+     return !is_overlapped(dst, nf, src, 1);
+ }
+ 
+-static bool trans_vsetvl(DisasContext *ctx, arg_vsetvl *a)
++static bool do_vsetvl(DisasContext *ctx, int rd, int rs1, TCGv s2)
+ {
+-    TCGv s1, s2, dst;
++    TCGv s1, dst;
+ 
+     if (!require_rvv(ctx) || !has_ext(ctx, RVV)) {
+         return false;
+     }
+ 
+-    s2 = tcg_temp_new();
+     dst = tcg_temp_new();
+ 
+-    /* Using x0 as the rs1 register specifier, encodes an infinite AVL */
+-    if (a->rs1 == 0) {
++    if (rd == 0 && rs1 == 0) {
++        s1 = tcg_temp_new();
++        tcg_gen_mov_tl(s1, cpu_vl);
++    } else if (rs1 == 0) {
+         /* As the mask is at least one bit, RV_VLEN_MAX is >= VLMAX */
+         s1 = tcg_const_tl(RV_VLEN_MAX);
+     } else {
+         s1 = tcg_temp_new();
+-        gen_get_gpr(s1, a->rs1);
++        gen_get_gpr(s1, rs1);
+     }
+-    gen_get_gpr(s2, a->rs2);
++
+     gen_helper_vsetvl(dst, cpu_env, s1, s2);
+-    gen_set_gpr(a->rd, dst);
++    gen_set_gpr(rd, dst);
+     mark_vs_dirty(ctx);
+     tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn);
+     lookup_and_goto_ptr(ctx);
+@@ -165,35 +166,17 @@ static bool trans_vsetvl(DisasContext *ctx, arg_vsetvl *a)
+     return true;
+ }
+ 
+-static bool trans_vsetvli(DisasContext *ctx, arg_vsetvli *a)
++static bool trans_vsetvl(DisasContext *ctx, arg_vsetvl *a)
+ {
+-    TCGv s1, s2, dst;
+-
+-    if (!require_rvv(ctx) || !has_ext(ctx, RVV)) {
+-        return false;
+-    }
+-
+-    s2 = tcg_const_tl(a->zimm);
+-    dst = tcg_temp_new();
+-
+-    /* Using x0 as the rs1 register specifier, encodes an infinite AVL */
+-    if (a->rs1 == 0) {
+-        /* As the mask is at least one bit, RV_VLEN_MAX is >= VLMAX */
+-        s1 = tcg_const_tl(RV_VLEN_MAX);
+-    } else {
+-        s1 = tcg_temp_new();
+-        gen_get_gpr(s1, a->rs1);
+-    }
+-    gen_helper_vsetvl(dst, cpu_env, s1, s2);
+-    gen_set_gpr(a->rd, dst);
+-    mark_vs_dirty(ctx);
+-    gen_goto_tb(ctx, 0, ctx->pc_succ_insn);
+-    ctx->base.is_jmp = DISAS_NORETURN;
++    TCGv s2 = tcg_temp_new();
++    gen_get_gpr(s2, a->rs2);
++    return do_vsetvl(ctx, a->rd, a->rs1, s2);
++}
+ 
+-    tcg_temp_free(s1);
+-    tcg_temp_free(s2);
+-    tcg_temp_free(dst);
+-    return true;
++static bool trans_vsetvli(DisasContext *ctx, arg_vsetvli *a)
++{
++    TCGv s2 = tcg_const_tl(a->zimm);
++    return do_vsetvl(ctx, a->rd, a->rs1, s2);
+ }
+ 
+ /* vector register offset from env */
+diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
+index 5a142a1f4b..e8912ee8fe 100644
+--- a/target/riscv/vector_helper.c
++++ b/target/riscv/vector_helper.c
+@@ -31,12 +31,24 @@ target_ulong HELPER(vsetvl)(CPURISCVState *env, target_ulong s1,
+ {
+     int vlmax, vl;
+     RISCVCPU *cpu = env_archcpu(env);
++    uint64_t lmul = FIELD_EX64(s2, VTYPE, VLMUL);
+     uint16_t sew = 8 << FIELD_EX64(s2, VTYPE, VSEW);
+     uint8_t ediv = FIELD_EX64(s2, VTYPE, VEDIV);
+     bool vill = FIELD_EX64(s2, VTYPE, VILL);
+     target_ulong reserved = FIELD_EX64(s2, VTYPE, RESERVED);
+ 
+-    if ((sew > cpu->cfg.elen) || vill || (ediv != 0) || (reserved != 0)) {
++    if (lmul & 4) {
++        /* Fractional LMUL. */
++        if (lmul == 4 ||
++            cpu->cfg.elen >> (8 - lmul) < sew) {
++            vill = true;
++        }
++    }
++
++    if ((sew > cpu->cfg.elen)
++        || vill
++        || (ediv != 0)
++        || (reserved != 0)) {
+         /* only set vill bit. */
+         env->vtype = FIELD_DP64(0, VTYPE, VILL, 1);
+         env->vl = 0;
+-- 
+2.33.1
+
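The fractional-LMUL check added to HELPER(vsetvl) above reads the 3-bit VLMUL encoding, where a set bit 2 means a fractional multiplier (4 is reserved; 5, 6, 7 stand for 1/8, 1/4, 1/2), so `elen >> (8 - lmul)` is ELEN scaled by that fraction. A minimal sketch of the resulting legality test, assuming vtype_is_legal() as an invented name:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* sew is in bits; vlmul is the 3-bit VTYPE.VLMUL encoding; elen is the
 * implementation's maximum supported element width in bits. */
static bool vtype_is_legal(uint16_t sew, uint8_t vlmul, uint16_t elen)
{
    if (sew > elen) {
        return false;                     /* SEW larger than ELEN */
    }
    if (vlmul & 4) {                      /* fractional LMUL encodings */
        if (vlmul == 4) {
            return false;                 /* reserved encoding */
        }
        /* require SEW <= ELEN * (fractional LMUL) */
        if ((elen >> (8 - vlmul)) < sew) {
            return false;
        }
    }
    return true;
}

int main(void)
{
    /* ELEN=64: SEW=8 with LMUL=1/8 is legal, SEW=16 with LMUL=1/8 is not */
    printf("%d %d\n", vtype_is_legal(8, 5, 64), vtype_is_legal(16, 5, 64));
    return 0;
}
```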

+ 845 - 0
recipes-devtools/qemu/qemu/0026-target-riscv-rvv-1.0-stride-load-and-store-instructi.patch

@@ -0,0 +1,845 @@
+From f2069daac8bc968e9c0d6781eab445f16b6f4bc9 Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Fri, 14 Aug 2020 18:07:19 +0800
+Subject: [PATCH 026/107] target/riscv: rvv-1.0: stride load and store
+ instructions
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
+---
+ target/riscv/helper.h                   | 129 +++-----------
+ target/riscv/insn32.decode              |  43 +++--
+ target/riscv/insn_trans/trans_rvv.c.inc | 227 +++++++++++-------------
+ target/riscv/vector_helper.c            | 190 ++++++--------------
+ 4 files changed, 194 insertions(+), 395 deletions(-)
+
+diff --git a/target/riscv/helper.h b/target/riscv/helper.h
+index 1104a3540a..3f4d460054 100644
+--- a/target/riscv/helper.h
++++ b/target/riscv/helper.h
+@@ -113,111 +113,30 @@ DEF_HELPER_2(hyp_hlvx_wu, tl, env, tl)
+ 
+ /* Vector functions */
+ DEF_HELPER_3(vsetvl, tl, env, tl, tl)
+-DEF_HELPER_5(vlb_v_b, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vlb_v_b_mask, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vlb_v_h, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vlb_v_h_mask, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vlb_v_w, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vlb_v_w_mask, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vlb_v_d, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vlb_v_d_mask, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vlh_v_h, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vlh_v_h_mask, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vlh_v_w, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vlh_v_w_mask, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vlh_v_d, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vlh_v_d_mask, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vlw_v_w, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vlw_v_w_mask, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vlw_v_d, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vlw_v_d_mask, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vle_v_b, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vle_v_b_mask, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vle_v_h, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vle_v_h_mask, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vle_v_w, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vle_v_w_mask, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vle_v_d, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vle_v_d_mask, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vlbu_v_b, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vlbu_v_b_mask, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vlbu_v_h, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vlbu_v_h_mask, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vlbu_v_w, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vlbu_v_w_mask, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vlbu_v_d, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vlbu_v_d_mask, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vlhu_v_h, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vlhu_v_h_mask, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vlhu_v_w, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vlhu_v_w_mask, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vlhu_v_d, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vlhu_v_d_mask, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vlwu_v_w, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vlwu_v_w_mask, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vlwu_v_d, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vlwu_v_d_mask, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vsb_v_b, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vsb_v_b_mask, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vsb_v_h, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vsb_v_h_mask, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vsb_v_w, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vsb_v_w_mask, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vsb_v_d, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vsb_v_d_mask, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vsh_v_h, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vsh_v_h_mask, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vsh_v_w, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vsh_v_w_mask, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vsh_v_d, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vsh_v_d_mask, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vsw_v_w, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vsw_v_w_mask, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vsw_v_d, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vsw_v_d_mask, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vse_v_b, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vse_v_b_mask, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vse_v_h, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vse_v_h_mask, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vse_v_w, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vse_v_w_mask, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vse_v_d, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vse_v_d_mask, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_6(vlsb_v_b, void, ptr, ptr, tl, tl, env, i32)
+-DEF_HELPER_6(vlsb_v_h, void, ptr, ptr, tl, tl, env, i32)
+-DEF_HELPER_6(vlsb_v_w, void, ptr, ptr, tl, tl, env, i32)
+-DEF_HELPER_6(vlsb_v_d, void, ptr, ptr, tl, tl, env, i32)
+-DEF_HELPER_6(vlsh_v_h, void, ptr, ptr, tl, tl, env, i32)
+-DEF_HELPER_6(vlsh_v_w, void, ptr, ptr, tl, tl, env, i32)
+-DEF_HELPER_6(vlsh_v_d, void, ptr, ptr, tl, tl, env, i32)
+-DEF_HELPER_6(vlsw_v_w, void, ptr, ptr, tl, tl, env, i32)
+-DEF_HELPER_6(vlsw_v_d, void, ptr, ptr, tl, tl, env, i32)
+-DEF_HELPER_6(vlse_v_b, void, ptr, ptr, tl, tl, env, i32)
+-DEF_HELPER_6(vlse_v_h, void, ptr, ptr, tl, tl, env, i32)
+-DEF_HELPER_6(vlse_v_w, void, ptr, ptr, tl, tl, env, i32)
+-DEF_HELPER_6(vlse_v_d, void, ptr, ptr, tl, tl, env, i32)
+-DEF_HELPER_6(vlsbu_v_b, void, ptr, ptr, tl, tl, env, i32)
+-DEF_HELPER_6(vlsbu_v_h, void, ptr, ptr, tl, tl, env, i32)
+-DEF_HELPER_6(vlsbu_v_w, void, ptr, ptr, tl, tl, env, i32)
+-DEF_HELPER_6(vlsbu_v_d, void, ptr, ptr, tl, tl, env, i32)
+-DEF_HELPER_6(vlshu_v_h, void, ptr, ptr, tl, tl, env, i32)
+-DEF_HELPER_6(vlshu_v_w, void, ptr, ptr, tl, tl, env, i32)
+-DEF_HELPER_6(vlshu_v_d, void, ptr, ptr, tl, tl, env, i32)
+-DEF_HELPER_6(vlswu_v_w, void, ptr, ptr, tl, tl, env, i32)
+-DEF_HELPER_6(vlswu_v_d, void, ptr, ptr, tl, tl, env, i32)
+-DEF_HELPER_6(vssb_v_b, void, ptr, ptr, tl, tl, env, i32)
+-DEF_HELPER_6(vssb_v_h, void, ptr, ptr, tl, tl, env, i32)
+-DEF_HELPER_6(vssb_v_w, void, ptr, ptr, tl, tl, env, i32)
+-DEF_HELPER_6(vssb_v_d, void, ptr, ptr, tl, tl, env, i32)
+-DEF_HELPER_6(vssh_v_h, void, ptr, ptr, tl, tl, env, i32)
+-DEF_HELPER_6(vssh_v_w, void, ptr, ptr, tl, tl, env, i32)
+-DEF_HELPER_6(vssh_v_d, void, ptr, ptr, tl, tl, env, i32)
+-DEF_HELPER_6(vssw_v_w, void, ptr, ptr, tl, tl, env, i32)
+-DEF_HELPER_6(vssw_v_d, void, ptr, ptr, tl, tl, env, i32)
+-DEF_HELPER_6(vsse_v_b, void, ptr, ptr, tl, tl, env, i32)
+-DEF_HELPER_6(vsse_v_h, void, ptr, ptr, tl, tl, env, i32)
+-DEF_HELPER_6(vsse_v_w, void, ptr, ptr, tl, tl, env, i32)
+-DEF_HELPER_6(vsse_v_d, void, ptr, ptr, tl, tl, env, i32)
++DEF_HELPER_5(vle8_v, void, ptr, ptr, tl, env, i32)
++DEF_HELPER_5(vle16_v, void, ptr, ptr, tl, env, i32)
++DEF_HELPER_5(vle32_v, void, ptr, ptr, tl, env, i32)
++DEF_HELPER_5(vle64_v, void, ptr, ptr, tl, env, i32)
++DEF_HELPER_5(vle8_v_mask, void, ptr, ptr, tl, env, i32)
++DEF_HELPER_5(vle16_v_mask, void, ptr, ptr, tl, env, i32)
++DEF_HELPER_5(vle32_v_mask, void, ptr, ptr, tl, env, i32)
++DEF_HELPER_5(vle64_v_mask, void, ptr, ptr, tl, env, i32)
++DEF_HELPER_5(vse8_v, void, ptr, ptr, tl, env, i32)
++DEF_HELPER_5(vse16_v, void, ptr, ptr, tl, env, i32)
++DEF_HELPER_5(vse32_v, void, ptr, ptr, tl, env, i32)
++DEF_HELPER_5(vse64_v, void, ptr, ptr, tl, env, i32)
++DEF_HELPER_5(vse8_v_mask, void, ptr, ptr, tl, env, i32)
++DEF_HELPER_5(vse16_v_mask, void, ptr, ptr, tl, env, i32)
++DEF_HELPER_5(vse32_v_mask, void, ptr, ptr, tl, env, i32)
++DEF_HELPER_5(vse64_v_mask, void, ptr, ptr, tl, env, i32)
++DEF_HELPER_6(vlse8_v, void, ptr, ptr, tl, tl, env, i32)
++DEF_HELPER_6(vlse16_v, void, ptr, ptr, tl, tl, env, i32)
++DEF_HELPER_6(vlse32_v, void, ptr, ptr, tl, tl, env, i32)
++DEF_HELPER_6(vlse64_v, void, ptr, ptr, tl, tl, env, i32)
++DEF_HELPER_6(vsse8_v, void, ptr, ptr, tl, tl, env, i32)
++DEF_HELPER_6(vsse16_v, void, ptr, ptr, tl, tl, env, i32)
++DEF_HELPER_6(vsse32_v, void, ptr, ptr, tl, tl, env, i32)
++DEF_HELPER_6(vsse64_v, void, ptr, ptr, tl, tl, env, i32)
+ DEF_HELPER_6(vlxb_v_b, void, ptr, ptr, tl, ptr, env, i32)
+ DEF_HELPER_6(vlxb_v_h, void, ptr, ptr, tl, ptr, env, i32)
+ DEF_HELPER_6(vlxb_v_w, void, ptr, ptr, tl, ptr, env, i32)
+diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
+index 8d9064a7a0..03a1f6e53e 100644
+--- a/target/riscv/insn32.decode
++++ b/target/riscv/insn32.decode
+@@ -240,13 +240,26 @@ hfence_vvma 0010001  .....  ..... 000 00000 1110011 @hfence_vvma
+ # *** RV32V Extension ***
+ 
+ # *** Vector loads and stores are encoded within LOADFP/STORE-FP ***
+-vlb_v      ... 100 . 00000 ..... 000 ..... 0000111 @r2_nfvm
+-vlh_v      ... 100 . 00000 ..... 101 ..... 0000111 @r2_nfvm
+-vlw_v      ... 100 . 00000 ..... 110 ..... 0000111 @r2_nfvm
+-vle_v      ... 000 . 00000 ..... 111 ..... 0000111 @r2_nfvm
+-vlbu_v     ... 000 . 00000 ..... 000 ..... 0000111 @r2_nfvm
+-vlhu_v     ... 000 . 00000 ..... 101 ..... 0000111 @r2_nfvm
+-vlwu_v     ... 000 . 00000 ..... 110 ..... 0000111 @r2_nfvm
++# Vector unit-stride load/store insns.
++vle8_v     ... 000 . 00000 ..... 000 ..... 0000111 @r2_nfvm
++vle16_v    ... 000 . 00000 ..... 101 ..... 0000111 @r2_nfvm
++vle32_v    ... 000 . 00000 ..... 110 ..... 0000111 @r2_nfvm
++vle64_v    ... 000 . 00000 ..... 111 ..... 0000111 @r2_nfvm
++vse8_v     ... 000 . 00000 ..... 000 ..... 0100111 @r2_nfvm
++vse16_v    ... 000 . 00000 ..... 101 ..... 0100111 @r2_nfvm
++vse32_v    ... 000 . 00000 ..... 110 ..... 0100111 @r2_nfvm
++vse64_v    ... 000 . 00000 ..... 111 ..... 0100111 @r2_nfvm
++
++# Vector strided insns.
++vlse8_v     ... 010 . ..... ..... 000 ..... 0000111 @r_nfvm
++vlse16_v    ... 010 . ..... ..... 101 ..... 0000111 @r_nfvm
++vlse32_v    ... 010 . ..... ..... 110 ..... 0000111 @r_nfvm
++vlse64_v    ... 010 . ..... ..... 111 ..... 0000111 @r_nfvm
++vsse8_v     ... 010 . ..... ..... 000 ..... 0100111 @r_nfvm
++vsse16_v    ... 010 . ..... ..... 101 ..... 0100111 @r_nfvm
++vsse32_v    ... 010 . ..... ..... 110 ..... 0100111 @r_nfvm
++vsse64_v    ... 010 . ..... ..... 111 ..... 0100111 @r_nfvm
++
+ vlbff_v    ... 100 . 10000 ..... 000 ..... 0000111 @r2_nfvm
+ vlhff_v    ... 100 . 10000 ..... 101 ..... 0000111 @r2_nfvm
+ vlwff_v    ... 100 . 10000 ..... 110 ..... 0000111 @r2_nfvm
+@@ -254,22 +267,6 @@ vleff_v    ... 000 . 10000 ..... 111 ..... 0000111 @r2_nfvm
+ vlbuff_v   ... 000 . 10000 ..... 000 ..... 0000111 @r2_nfvm
+ vlhuff_v   ... 000 . 10000 ..... 101 ..... 0000111 @r2_nfvm
+ vlwuff_v   ... 000 . 10000 ..... 110 ..... 0000111 @r2_nfvm
+-vsb_v      ... 000 . 00000 ..... 000 ..... 0100111 @r2_nfvm
+-vsh_v      ... 000 . 00000 ..... 101 ..... 0100111 @r2_nfvm
+-vsw_v      ... 000 . 00000 ..... 110 ..... 0100111 @r2_nfvm
+-vse_v      ... 000 . 00000 ..... 111 ..... 0100111 @r2_nfvm
+-
+-vlsb_v     ... 110 . ..... ..... 000 ..... 0000111 @r_nfvm
+-vlsh_v     ... 110 . ..... ..... 101 ..... 0000111 @r_nfvm
+-vlsw_v     ... 110 . ..... ..... 110 ..... 0000111 @r_nfvm
+-vlse_v     ... 010 . ..... ..... 111 ..... 0000111 @r_nfvm
+-vlsbu_v    ... 010 . ..... ..... 000 ..... 0000111 @r_nfvm
+-vlshu_v    ... 010 . ..... ..... 101 ..... 0000111 @r_nfvm
+-vlswu_v    ... 010 . ..... ..... 110 ..... 0000111 @r_nfvm
+-vssb_v     ... 010 . ..... ..... 000 ..... 0100111 @r_nfvm
+-vssh_v     ... 010 . ..... ..... 101 ..... 0100111 @r_nfvm
+-vssw_v     ... 010 . ..... ..... 110 ..... 0100111 @r_nfvm
+-vsse_v     ... 010 . ..... ..... 111 ..... 0100111 @r_nfvm
+ 
+ vlxb_v     ... 111 . ..... ..... 000 ..... 0000111 @r_nfvm
+ vlxh_v     ... 111 . ..... ..... 101 ..... 0000111 @r_nfvm
+diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
+index fcb01d1b5f..e4c83cf74d 100644
+--- a/target/riscv/insn_trans/trans_rvv.c.inc
++++ b/target/riscv/insn_trans/trans_rvv.c.inc
+@@ -188,9 +188,42 @@ static uint32_t vreg_ofs(DisasContext *s, int reg)
+ /* check functions */
+ 
+ /*
+- * In cpu_get_tb_cpu_state(), set VILL if RVV was not present.
+- * So RVV is also be checked in this function.
++ * Vector unit-stride, strided, unit-stride segment, strided segment
++ * store check function.
++ *
++ * Rules to be checked here:
+ *   1. EMUL must be within the range: 1/8 <= EMUL <= 8. (Section 7.3)
+ *   2. Destination vector register number is a multiple of EMUL.
++ *      (Section 3.3.2, 7.3)
++ *   3. The EMUL setting must be such that EMUL * NFIELDS ≤ 8. (Section 7.8)
++ *   4. Vector register numbers accessed by the segment load or store
++ *      cannot increment past 31. (Section 7.8)
++ */
++static bool vext_check_store(DisasContext *s, int vd, int nf, uint8_t eew)
++{
++    int8_t emul = eew - s->sew + s->lmul;
++    return (emul >= -3 && emul <= 3) &&
++            require_align(vd, emul) &&
++            require_nf(vd, nf, emul);
++}
++
++/*
++ * Vector unit-stride, strided, unit-stride segment, strided segment
++ * load check function.
++ *
++ * Rules to be checked here:
+ *   1. All rules that apply to store instructions also apply
+ *      to load instructions.
++ *   2. Destination vector register group for a masked vector
++ *      instruction cannot overlap the source mask register (v0).
++ *      (Section 5.3)
+  */
++static bool vext_check_load(DisasContext *s, int vd, int nf, int vm,
++                            uint8_t eew)
++{
++    return vext_check_store(s, vd, nf, eew) && require_vm(vm, vd);
++}
++
+ static bool vext_check_isa_ill(DisasContext *s)
+ {
+     return !s->vill;
+@@ -437,13 +470,13 @@ static bool vext_check_isa_ill(DisasContext *s)
+ }
+ 
+ /* common translation macro */
+-#define GEN_VEXT_TRANS(NAME, SEQ, ARGTYPE, OP, CHECK)      \
+-static bool trans_##NAME(DisasContext *s, arg_##ARGTYPE *a)\
+-{                                                          \
+-    if (CHECK(s, a)) {                                     \
+-        return OP(s, a, SEQ);                              \
+-    }                                                      \
+-    return false;                                          \
++#define GEN_VEXT_TRANS(NAME, EEW, ARGTYPE, OP, CHECK)        \
++static bool trans_##NAME(DisasContext *s, arg_##ARGTYPE * a) \
++{                                                            \
++    if (CHECK(s, a, EEW)) {                                  \
++        return OP(s, a, EEW);                                \
++    }                                                        \
++    return false;                                            \
+ }
+ 
+ /*
+@@ -493,44 +526,20 @@ static bool ldst_us_trans(uint32_t vd, uint32_t rs1, uint32_t data,
+     return true;
+ }
+ 
+-static bool ld_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t seq)
++static bool ld_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t eew)
+ {
+     uint32_t data = 0;
+     gen_helper_ldst_us *fn;
+-    static gen_helper_ldst_us * const fns[2][7][4] = {
++    static gen_helper_ldst_us * const fns[2][4] = {
+         /* masked unit stride load */
+-        { { gen_helper_vlb_v_b_mask,  gen_helper_vlb_v_h_mask,
+-            gen_helper_vlb_v_w_mask,  gen_helper_vlb_v_d_mask },
+-          { NULL,                     gen_helper_vlh_v_h_mask,
+-            gen_helper_vlh_v_w_mask,  gen_helper_vlh_v_d_mask },
+-          { NULL,                     NULL,
+-            gen_helper_vlw_v_w_mask,  gen_helper_vlw_v_d_mask },
+-          { gen_helper_vle_v_b_mask,  gen_helper_vle_v_h_mask,
+-            gen_helper_vle_v_w_mask,  gen_helper_vle_v_d_mask },
+-          { gen_helper_vlbu_v_b_mask, gen_helper_vlbu_v_h_mask,
+-            gen_helper_vlbu_v_w_mask, gen_helper_vlbu_v_d_mask },
+-          { NULL,                     gen_helper_vlhu_v_h_mask,
+-            gen_helper_vlhu_v_w_mask, gen_helper_vlhu_v_d_mask },
+-          { NULL,                     NULL,
+-            gen_helper_vlwu_v_w_mask, gen_helper_vlwu_v_d_mask } },
++        { gen_helper_vle8_v_mask, gen_helper_vle16_v_mask,
++          gen_helper_vle32_v_mask, gen_helper_vle64_v_mask },
+         /* unmasked unit stride load */
+-        { { gen_helper_vlb_v_b,  gen_helper_vlb_v_h,
+-            gen_helper_vlb_v_w,  gen_helper_vlb_v_d },
+-          { NULL,                gen_helper_vlh_v_h,
+-            gen_helper_vlh_v_w,  gen_helper_vlh_v_d },
+-          { NULL,                NULL,
+-            gen_helper_vlw_v_w,  gen_helper_vlw_v_d },
+-          { gen_helper_vle_v_b,  gen_helper_vle_v_h,
+-            gen_helper_vle_v_w,  gen_helper_vle_v_d },
+-          { gen_helper_vlbu_v_b, gen_helper_vlbu_v_h,
+-            gen_helper_vlbu_v_w, gen_helper_vlbu_v_d },
+-          { NULL,                gen_helper_vlhu_v_h,
+-            gen_helper_vlhu_v_w, gen_helper_vlhu_v_d },
+-          { NULL,                NULL,
+-            gen_helper_vlwu_v_w, gen_helper_vlwu_v_d } }
++        { gen_helper_vle8_v, gen_helper_vle16_v,
++          gen_helper_vle32_v, gen_helper_vle64_v }
+     };
+ 
+-    fn =  fns[a->vm][seq][s->sew];
++    fn =  fns[a->vm][eew];
+     if (fn == NULL) {
+         return false;
+     }
+@@ -541,48 +550,32 @@ static bool ld_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t seq)
+     return ldst_us_trans(a->rd, a->rs1, data, fn, s, false);
+ }
+ 
+-static bool ld_us_check(DisasContext *s, arg_r2nfvm* a)
++static bool ld_us_check(DisasContext *s, arg_r2nfvm* a, uint8_t eew)
+ {
+-    return (vext_check_isa_ill(s) &&
+-            vext_check_overlap_mask(s, a->rd, a->vm, false) &&
+-            vext_check_reg(s, a->rd, false) &&
+-            vext_check_nf(s, a->nf));
++    return require_rvv(s) &&
++           vext_check_isa_ill(s) &&
++           vext_check_load(s, a->rd, a->nf, a->vm, eew);
+ }
+ 
+-GEN_VEXT_TRANS(vlb_v, 0, r2nfvm, ld_us_op, ld_us_check)
+-GEN_VEXT_TRANS(vlh_v, 1, r2nfvm, ld_us_op, ld_us_check)
+-GEN_VEXT_TRANS(vlw_v, 2, r2nfvm, ld_us_op, ld_us_check)
+-GEN_VEXT_TRANS(vle_v, 3, r2nfvm, ld_us_op, ld_us_check)
+-GEN_VEXT_TRANS(vlbu_v, 4, r2nfvm, ld_us_op, ld_us_check)
+-GEN_VEXT_TRANS(vlhu_v, 5, r2nfvm, ld_us_op, ld_us_check)
+-GEN_VEXT_TRANS(vlwu_v, 6, r2nfvm, ld_us_op, ld_us_check)
++GEN_VEXT_TRANS(vle8_v,  MO_8,  r2nfvm, ld_us_op, ld_us_check)
++GEN_VEXT_TRANS(vle16_v, MO_16, r2nfvm, ld_us_op, ld_us_check)
++GEN_VEXT_TRANS(vle32_v, MO_32, r2nfvm, ld_us_op, ld_us_check)
++GEN_VEXT_TRANS(vle64_v, MO_64, r2nfvm, ld_us_op, ld_us_check)
+ 
+-static bool st_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t seq)
++static bool st_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t eew)
+ {
+     uint32_t data = 0;
+     gen_helper_ldst_us *fn;
+-    static gen_helper_ldst_us * const fns[2][4][4] = {
+-        /* masked unit stride load and store */
+-        { { gen_helper_vsb_v_b_mask,  gen_helper_vsb_v_h_mask,
+-            gen_helper_vsb_v_w_mask,  gen_helper_vsb_v_d_mask },
+-          { NULL,                     gen_helper_vsh_v_h_mask,
+-            gen_helper_vsh_v_w_mask,  gen_helper_vsh_v_d_mask },
+-          { NULL,                     NULL,
+-            gen_helper_vsw_v_w_mask,  gen_helper_vsw_v_d_mask },
+-          { gen_helper_vse_v_b_mask,  gen_helper_vse_v_h_mask,
+-            gen_helper_vse_v_w_mask,  gen_helper_vse_v_d_mask } },
++    static gen_helper_ldst_us * const fns[2][4] = {
++        /* masked unit stride store */
++        { gen_helper_vse8_v_mask, gen_helper_vse16_v_mask,
++          gen_helper_vse32_v_mask, gen_helper_vse64_v_mask },
+         /* unmasked unit stride store */
+-        { { gen_helper_vsb_v_b,  gen_helper_vsb_v_h,
+-            gen_helper_vsb_v_w,  gen_helper_vsb_v_d },
+-          { NULL,                gen_helper_vsh_v_h,
+-            gen_helper_vsh_v_w,  gen_helper_vsh_v_d },
+-          { NULL,                NULL,
+-            gen_helper_vsw_v_w,  gen_helper_vsw_v_d },
+-          { gen_helper_vse_v_b,  gen_helper_vse_v_h,
+-            gen_helper_vse_v_w,  gen_helper_vse_v_d } }
++        { gen_helper_vse8_v, gen_helper_vse16_v,
++          gen_helper_vse32_v, gen_helper_vse64_v }
+     };
+ 
+-    fn =  fns[a->vm][seq][s->sew];
++    fn =  fns[a->vm][eew];
+     if (fn == NULL) {
+         return false;
+     }
+@@ -593,17 +586,17 @@ static bool st_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t seq)
+     return ldst_us_trans(a->rd, a->rs1, data, fn, s, true);
+ }
+ 
+-static bool st_us_check(DisasContext *s, arg_r2nfvm* a)
++static bool st_us_check(DisasContext *s, arg_r2nfvm* a, uint8_t eew)
+ {
+-    return (vext_check_isa_ill(s) &&
+-            vext_check_reg(s, a->rd, false) &&
+-            vext_check_nf(s, a->nf));
++    return require_rvv(s) &&
++           vext_check_isa_ill(s) &&
++           vext_check_store(s, a->rd, a->nf, eew);
+ }
+ 
+-GEN_VEXT_TRANS(vsb_v, 0, r2nfvm, st_us_op, st_us_check)
+-GEN_VEXT_TRANS(vsh_v, 1, r2nfvm, st_us_op, st_us_check)
+-GEN_VEXT_TRANS(vsw_v, 2, r2nfvm, st_us_op, st_us_check)
+-GEN_VEXT_TRANS(vse_v, 3, r2nfvm, st_us_op, st_us_check)
++GEN_VEXT_TRANS(vse8_v,  MO_8,  r2nfvm, st_us_op, st_us_check)
++GEN_VEXT_TRANS(vse16_v, MO_16, r2nfvm, st_us_op, st_us_check)
++GEN_VEXT_TRANS(vse32_v, MO_32, r2nfvm, st_us_op, st_us_check)
++GEN_VEXT_TRANS(vse64_v, MO_64, r2nfvm, st_us_op, st_us_check)
+ 
+ /*
+  *** stride load and store
+@@ -647,28 +640,16 @@ static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2,
+     return true;
+ }
+ 
+-static bool ld_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t seq)
++static bool ld_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
+ {
+     uint32_t data = 0;
+     gen_helper_ldst_stride *fn;
+-    static gen_helper_ldst_stride * const fns[7][4] = {
+-        { gen_helper_vlsb_v_b,  gen_helper_vlsb_v_h,
+-          gen_helper_vlsb_v_w,  gen_helper_vlsb_v_d },
+-        { NULL,                 gen_helper_vlsh_v_h,
+-          gen_helper_vlsh_v_w,  gen_helper_vlsh_v_d },
+-        { NULL,                 NULL,
+-          gen_helper_vlsw_v_w,  gen_helper_vlsw_v_d },
+-        { gen_helper_vlse_v_b,  gen_helper_vlse_v_h,
+-          gen_helper_vlse_v_w,  gen_helper_vlse_v_d },
+-        { gen_helper_vlsbu_v_b, gen_helper_vlsbu_v_h,
+-          gen_helper_vlsbu_v_w, gen_helper_vlsbu_v_d },
+-        { NULL,                 gen_helper_vlshu_v_h,
+-          gen_helper_vlshu_v_w, gen_helper_vlshu_v_d },
+-        { NULL,                 NULL,
+-          gen_helper_vlswu_v_w, gen_helper_vlswu_v_d },
++    static gen_helper_ldst_stride * const fns[4] = {
++        gen_helper_vlse8_v, gen_helper_vlse16_v,
++        gen_helper_vlse32_v, gen_helper_vlse64_v
+     };
+ 
+-    fn =  fns[seq][s->sew];
++    fn = fns[eew];
+     if (fn == NULL) {
+         return false;
+     }
+@@ -679,42 +660,32 @@ static bool ld_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t seq)
+     return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s, false);
+ }
+ 
+-static bool ld_stride_check(DisasContext *s, arg_rnfvm* a)
++static bool ld_stride_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
+ {
+-    return (vext_check_isa_ill(s) &&
+-            vext_check_overlap_mask(s, a->rd, a->vm, false) &&
+-            vext_check_reg(s, a->rd, false) &&
+-            vext_check_nf(s, a->nf));
++    return require_rvv(s) &&
++           vext_check_isa_ill(s) &&
++           vext_check_load(s, a->rd, a->nf, a->vm, eew);
+ }
+ 
+-GEN_VEXT_TRANS(vlsb_v, 0, rnfvm, ld_stride_op, ld_stride_check)
+-GEN_VEXT_TRANS(vlsh_v, 1, rnfvm, ld_stride_op, ld_stride_check)
+-GEN_VEXT_TRANS(vlsw_v, 2, rnfvm, ld_stride_op, ld_stride_check)
+-GEN_VEXT_TRANS(vlse_v, 3, rnfvm, ld_stride_op, ld_stride_check)
+-GEN_VEXT_TRANS(vlsbu_v, 4, rnfvm, ld_stride_op, ld_stride_check)
+-GEN_VEXT_TRANS(vlshu_v, 5, rnfvm, ld_stride_op, ld_stride_check)
+-GEN_VEXT_TRANS(vlswu_v, 6, rnfvm, ld_stride_op, ld_stride_check)
++GEN_VEXT_TRANS(vlse8_v,  MO_8,  rnfvm, ld_stride_op, ld_stride_check)
++GEN_VEXT_TRANS(vlse16_v, MO_16, rnfvm, ld_stride_op, ld_stride_check)
++GEN_VEXT_TRANS(vlse32_v, MO_32, rnfvm, ld_stride_op, ld_stride_check)
++GEN_VEXT_TRANS(vlse64_v, MO_64, rnfvm, ld_stride_op, ld_stride_check)
+ 
+-static bool st_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t seq)
++static bool st_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
+ {
+     uint32_t data = 0;
+     gen_helper_ldst_stride *fn;
+-    static gen_helper_ldst_stride * const fns[4][4] = {
++    static gen_helper_ldst_stride * const fns[4] = {
+         /* masked stride store */
+-        { gen_helper_vssb_v_b,  gen_helper_vssb_v_h,
+-          gen_helper_vssb_v_w,  gen_helper_vssb_v_d },
+-        { NULL,                 gen_helper_vssh_v_h,
+-          gen_helper_vssh_v_w,  gen_helper_vssh_v_d },
+-        { NULL,                 NULL,
+-          gen_helper_vssw_v_w,  gen_helper_vssw_v_d },
+-        { gen_helper_vsse_v_b,  gen_helper_vsse_v_h,
+-          gen_helper_vsse_v_w,  gen_helper_vsse_v_d }
++        gen_helper_vsse8_v,  gen_helper_vsse16_v,
++        gen_helper_vsse32_v,  gen_helper_vsse64_v
+     };
+ 
+     data = FIELD_DP32(data, VDATA, VM, a->vm);
+     data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+     data = FIELD_DP32(data, VDATA, NF, a->nf);
+-    fn =  fns[seq][s->sew];
++    fn = fns[eew];
+     if (fn == NULL) {
+         return false;
+     }
+@@ -722,17 +693,17 @@ static bool st_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t seq)
+     return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s, true);
+ }
+ 
+-static bool st_stride_check(DisasContext *s, arg_rnfvm* a)
++static bool st_stride_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
+ {
+-    return (vext_check_isa_ill(s) &&
+-            vext_check_reg(s, a->rd, false) &&
+-            vext_check_nf(s, a->nf));
++    return require_rvv(s) &&
++           vext_check_isa_ill(s) &&
++           vext_check_store(s, a->rd, a->nf, eew);
+ }
+ 
+-GEN_VEXT_TRANS(vssb_v, 0, rnfvm, st_stride_op, st_stride_check)
+-GEN_VEXT_TRANS(vssh_v, 1, rnfvm, st_stride_op, st_stride_check)
+-GEN_VEXT_TRANS(vssw_v, 2, rnfvm, st_stride_op, st_stride_check)
+-GEN_VEXT_TRANS(vsse_v, 3, rnfvm, st_stride_op, st_stride_check)
++GEN_VEXT_TRANS(vsse8_v,  MO_8,  rnfvm, st_stride_op, st_stride_check)
++GEN_VEXT_TRANS(vsse16_v, MO_16, rnfvm, st_stride_op, st_stride_check)
++GEN_VEXT_TRANS(vsse32_v, MO_32, rnfvm, st_stride_op, st_stride_check)
++GEN_VEXT_TRANS(vsse64_v, MO_64, rnfvm, st_stride_op, st_stride_check)
+ 
+ /*
+  *** index load and store
+diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
+index e8912ee8fe..ad45dd9006 100644
+--- a/target/riscv/vector_helper.c
++++ b/target/riscv/vector_helper.c
+@@ -183,38 +183,18 @@ static inline int vext_elem_mask(void *v0, int index)
+ typedef void vext_ldst_elem_fn(CPURISCVState *env, target_ulong addr,
+                                uint32_t idx, void *vd, uintptr_t retaddr);
+ 
+-#define GEN_VEXT_LD_ELEM(NAME, MTYPE, ETYPE, H, LDSUF)     \
++#define GEN_VEXT_LD_ELEM(NAME, ETYPE, H, LDSUF)            \
+ static void NAME(CPURISCVState *env, abi_ptr addr,         \
+                  uint32_t idx, void *vd, uintptr_t retaddr)\
+ {                                                          \
+-    MTYPE data;                                            \
+     ETYPE *cur = ((ETYPE *)vd + H(idx));                   \
+-    data = cpu_##LDSUF##_data_ra(env, addr, retaddr);      \
+-    *cur = data;                                           \
++    *cur = cpu_##LDSUF##_data_ra(env, addr, retaddr);      \
+ }                                                          \
+ 
+-GEN_VEXT_LD_ELEM(ldb_b, int8_t,  int8_t,  H1, ldsb)
+-GEN_VEXT_LD_ELEM(ldb_h, int8_t,  int16_t, H2, ldsb)
+-GEN_VEXT_LD_ELEM(ldb_w, int8_t,  int32_t, H4, ldsb)
+-GEN_VEXT_LD_ELEM(ldb_d, int8_t,  int64_t, H8, ldsb)
+-GEN_VEXT_LD_ELEM(ldh_h, int16_t, int16_t, H2, ldsw)
+-GEN_VEXT_LD_ELEM(ldh_w, int16_t, int32_t, H4, ldsw)
+-GEN_VEXT_LD_ELEM(ldh_d, int16_t, int64_t, H8, ldsw)
+-GEN_VEXT_LD_ELEM(ldw_w, int32_t, int32_t, H4, ldl)
+-GEN_VEXT_LD_ELEM(ldw_d, int32_t, int64_t, H8, ldl)
+-GEN_VEXT_LD_ELEM(lde_b, int8_t,  int8_t,  H1, ldsb)
+-GEN_VEXT_LD_ELEM(lde_h, int16_t, int16_t, H2, ldsw)
+-GEN_VEXT_LD_ELEM(lde_w, int32_t, int32_t, H4, ldl)
+-GEN_VEXT_LD_ELEM(lde_d, int64_t, int64_t, H8, ldq)
+-GEN_VEXT_LD_ELEM(ldbu_b, uint8_t,  uint8_t,  H1, ldub)
+-GEN_VEXT_LD_ELEM(ldbu_h, uint8_t,  uint16_t, H2, ldub)
+-GEN_VEXT_LD_ELEM(ldbu_w, uint8_t,  uint32_t, H4, ldub)
+-GEN_VEXT_LD_ELEM(ldbu_d, uint8_t,  uint64_t, H8, ldub)
+-GEN_VEXT_LD_ELEM(ldhu_h, uint16_t, uint16_t, H2, lduw)
+-GEN_VEXT_LD_ELEM(ldhu_w, uint16_t, uint32_t, H4, lduw)
+-GEN_VEXT_LD_ELEM(ldhu_d, uint16_t, uint64_t, H8, lduw)
+-GEN_VEXT_LD_ELEM(ldwu_w, uint32_t, uint32_t, H4, ldl)
+-GEN_VEXT_LD_ELEM(ldwu_d, uint32_t, uint64_t, H8, ldl)
++GEN_VEXT_LD_ELEM(lde_b, int8_t,  H1, ldsb)
++GEN_VEXT_LD_ELEM(lde_h, int16_t, H2, ldsw)
++GEN_VEXT_LD_ELEM(lde_w, int32_t, H4, ldl)
++GEN_VEXT_LD_ELEM(lde_d, int64_t, H8, ldq)
+ 
+ #define GEN_VEXT_ST_ELEM(NAME, ETYPE, H, STSUF)            \
+ static void NAME(CPURISCVState *env, abi_ptr addr,         \
+@@ -224,15 +204,6 @@ static void NAME(CPURISCVState *env, abi_ptr addr,         \
+     cpu_##STSUF##_data_ra(env, addr, data, retaddr);       \
+ }
+ 
+-GEN_VEXT_ST_ELEM(stb_b, int8_t,  H1, stb)
+-GEN_VEXT_ST_ELEM(stb_h, int16_t, H2, stb)
+-GEN_VEXT_ST_ELEM(stb_w, int32_t, H4, stb)
+-GEN_VEXT_ST_ELEM(stb_d, int64_t, H8, stb)
+-GEN_VEXT_ST_ELEM(sth_h, int16_t, H2, stw)
+-GEN_VEXT_ST_ELEM(sth_w, int32_t, H4, stw)
+-GEN_VEXT_ST_ELEM(sth_d, int64_t, H8, stw)
+-GEN_VEXT_ST_ELEM(stw_w, int32_t, H4, stl)
+-GEN_VEXT_ST_ELEM(stw_d, int64_t, H8, stl)
+ GEN_VEXT_ST_ELEM(ste_b, int8_t,  H1, stb)
+ GEN_VEXT_ST_ELEM(ste_h, int16_t, H2, stw)
+ GEN_VEXT_ST_ELEM(ste_w, int32_t, H4, stl)
+@@ -246,8 +217,7 @@ vext_ldst_stride(void *vd, void *v0, target_ulong base,
+                  target_ulong stride, CPURISCVState *env,
+                  uint32_t desc, uint32_t vm,
+                  vext_ldst_elem_fn *ldst_elem,
+-                 uint32_t esz, uint32_t msz, uintptr_t ra,
+-                 MMUAccessType access_type)
++                 uint32_t esz, uintptr_t ra, MMUAccessType access_type)
+ {
+     uint32_t i, k;
+     uint32_t nf = vext_nf(desc);
+@@ -258,7 +228,7 @@ vext_ldst_stride(void *vd, void *v0, target_ulong base,
+         if (!vm && !vext_elem_mask(v0, i)) {
+             continue;
+         }
+-        probe_pages(env, base + stride * i, nf * msz, ra, access_type);
++        probe_pages(env, base + stride * i, nf * esz, ra, access_type);
+     }
+     /* do real access */
+     for (i = 0; i < env->vl; i++) {
+@@ -267,71 +237,42 @@ vext_ldst_stride(void *vd, void *v0, target_ulong base,
+             continue;
+         }
+         while (k < nf) {
+-            target_ulong addr = base + stride * i + k * msz;
++            target_ulong addr = base + stride * i + k * esz;
+             ldst_elem(env, addr, i + k * vlmax, vd, ra);
+             k++;
+         }
+     }
+ }
+ 
+-#define GEN_VEXT_LD_STRIDE(NAME, MTYPE, ETYPE, LOAD_FN)                 \
++#define GEN_VEXT_LD_STRIDE(NAME, ETYPE, LOAD_FN)                        \
+ void HELPER(NAME)(void *vd, void * v0, target_ulong base,               \
+                   target_ulong stride, CPURISCVState *env,              \
+                   uint32_t desc)                                        \
+ {                                                                       \
+     uint32_t vm = vext_vm(desc);                                        \
+     vext_ldst_stride(vd, v0, base, stride, env, desc, vm, LOAD_FN,      \
+-                     sizeof(ETYPE), sizeof(MTYPE),                      \
+-                     GETPC(), MMU_DATA_LOAD);                           \
+-}
+-
+-GEN_VEXT_LD_STRIDE(vlsb_v_b,  int8_t,   int8_t,   ldb_b)
+-GEN_VEXT_LD_STRIDE(vlsb_v_h,  int8_t,   int16_t,  ldb_h)
+-GEN_VEXT_LD_STRIDE(vlsb_v_w,  int8_t,   int32_t,  ldb_w)
+-GEN_VEXT_LD_STRIDE(vlsb_v_d,  int8_t,   int64_t,  ldb_d)
+-GEN_VEXT_LD_STRIDE(vlsh_v_h,  int16_t,  int16_t,  ldh_h)
+-GEN_VEXT_LD_STRIDE(vlsh_v_w,  int16_t,  int32_t,  ldh_w)
+-GEN_VEXT_LD_STRIDE(vlsh_v_d,  int16_t,  int64_t,  ldh_d)
+-GEN_VEXT_LD_STRIDE(vlsw_v_w,  int32_t,  int32_t,  ldw_w)
+-GEN_VEXT_LD_STRIDE(vlsw_v_d,  int32_t,  int64_t,  ldw_d)
+-GEN_VEXT_LD_STRIDE(vlse_v_b,  int8_t,   int8_t,   lde_b)
+-GEN_VEXT_LD_STRIDE(vlse_v_h,  int16_t,  int16_t,  lde_h)
+-GEN_VEXT_LD_STRIDE(vlse_v_w,  int32_t,  int32_t,  lde_w)
+-GEN_VEXT_LD_STRIDE(vlse_v_d,  int64_t,  int64_t,  lde_d)
+-GEN_VEXT_LD_STRIDE(vlsbu_v_b, uint8_t,  uint8_t,  ldbu_b)
+-GEN_VEXT_LD_STRIDE(vlsbu_v_h, uint8_t,  uint16_t, ldbu_h)
+-GEN_VEXT_LD_STRIDE(vlsbu_v_w, uint8_t,  uint32_t, ldbu_w)
+-GEN_VEXT_LD_STRIDE(vlsbu_v_d, uint8_t,  uint64_t, ldbu_d)
+-GEN_VEXT_LD_STRIDE(vlshu_v_h, uint16_t, uint16_t, ldhu_h)
+-GEN_VEXT_LD_STRIDE(vlshu_v_w, uint16_t, uint32_t, ldhu_w)
+-GEN_VEXT_LD_STRIDE(vlshu_v_d, uint16_t, uint64_t, ldhu_d)
+-GEN_VEXT_LD_STRIDE(vlswu_v_w, uint32_t, uint32_t, ldwu_w)
+-GEN_VEXT_LD_STRIDE(vlswu_v_d, uint32_t, uint64_t, ldwu_d)
+-
+-#define GEN_VEXT_ST_STRIDE(NAME, MTYPE, ETYPE, STORE_FN)                \
++                     sizeof(ETYPE), GETPC(), MMU_DATA_LOAD);            \
++}
++
++GEN_VEXT_LD_STRIDE(vlse8_v,  int8_t,  lde_b)
++GEN_VEXT_LD_STRIDE(vlse16_v, int16_t, lde_h)
++GEN_VEXT_LD_STRIDE(vlse32_v, int32_t, lde_w)
++GEN_VEXT_LD_STRIDE(vlse64_v, int64_t, lde_d)
++
++#define GEN_VEXT_ST_STRIDE(NAME, ETYPE, STORE_FN)                       \
+ void HELPER(NAME)(void *vd, void *v0, target_ulong base,                \
+                   target_ulong stride, CPURISCVState *env,              \
+                   uint32_t desc)                                        \
+ {                                                                       \
+     uint32_t vm = vext_vm(desc);                                        \
+     vext_ldst_stride(vd, v0, base, stride, env, desc, vm, STORE_FN,     \
+-                     sizeof(ETYPE), sizeof(MTYPE),                      \
+-                     GETPC(), MMU_DATA_STORE);                          \
+-}
+-
+-GEN_VEXT_ST_STRIDE(vssb_v_b, int8_t,  int8_t,  stb_b)
+-GEN_VEXT_ST_STRIDE(vssb_v_h, int8_t,  int16_t, stb_h)
+-GEN_VEXT_ST_STRIDE(vssb_v_w, int8_t,  int32_t, stb_w)
+-GEN_VEXT_ST_STRIDE(vssb_v_d, int8_t,  int64_t, stb_d)
+-GEN_VEXT_ST_STRIDE(vssh_v_h, int16_t, int16_t, sth_h)
+-GEN_VEXT_ST_STRIDE(vssh_v_w, int16_t, int32_t, sth_w)
+-GEN_VEXT_ST_STRIDE(vssh_v_d, int16_t, int64_t, sth_d)
+-GEN_VEXT_ST_STRIDE(vssw_v_w, int32_t, int32_t, stw_w)
+-GEN_VEXT_ST_STRIDE(vssw_v_d, int32_t, int64_t, stw_d)
+-GEN_VEXT_ST_STRIDE(vsse_v_b, int8_t,  int8_t,  ste_b)
+-GEN_VEXT_ST_STRIDE(vsse_v_h, int16_t, int16_t, ste_h)
+-GEN_VEXT_ST_STRIDE(vsse_v_w, int32_t, int32_t, ste_w)
+-GEN_VEXT_ST_STRIDE(vsse_v_d, int64_t, int64_t, ste_d)
++                     sizeof(ETYPE), GETPC(), MMU_DATA_STORE);           \
++}
++
++GEN_VEXT_ST_STRIDE(vsse8_v,  int8_t,  ste_b)
++GEN_VEXT_ST_STRIDE(vsse16_v, int16_t, ste_h)
++GEN_VEXT_ST_STRIDE(vsse32_v, int32_t, ste_w)
++GEN_VEXT_ST_STRIDE(vsse64_v, int64_t, ste_d)
+ 
+ /*
+  *** unit-stride: access elements stored contiguously in memory
+@@ -340,20 +281,20 @@ GEN_VEXT_ST_STRIDE(vsse_v_d, int64_t, int64_t, ste_d)
+ /* unmasked unit-stride load and store operation*/
+ static void
+ vext_ldst_us(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc,
+-             vext_ldst_elem_fn *ldst_elem, uint32_t esz, uint32_t msz,
+-             uintptr_t ra, MMUAccessType access_type)
++             vext_ldst_elem_fn *ldst_elem,
++             uint32_t esz, uintptr_t ra, MMUAccessType access_type)
+ {
+     uint32_t i, k;
+     uint32_t nf = vext_nf(desc);
+     uint32_t vlmax = vext_maxsz(desc) / esz;
+ 
+     /* probe every access */
+-    probe_pages(env, base, env->vl * nf * msz, ra, access_type);
++    probe_pages(env, base, env->vl * nf * esz, ra, access_type);
+     /* load bytes from guest memory */
+     for (i = 0; i < env->vl; i++) {
+         k = 0;
+         while (k < nf) {
+-            target_ulong addr = base + (i * nf + k) * msz;
++            target_ulong addr = base + (i * nf + k) * esz;
+             ldst_elem(env, addr, i + k * vlmax, vd, ra);
+             k++;
+         }
+@@ -365,76 +306,47 @@ vext_ldst_us(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc,
+  * stride = NF * sizeof (MTYPE)
+  */
+ 
+-#define GEN_VEXT_LD_US(NAME, MTYPE, ETYPE, LOAD_FN)                     \
++#define GEN_VEXT_LD_US(NAME, ETYPE, LOAD_FN)                            \
+ void HELPER(NAME##_mask)(void *vd, void *v0, target_ulong base,         \
+                          CPURISCVState *env, uint32_t desc)             \
+ {                                                                       \
+-    uint32_t stride = vext_nf(desc) * sizeof(MTYPE);                    \
++    uint32_t stride = vext_nf(desc) * sizeof(ETYPE);                    \
+     vext_ldst_stride(vd, v0, base, stride, env, desc, false, LOAD_FN,   \
+-                     sizeof(ETYPE), sizeof(MTYPE),                      \
+-                     GETPC(), MMU_DATA_LOAD);                           \
++                     sizeof(ETYPE), GETPC(), MMU_DATA_LOAD);            \
+ }                                                                       \
+                                                                         \
+ void HELPER(NAME)(void *vd, void *v0, target_ulong base,                \
+                   CPURISCVState *env, uint32_t desc)                    \
+ {                                                                       \
+     vext_ldst_us(vd, base, env, desc, LOAD_FN,                          \
+-                 sizeof(ETYPE), sizeof(MTYPE), GETPC(), MMU_DATA_LOAD); \
+-}
+-
+-GEN_VEXT_LD_US(vlb_v_b,  int8_t,   int8_t,   ldb_b)
+-GEN_VEXT_LD_US(vlb_v_h,  int8_t,   int16_t,  ldb_h)
+-GEN_VEXT_LD_US(vlb_v_w,  int8_t,   int32_t,  ldb_w)
+-GEN_VEXT_LD_US(vlb_v_d,  int8_t,   int64_t,  ldb_d)
+-GEN_VEXT_LD_US(vlh_v_h,  int16_t,  int16_t,  ldh_h)
+-GEN_VEXT_LD_US(vlh_v_w,  int16_t,  int32_t,  ldh_w)
+-GEN_VEXT_LD_US(vlh_v_d,  int16_t,  int64_t,  ldh_d)
+-GEN_VEXT_LD_US(vlw_v_w,  int32_t,  int32_t,  ldw_w)
+-GEN_VEXT_LD_US(vlw_v_d,  int32_t,  int64_t,  ldw_d)
+-GEN_VEXT_LD_US(vle_v_b,  int8_t,   int8_t,   lde_b)
+-GEN_VEXT_LD_US(vle_v_h,  int16_t,  int16_t,  lde_h)
+-GEN_VEXT_LD_US(vle_v_w,  int32_t,  int32_t,  lde_w)
+-GEN_VEXT_LD_US(vle_v_d,  int64_t,  int64_t,  lde_d)
+-GEN_VEXT_LD_US(vlbu_v_b, uint8_t,  uint8_t,  ldbu_b)
+-GEN_VEXT_LD_US(vlbu_v_h, uint8_t,  uint16_t, ldbu_h)
+-GEN_VEXT_LD_US(vlbu_v_w, uint8_t,  uint32_t, ldbu_w)
+-GEN_VEXT_LD_US(vlbu_v_d, uint8_t,  uint64_t, ldbu_d)
+-GEN_VEXT_LD_US(vlhu_v_h, uint16_t, uint16_t, ldhu_h)
+-GEN_VEXT_LD_US(vlhu_v_w, uint16_t, uint32_t, ldhu_w)
+-GEN_VEXT_LD_US(vlhu_v_d, uint16_t, uint64_t, ldhu_d)
+-GEN_VEXT_LD_US(vlwu_v_w, uint32_t, uint32_t, ldwu_w)
+-GEN_VEXT_LD_US(vlwu_v_d, uint32_t, uint64_t, ldwu_d)
+-
+-#define GEN_VEXT_ST_US(NAME, MTYPE, ETYPE, STORE_FN)                    \
++                 sizeof(ETYPE), GETPC(), MMU_DATA_LOAD);                \
++}
++
++GEN_VEXT_LD_US(vle8_v,  int8_t,  lde_b)
++GEN_VEXT_LD_US(vle16_v, int16_t, lde_h)
++GEN_VEXT_LD_US(vle32_v, int32_t, lde_w)
++GEN_VEXT_LD_US(vle64_v, int64_t, lde_d)
++
++#define GEN_VEXT_ST_US(NAME, ETYPE, STORE_FN)                           \
+ void HELPER(NAME##_mask)(void *vd, void *v0, target_ulong base,         \
+                          CPURISCVState *env, uint32_t desc)             \
+ {                                                                       \
+-    uint32_t stride = vext_nf(desc) * sizeof(MTYPE);                    \
++    uint32_t stride = vext_nf(desc) * sizeof(ETYPE);                    \
+     vext_ldst_stride(vd, v0, base, stride, env, desc, false, STORE_FN,  \
+-                     sizeof(ETYPE), sizeof(MTYPE),                      \
+-                     GETPC(), MMU_DATA_STORE);                          \
++                     sizeof(ETYPE), GETPC(), MMU_DATA_STORE);           \
+ }                                                                       \
+                                                                         \
+ void HELPER(NAME)(void *vd, void *v0, target_ulong base,                \
+                   CPURISCVState *env, uint32_t desc)                    \
+ {                                                                       \
+     vext_ldst_us(vd, base, env, desc, STORE_FN,                         \
+-                 sizeof(ETYPE), sizeof(MTYPE), GETPC(), MMU_DATA_STORE);\
+-}
+-
+-GEN_VEXT_ST_US(vsb_v_b, int8_t,  int8_t , stb_b)
+-GEN_VEXT_ST_US(vsb_v_h, int8_t,  int16_t, stb_h)
+-GEN_VEXT_ST_US(vsb_v_w, int8_t,  int32_t, stb_w)
+-GEN_VEXT_ST_US(vsb_v_d, int8_t,  int64_t, stb_d)
+-GEN_VEXT_ST_US(vsh_v_h, int16_t, int16_t, sth_h)
+-GEN_VEXT_ST_US(vsh_v_w, int16_t, int32_t, sth_w)
+-GEN_VEXT_ST_US(vsh_v_d, int16_t, int64_t, sth_d)
+-GEN_VEXT_ST_US(vsw_v_w, int32_t, int32_t, stw_w)
+-GEN_VEXT_ST_US(vsw_v_d, int32_t, int64_t, stw_d)
+-GEN_VEXT_ST_US(vse_v_b, int8_t,  int8_t , ste_b)
+-GEN_VEXT_ST_US(vse_v_h, int16_t, int16_t, ste_h)
+-GEN_VEXT_ST_US(vse_v_w, int32_t, int32_t, ste_w)
+-GEN_VEXT_ST_US(vse_v_d, int64_t, int64_t, ste_d)
++                 sizeof(ETYPE), GETPC(), MMU_DATA_STORE);               \
++}
++
++GEN_VEXT_ST_US(vse8_v,  int8_t,  ste_b)
++GEN_VEXT_ST_US(vse16_v, int16_t, ste_h)
++GEN_VEXT_ST_US(vse32_v, int32_t, ste_w)
++GEN_VEXT_ST_US(vse64_v, int64_t, ste_d)
+ 
+ /*
+  *** index: access vector element from indexed memory
+-- 
+2.33.1
+
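The new vext_check_store() above derives EMUL from the effective element width: with eew and sew as log2 width codes and lmul as a signed log2 multiplier, EMUL = EEW / SEW * LMUL reduces to an addition of logarithms, and the [-3, 3] range corresponds to the 1/8 <= EMUL <= 8 rule in the comment. A small standalone sketch of that arithmetic, with emul_in_range() and main() as illustrative names only:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* eew and sew are log2 element-width codes (0 = 8b .. 3 = 64b);
 * lmul is the signed log2 register-group multiplier (-3 = 1/8 .. 3 = 8). */
static bool emul_in_range(int8_t eew, int8_t sew, int8_t lmul)
{
    int8_t emul = eew - sew + lmul;       /* log2(EMUL) */
    return emul >= -3 && emul <= 3;       /* 1/8 <= EMUL <= 8 */
}

int main(void)
{
    /* vle8 (EEW=8) under SEW=64, LMUL=8: EMUL = 8/64*8 = 1 -> legal */
    printf("%d\n", emul_in_range(0, 3, 3));
    /* vle64 (EEW=64) under SEW=8, LMUL=8: EMUL = 64 -> out of range */
    printf("%d\n", emul_in_range(3, 0, 3));
    return 0;
}
```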

+ 527 - 0
recipes-devtools/qemu/qemu/0027-target-riscv-rvv-1.0-index-load-and-store-instructio.patch

@@ -0,0 +1,527 @@
+From 4425aa70a79cdb195cc7dce9f3d44090609c903d Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Fri, 14 Aug 2020 18:07:31 +0800
+Subject: [PATCH 027/107] target/riscv: rvv-1.0: index load and store
+ instructions
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
+---
+ target/riscv/helper.h                   |  67 ++++----
+ target/riscv/insn32.decode              |  21 ++-
+ target/riscv/insn_trans/trans_rvv.c.inc | 209 ++++++++++++++++--------
+ target/riscv/vector_helper.c            |  89 +++++-----
+ 4 files changed, 222 insertions(+), 164 deletions(-)
+
+diff --git a/target/riscv/helper.h b/target/riscv/helper.h
+index 3f4d460054..927d28d53a 100644
+--- a/target/riscv/helper.h
++++ b/target/riscv/helper.h
+@@ -137,41 +137,38 @@ DEF_HELPER_6(vsse8_v, void, ptr, ptr, tl, tl, env, i32)
+ DEF_HELPER_6(vsse16_v, void, ptr, ptr, tl, tl, env, i32)
+ DEF_HELPER_6(vsse32_v, void, ptr, ptr, tl, tl, env, i32)
+ DEF_HELPER_6(vsse64_v, void, ptr, ptr, tl, tl, env, i32)
+-DEF_HELPER_6(vlxb_v_b, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vlxb_v_h, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vlxb_v_w, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vlxb_v_d, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vlxh_v_h, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vlxh_v_w, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vlxh_v_d, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vlxw_v_w, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vlxw_v_d, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vlxe_v_b, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vlxe_v_h, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vlxe_v_w, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vlxe_v_d, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vlxbu_v_b, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vlxbu_v_h, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vlxbu_v_w, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vlxbu_v_d, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vlxhu_v_h, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vlxhu_v_w, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vlxhu_v_d, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vlxwu_v_w, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vlxwu_v_d, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vsxb_v_b, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vsxb_v_h, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vsxb_v_w, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vsxb_v_d, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vsxh_v_h, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vsxh_v_w, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vsxh_v_d, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vsxw_v_w, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vsxw_v_d, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vsxe_v_b, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vsxe_v_h, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vsxe_v_w, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vsxe_v_d, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vlxei8_8_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vlxei8_16_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vlxei8_32_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vlxei8_64_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vlxei16_8_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vlxei16_16_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vlxei16_32_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vlxei16_64_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vlxei32_8_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vlxei32_16_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vlxei32_32_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vlxei32_64_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vlxei64_8_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vlxei64_16_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vlxei64_32_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vlxei64_64_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vsxei8_8_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vsxei8_16_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vsxei8_32_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vsxei8_64_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vsxei16_8_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vsxei16_16_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vsxei16_32_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vsxei16_64_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vsxei32_8_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vsxei32_16_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vsxei32_32_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vsxei32_64_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vsxei64_8_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vsxei64_16_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vsxei64_32_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vsxei64_64_v, void, ptr, ptr, tl, ptr, env, i32)
+ DEF_HELPER_5(vlbff_v_b, void, ptr, ptr, tl, env, i32)
+ DEF_HELPER_5(vlbff_v_h, void, ptr, ptr, tl, env, i32)
+ DEF_HELPER_5(vlbff_v_w, void, ptr, ptr, tl, env, i32)
+diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
+index 03a1f6e53e..05c3c18028 100644
+--- a/target/riscv/insn32.decode
++++ b/target/riscv/insn32.decode
+@@ -268,18 +268,17 @@ vlbuff_v   ... 000 . 10000 ..... 000 ..... 0000111 @r2_nfvm
+ vlhuff_v   ... 000 . 10000 ..... 101 ..... 0000111 @r2_nfvm
+ vlwuff_v   ... 000 . 10000 ..... 110 ..... 0000111 @r2_nfvm
+ 
+-vlxb_v     ... 111 . ..... ..... 000 ..... 0000111 @r_nfvm
+-vlxh_v     ... 111 . ..... ..... 101 ..... 0000111 @r_nfvm
+-vlxw_v     ... 111 . ..... ..... 110 ..... 0000111 @r_nfvm
+-vlxe_v     ... 011 . ..... ..... 111 ..... 0000111 @r_nfvm
+-vlxbu_v    ... 011 . ..... ..... 000 ..... 0000111 @r_nfvm
+-vlxhu_v    ... 011 . ..... ..... 101 ..... 0000111 @r_nfvm
+-vlxwu_v    ... 011 . ..... ..... 110 ..... 0000111 @r_nfvm
++# Vector ordered-indexed and unordered-indexed load insns.
++vlxei8_v      ... 0-1 . ..... ..... 000 ..... 0000111 @r_nfvm
++vlxei16_v     ... 0-1 . ..... ..... 101 ..... 0000111 @r_nfvm
++vlxei32_v     ... 0-1 . ..... ..... 110 ..... 0000111 @r_nfvm
++vlxei64_v     ... 0-1 . ..... ..... 111 ..... 0000111 @r_nfvm
++
+ # Vector ordered-indexed and unordered-indexed store insns.
+-vsxb_v     ... -11 . ..... ..... 000 ..... 0100111 @r_nfvm
+-vsxh_v     ... -11 . ..... ..... 101 ..... 0100111 @r_nfvm
+-vsxw_v     ... -11 . ..... ..... 110 ..... 0100111 @r_nfvm
+-vsxe_v     ... -11 . ..... ..... 111 ..... 0100111 @r_nfvm
++vsxei8_v      ... 0-1 . ..... ..... 000 ..... 0100111 @r_nfvm
++vsxei16_v     ... 0-1 . ..... ..... 101 ..... 0100111 @r_nfvm
++vsxei32_v     ... 0-1 . ..... ..... 110 ..... 0100111 @r_nfvm
++vsxei64_v     ... 0-1 . ..... ..... 111 ..... 0100111 @r_nfvm
+ 
+ #*** Vector AMO operations are encoded under the standard AMO major opcode ***
+ vamoswapw_v     00001 . . ..... ..... 110 ..... 0101111 @r_wdvm
+diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
+index e4c83cf74d..74dd7ee387 100644
+--- a/target/riscv/insn_trans/trans_rvv.c.inc
++++ b/target/riscv/insn_trans/trans_rvv.c.inc
+@@ -126,12 +126,6 @@ static bool require_noover(const int8_t dst, const int8_t dst_lmul,
+     return !is_overlapped(dst, dst_size, src, src_size);
+ }
+ 
+-static bool require_noover_seg(const int8_t dst, const int8_t nf,
+-                               const int8_t src)
+-{
+-    return !is_overlapped(dst, nf, src, 1);
+-}
+-
+ static bool do_vsetvl(DisasContext *ctx, int rd, int rs1, TCGv s2)
+ {
+     TCGv s1, dst;
+@@ -224,9 +218,76 @@ static bool vext_check_load(DisasContext *s, int vd, int nf, int vm,
+     return vext_check_store(s, vd, nf, eew) && require_vm(vm, vd);
+ }
+ 
+-static bool vext_check_isa_ill(DisasContext *s)
++/*
++ * Vector indexed, indexed segment store check function.
++ *
++ * Rules to be checked here:
++ *   1. EMUL must be within the range: 1/8 <= EMUL <= 8. (Section 7.3)
++ *   2. Index vector register number is multiples of EMUL.
++ *      (Section 3.3.2, 7.3)
++ *   3. Destination vector register number is multiples of LMUL.
++ *      (Section 3.3.2, 7.3)
++ *   4. The EMUL setting must be such that EMUL * NFIELDS ≤ 8. (Section 7.8)
++ *   5. Vector register numbers accessed by the segment load or store
++ *      cannot increment past 31. (Section 7.8)
++ */
++static bool vext_check_st_index(DisasContext *s, int vd, int vs2, int nf,
++                                uint8_t eew)
+ {
+-    return !s->vill;
++    int8_t emul = eew - s->sew + s->lmul;
++    return (emul >= -3 && emul <= 3) &&
++            require_align(vs2, emul) &&
++            require_align(vd, s->lmul) &&
++            require_nf(vd, nf, s->lmul);
++}
++
++/*
++ * Vector indexed, indexed segment load check function.
++ *
++ * Rules to be checked here:
++ *   1. All rules that apply to store instructions also apply
++ *      to load instructions.
++ *   2. Destination vector register group for a masked vector
++ *      instruction cannot overlap the source mask register (v0).
++ *      (Section 5.3)
++ *   3. Destination vector register cannot overlap a source vector
++ *      register (vs2) group.
++ *      (Section 5.2)
++ *   4. Destination vector register groups cannot overlap
++ *      the source vector register (vs2) group for
++ *      indexed segment load instructions. (Section 7.8.3)
++ */
++static bool vext_check_ld_index(DisasContext *s, int vd, int vs2,
++                                int nf, int vm, uint8_t eew)
++{
++    int8_t seg_vd;
++    int8_t emul = eew - s->sew + s->lmul;
++    bool ret = vext_check_st_index(s, vd, vs2, nf, eew) &&
++        require_vm(vm, vd);
++
++    /* Each segment register group has to follow overlap rules. */
++    for (int i = 0; i < nf; ++i) {
++        seg_vd = vd + (1 << MAX(s->lmul, 0)) * i;
++
++        if (eew > s->sew) {
++            if (seg_vd != vs2) {
++                ret &= require_noover(seg_vd, s->lmul, vs2, emul);
++            }
++        } else if (eew < s->sew) {
++            ret &= require_noover(seg_vd, s->lmul, vs2, emul);
++        }
++
++        /*
++         * Destination vector register groups cannot overlap
++         * the source vector register (vs2) group for
++         * indexed segment load instructions.
++         */
++        if (nf > 1) {
++            ret &= !is_overlapped(seg_vd, 1 << MAX(s->lmul, 0),
++                                  vs2, 1 << MAX(emul, 0));
++        }
++    }
++    return ret;
+ }
+ 
+ static bool vext_check_ss(DisasContext *s, int vd, int vs, int vm)
+@@ -747,31 +808,38 @@ static bool ldst_index_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
+     return true;
+ }
+ 
+-static bool ld_index_op(DisasContext *s, arg_rnfvm *a, uint8_t seq)
++static bool ld_index_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
+ {
+     uint32_t data = 0;
+     gen_helper_ldst_index *fn;
+-    static gen_helper_ldst_index * const fns[7][4] = {
+-        { gen_helper_vlxb_v_b,  gen_helper_vlxb_v_h,
+-          gen_helper_vlxb_v_w,  gen_helper_vlxb_v_d },
+-        { NULL,                 gen_helper_vlxh_v_h,
+-          gen_helper_vlxh_v_w,  gen_helper_vlxh_v_d },
+-        { NULL,                 NULL,
+-          gen_helper_vlxw_v_w,  gen_helper_vlxw_v_d },
+-        { gen_helper_vlxe_v_b,  gen_helper_vlxe_v_h,
+-          gen_helper_vlxe_v_w,  gen_helper_vlxe_v_d },
+-        { gen_helper_vlxbu_v_b, gen_helper_vlxbu_v_h,
+-          gen_helper_vlxbu_v_w, gen_helper_vlxbu_v_d },
+-        { NULL,                 gen_helper_vlxhu_v_h,
+-          gen_helper_vlxhu_v_w, gen_helper_vlxhu_v_d },
+-        { NULL,                 NULL,
+-          gen_helper_vlxwu_v_w, gen_helper_vlxwu_v_d },
++    static gen_helper_ldst_index * const fns[4][4] = {
++        /*
++         * offset vector register group EEW = 8,
++         * data vector register group EEW = SEW
++         */
++        { gen_helper_vlxei8_8_v,  gen_helper_vlxei8_16_v,
++          gen_helper_vlxei8_32_v, gen_helper_vlxei8_64_v },
++        /*
++         * offset vector register group EEW = 16,
++         * data vector register group EEW = SEW
++         */
++        { gen_helper_vlxei16_8_v, gen_helper_vlxei16_16_v,
++          gen_helper_vlxei16_32_v, gen_helper_vlxei16_64_v },
++        /*
++         * offset vector register group EEW = 32,
++         * data vector register group EEW = SEW
++         */
++        { gen_helper_vlxei32_8_v, gen_helper_vlxei32_16_v,
++          gen_helper_vlxei32_32_v, gen_helper_vlxei32_64_v },
++        /*
++         * offset vector register group EEW = 64,
++         * data vector register group EEW = SEW
++         */
++        { gen_helper_vlxei64_8_v, gen_helper_vlxei64_16_v,
++          gen_helper_vlxei64_32_v, gen_helper_vlxei64_64_v }
+     };
+ 
+-    fn =  fns[seq][s->sew];
+-    if (fn == NULL) {
+-        return false;
+-    }
++    fn = fns[eew][s->sew];
+ 
+     data = FIELD_DP32(data, VDATA, VM, a->vm);
+     data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+@@ -779,50 +847,50 @@ static bool ld_index_op(DisasContext *s, arg_rnfvm *a, uint8_t seq)
+     return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s, false);
+ }
+ 
+-/*
+- * For vector indexed segment loads, the destination vector register
+- * groups cannot overlap the source vector register group (specified by
+- * `vs2`), else an illegal instruction exception is raised.
+- */
+-static bool ld_index_check(DisasContext *s, arg_rnfvm* a)
++static bool ld_index_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
+ {
+-    return (vext_check_isa_ill(s) &&
+-            vext_check_overlap_mask(s, a->rd, a->vm, false) &&
+-            vext_check_reg(s, a->rd, false) &&
+-            vext_check_reg(s, a->rs2, false) &&
+-            vext_check_nf(s, a->nf) &&
+-            ((a->nf == 1) ||
+-             vext_check_overlap_group(a->rd, a->nf << s->lmul,
+-                                      a->rs2, 1 << s->lmul)));
++    return require_rvv(s) &&
++           vext_check_isa_ill(s) &&
++           vext_check_ld_index(s, a->rd, a->rs2, a->nf, a->vm, eew);
+ }
+ 
+-GEN_VEXT_TRANS(vlxb_v, 0, rnfvm, ld_index_op, ld_index_check)
+-GEN_VEXT_TRANS(vlxh_v, 1, rnfvm, ld_index_op, ld_index_check)
+-GEN_VEXT_TRANS(vlxw_v, 2, rnfvm, ld_index_op, ld_index_check)
+-GEN_VEXT_TRANS(vlxe_v, 3, rnfvm, ld_index_op, ld_index_check)
+-GEN_VEXT_TRANS(vlxbu_v, 4, rnfvm, ld_index_op, ld_index_check)
+-GEN_VEXT_TRANS(vlxhu_v, 5, rnfvm, ld_index_op, ld_index_check)
+-GEN_VEXT_TRANS(vlxwu_v, 6, rnfvm, ld_index_op, ld_index_check)
++GEN_VEXT_TRANS(vlxei8_v,  MO_8,  rnfvm, ld_index_op, ld_index_check)
++GEN_VEXT_TRANS(vlxei16_v, MO_16, rnfvm, ld_index_op, ld_index_check)
++GEN_VEXT_TRANS(vlxei32_v, MO_32, rnfvm, ld_index_op, ld_index_check)
++GEN_VEXT_TRANS(vlxei64_v, MO_64, rnfvm, ld_index_op, ld_index_check)
+ 
+-static bool st_index_op(DisasContext *s, arg_rnfvm *a, uint8_t seq)
++static bool st_index_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
+ {
+     uint32_t data = 0;
+     gen_helper_ldst_index *fn;
+     static gen_helper_ldst_index * const fns[4][4] = {
+-        { gen_helper_vsxb_v_b,  gen_helper_vsxb_v_h,
+-          gen_helper_vsxb_v_w,  gen_helper_vsxb_v_d },
+-        { NULL,                 gen_helper_vsxh_v_h,
+-          gen_helper_vsxh_v_w,  gen_helper_vsxh_v_d },
+-        { NULL,                 NULL,
+-          gen_helper_vsxw_v_w,  gen_helper_vsxw_v_d },
+-        { gen_helper_vsxe_v_b,  gen_helper_vsxe_v_h,
+-          gen_helper_vsxe_v_w,  gen_helper_vsxe_v_d }
++        /*
++         * offset vector register group EEW = 8,
++         * data vector register group EEW = SEW
++         */
++        { gen_helper_vsxei8_8_v,  gen_helper_vsxei8_16_v,
++          gen_helper_vsxei8_32_v, gen_helper_vsxei8_64_v },
++        /*
++         * offset vector register group EEW = 16,
++         * data vector register group EEW = SEW
++         */
++        { gen_helper_vsxei16_8_v, gen_helper_vsxei16_16_v,
++          gen_helper_vsxei16_32_v, gen_helper_vsxei16_64_v },
++        /*
++         * offset vector register group EEW = 32,
++         * data vector register group EEW = SEW
++         */
++        { gen_helper_vsxei32_8_v, gen_helper_vsxei32_16_v,
++          gen_helper_vsxei32_32_v, gen_helper_vsxei32_64_v },
++        /*
++         * offset vector register group EEW = 64,
++         * data vector register group EEW = SEW
++         */
++        { gen_helper_vsxei64_8_v, gen_helper_vsxei64_16_v,
++          gen_helper_vsxei64_32_v, gen_helper_vsxei64_64_v }
+     };
+ 
+-    fn =  fns[seq][s->sew];
+-    if (fn == NULL) {
+-        return false;
+-    }
++    fn = fns[eew][s->sew];
+ 
+     data = FIELD_DP32(data, VDATA, VM, a->vm);
+     data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+@@ -830,18 +898,17 @@ static bool st_index_op(DisasContext *s, arg_rnfvm *a, uint8_t seq)
+     return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s, true);
+ }
+ 
+-static bool st_index_check(DisasContext *s, arg_rnfvm* a)
++static bool st_index_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
+ {
+-    return (vext_check_isa_ill(s) &&
+-            vext_check_reg(s, a->rd, false) &&
+-            vext_check_reg(s, a->rs2, false) &&
+-            vext_check_nf(s, a->nf));
++    return require_rvv(s) &&
++           vext_check_isa_ill(s) &&
++           vext_check_st_index(s, a->rd, a->rs2, a->nf, eew);
+ }
+ 
+-GEN_VEXT_TRANS(vsxb_v, 0, rnfvm, st_index_op, st_index_check)
+-GEN_VEXT_TRANS(vsxh_v, 1, rnfvm, st_index_op, st_index_check)
+-GEN_VEXT_TRANS(vsxw_v, 2, rnfvm, st_index_op, st_index_check)
+-GEN_VEXT_TRANS(vsxe_v, 3, rnfvm, st_index_op, st_index_check)
++GEN_VEXT_TRANS(vsxei8_v,  MO_8,  rnfvm, st_index_op, st_index_check)
++GEN_VEXT_TRANS(vsxei16_v, MO_16, rnfvm, st_index_op, st_index_check)
++GEN_VEXT_TRANS(vsxei32_v, MO_32, rnfvm, st_index_op, st_index_check)
++GEN_VEXT_TRANS(vsxei64_v, MO_64, rnfvm, st_index_op, st_index_check)
+ 
+ /*
+  *** unit stride fault-only-first load
+diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
+index ad45dd9006..95d367489b 100644
+--- a/target/riscv/vector_helper.c
++++ b/target/riscv/vector_helper.c
+@@ -371,8 +371,7 @@ vext_ldst_index(void *vd, void *v0, target_ulong base,
+                 void *vs2, CPURISCVState *env, uint32_t desc,
+                 vext_get_index_addr get_index_addr,
+                 vext_ldst_elem_fn *ldst_elem,
+-                uint32_t esz, uint32_t msz, uintptr_t ra,
+-                MMUAccessType access_type)
++                uint32_t esz, uintptr_t ra, MMUAccessType access_type)
+ {
+     uint32_t i, k;
+     uint32_t nf = vext_nf(desc);
+@@ -384,7 +383,7 @@ vext_ldst_index(void *vd, void *v0, target_ulong base,
+         if (!vm && !vext_elem_mask(v0, i)) {
+             continue;
+         }
+-        probe_pages(env, get_index_addr(base, i, vs2), nf * msz, ra,
++        probe_pages(env, get_index_addr(base, i, vs2), nf * esz, ra,
+                     access_type);
+     }
+     /* load bytes from guest memory */
+@@ -394,67 +393,63 @@ vext_ldst_index(void *vd, void *v0, target_ulong base,
+             continue;
+         }
+         while (k < nf) {
+-            abi_ptr addr = get_index_addr(base, i, vs2) + k * msz;
++            abi_ptr addr = get_index_addr(base, i, vs2) + k * esz;
+             ldst_elem(env, addr, i + k * vlmax, vd, ra);
+             k++;
+         }
+     }
+ }
+ 
+-#define GEN_VEXT_LD_INDEX(NAME, MTYPE, ETYPE, INDEX_FN, LOAD_FN)           \
++#define GEN_VEXT_LD_INDEX(NAME, ETYPE, INDEX_FN, LOAD_FN)                  \
+ void HELPER(NAME)(void *vd, void *v0, target_ulong base,                   \
+                   void *vs2, CPURISCVState *env, uint32_t desc)            \
+ {                                                                          \
+     vext_ldst_index(vd, v0, base, vs2, env, desc, INDEX_FN,                \
+-                    LOAD_FN, sizeof(ETYPE), sizeof(MTYPE),                 \
+-                    GETPC(), MMU_DATA_LOAD);                               \
+-}
+-
+-GEN_VEXT_LD_INDEX(vlxb_v_b,  int8_t,   int8_t,   idx_b, ldb_b)
+-GEN_VEXT_LD_INDEX(vlxb_v_h,  int8_t,   int16_t,  idx_h, ldb_h)
+-GEN_VEXT_LD_INDEX(vlxb_v_w,  int8_t,   int32_t,  idx_w, ldb_w)
+-GEN_VEXT_LD_INDEX(vlxb_v_d,  int8_t,   int64_t,  idx_d, ldb_d)
+-GEN_VEXT_LD_INDEX(vlxh_v_h,  int16_t,  int16_t,  idx_h, ldh_h)
+-GEN_VEXT_LD_INDEX(vlxh_v_w,  int16_t,  int32_t,  idx_w, ldh_w)
+-GEN_VEXT_LD_INDEX(vlxh_v_d,  int16_t,  int64_t,  idx_d, ldh_d)
+-GEN_VEXT_LD_INDEX(vlxw_v_w,  int32_t,  int32_t,  idx_w, ldw_w)
+-GEN_VEXT_LD_INDEX(vlxw_v_d,  int32_t,  int64_t,  idx_d, ldw_d)
+-GEN_VEXT_LD_INDEX(vlxe_v_b,  int8_t,   int8_t,   idx_b, lde_b)
+-GEN_VEXT_LD_INDEX(vlxe_v_h,  int16_t,  int16_t,  idx_h, lde_h)
+-GEN_VEXT_LD_INDEX(vlxe_v_w,  int32_t,  int32_t,  idx_w, lde_w)
+-GEN_VEXT_LD_INDEX(vlxe_v_d,  int64_t,  int64_t,  idx_d, lde_d)
+-GEN_VEXT_LD_INDEX(vlxbu_v_b, uint8_t,  uint8_t,  idx_b, ldbu_b)
+-GEN_VEXT_LD_INDEX(vlxbu_v_h, uint8_t,  uint16_t, idx_h, ldbu_h)
+-GEN_VEXT_LD_INDEX(vlxbu_v_w, uint8_t,  uint32_t, idx_w, ldbu_w)
+-GEN_VEXT_LD_INDEX(vlxbu_v_d, uint8_t,  uint64_t, idx_d, ldbu_d)
+-GEN_VEXT_LD_INDEX(vlxhu_v_h, uint16_t, uint16_t, idx_h, ldhu_h)
+-GEN_VEXT_LD_INDEX(vlxhu_v_w, uint16_t, uint32_t, idx_w, ldhu_w)
+-GEN_VEXT_LD_INDEX(vlxhu_v_d, uint16_t, uint64_t, idx_d, ldhu_d)
+-GEN_VEXT_LD_INDEX(vlxwu_v_w, uint32_t, uint32_t, idx_w, ldwu_w)
+-GEN_VEXT_LD_INDEX(vlxwu_v_d, uint32_t, uint64_t, idx_d, ldwu_d)
+-
+-#define GEN_VEXT_ST_INDEX(NAME, MTYPE, ETYPE, INDEX_FN, STORE_FN)\
++                    LOAD_FN, sizeof(ETYPE), GETPC(), MMU_DATA_LOAD);       \
++}
++
++GEN_VEXT_LD_INDEX(vlxei8_8_v,   int8_t,  idx_b, lde_b)
++GEN_VEXT_LD_INDEX(vlxei8_16_v,  int16_t, idx_b, lde_h)
++GEN_VEXT_LD_INDEX(vlxei8_32_v,  int32_t, idx_b, lde_w)
++GEN_VEXT_LD_INDEX(vlxei8_64_v,  int64_t, idx_b, lde_d)
++GEN_VEXT_LD_INDEX(vlxei16_8_v,  int8_t,  idx_h, lde_b)
++GEN_VEXT_LD_INDEX(vlxei16_16_v, int16_t, idx_h, lde_h)
++GEN_VEXT_LD_INDEX(vlxei16_32_v, int32_t, idx_h, lde_w)
++GEN_VEXT_LD_INDEX(vlxei16_64_v, int64_t, idx_h, lde_d)
++GEN_VEXT_LD_INDEX(vlxei32_8_v,  int8_t,  idx_w, lde_b)
++GEN_VEXT_LD_INDEX(vlxei32_16_v, int16_t, idx_w, lde_h)
++GEN_VEXT_LD_INDEX(vlxei32_32_v, int32_t, idx_w, lde_w)
++GEN_VEXT_LD_INDEX(vlxei32_64_v, int64_t, idx_w, lde_d)
++GEN_VEXT_LD_INDEX(vlxei64_8_v,  int8_t,  idx_d, lde_b)
++GEN_VEXT_LD_INDEX(vlxei64_16_v, int16_t, idx_d, lde_h)
++GEN_VEXT_LD_INDEX(vlxei64_32_v, int32_t, idx_d, lde_w)
++GEN_VEXT_LD_INDEX(vlxei64_64_v, int64_t, idx_d, lde_d)
++
++#define GEN_VEXT_ST_INDEX(NAME, ETYPE, INDEX_FN, STORE_FN)       \
+ void HELPER(NAME)(void *vd, void *v0, target_ulong base,         \
+                   void *vs2, CPURISCVState *env, uint32_t desc)  \
+ {                                                                \
+     vext_ldst_index(vd, v0, base, vs2, env, desc, INDEX_FN,      \
+-                    STORE_FN, sizeof(ETYPE), sizeof(MTYPE),      \
++                    STORE_FN, sizeof(ETYPE),                     \
+                     GETPC(), MMU_DATA_STORE);                    \
+ }
+ 
+-GEN_VEXT_ST_INDEX(vsxb_v_b, int8_t,  int8_t,  idx_b, stb_b)
+-GEN_VEXT_ST_INDEX(vsxb_v_h, int8_t,  int16_t, idx_h, stb_h)
+-GEN_VEXT_ST_INDEX(vsxb_v_w, int8_t,  int32_t, idx_w, stb_w)
+-GEN_VEXT_ST_INDEX(vsxb_v_d, int8_t,  int64_t, idx_d, stb_d)
+-GEN_VEXT_ST_INDEX(vsxh_v_h, int16_t, int16_t, idx_h, sth_h)
+-GEN_VEXT_ST_INDEX(vsxh_v_w, int16_t, int32_t, idx_w, sth_w)
+-GEN_VEXT_ST_INDEX(vsxh_v_d, int16_t, int64_t, idx_d, sth_d)
+-GEN_VEXT_ST_INDEX(vsxw_v_w, int32_t, int32_t, idx_w, stw_w)
+-GEN_VEXT_ST_INDEX(vsxw_v_d, int32_t, int64_t, idx_d, stw_d)
+-GEN_VEXT_ST_INDEX(vsxe_v_b, int8_t,  int8_t,  idx_b, ste_b)
+-GEN_VEXT_ST_INDEX(vsxe_v_h, int16_t, int16_t, idx_h, ste_h)
+-GEN_VEXT_ST_INDEX(vsxe_v_w, int32_t, int32_t, idx_w, ste_w)
+-GEN_VEXT_ST_INDEX(vsxe_v_d, int64_t, int64_t, idx_d, ste_d)
++GEN_VEXT_ST_INDEX(vsxei8_8_v,   int8_t,  idx_b, ste_b)
++GEN_VEXT_ST_INDEX(vsxei8_16_v,  int16_t, idx_b, ste_h)
++GEN_VEXT_ST_INDEX(vsxei8_32_v,  int32_t, idx_b, ste_w)
++GEN_VEXT_ST_INDEX(vsxei8_64_v,  int64_t, idx_b, ste_d)
++GEN_VEXT_ST_INDEX(vsxei16_8_v,  int8_t,  idx_h, ste_b)
++GEN_VEXT_ST_INDEX(vsxei16_16_v, int16_t, idx_h, ste_h)
++GEN_VEXT_ST_INDEX(vsxei16_32_v, int32_t, idx_h, ste_w)
++GEN_VEXT_ST_INDEX(vsxei16_64_v, int64_t, idx_h, ste_d)
++GEN_VEXT_ST_INDEX(vsxei32_8_v,  int8_t,  idx_w, ste_b)
++GEN_VEXT_ST_INDEX(vsxei32_16_v, int16_t, idx_w, ste_h)
++GEN_VEXT_ST_INDEX(vsxei32_32_v, int32_t, idx_w, ste_w)
++GEN_VEXT_ST_INDEX(vsxei32_64_v, int64_t, idx_w, ste_d)
++GEN_VEXT_ST_INDEX(vsxei64_8_v,  int8_t,  idx_d, ste_b)
++GEN_VEXT_ST_INDEX(vsxei64_16_v, int16_t, idx_d, ste_h)
++GEN_VEXT_ST_INDEX(vsxei64_32_v, int32_t, idx_d, ste_w)
++GEN_VEXT_ST_INDEX(vsxei64_64_v, int64_t, idx_d, ste_d)
+ 
+ /*
+  *** unit-stride fault-only-first load instructions
+-- 
+2.33.1
+
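The EMUL rules enforced by vext_check_st_index()/vext_check_ld_index() in the patch above are easier to see with concrete numbers. The standalone C sketch below assumes the same log2 encodings the patch uses (eew and sew as MO_8..MO_64, i.e. 0..3; lmul as a signed log2 of LMUL); the helper name and the main() driver are illustrative only.

#include <stdbool.h>
#include <stdio.h>

/* EMUL = (EEW / SEW) * LMUL, tracked in log2 form so 1/8..8 maps to -3..3. */
static bool emul_in_range(int eew, int sew, int lmul)
{
    int emul = eew - sew + lmul;     /* log2(EMUL) */
    return emul >= -3 && emul <= 3;  /* 1/8 <= EMUL <= 8 */
}

int main(void)
{
    /* vlxei8.v with SEW=64 (sew=3), LMUL=8 (lmul=3), index EEW=8 (eew=0):
     * log2(EMUL) = 0 - 3 + 3 = 0, so EMUL = 1 -> legal. */
    printf("%d\n", emul_in_range(0, 3, 3));   /* 1 */
    /* vlxei64.v with SEW=8 (sew=0), LMUL=8 (lmul=3), index EEW=64 (eew=3):
     * log2(EMUL) = 3 - 0 + 3 = 6, so EMUL = 64 -> out of range, illegal. */
    printf("%d\n", emul_in_range(3, 0, 3));   /* 0 */
    return 0;
}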

+ 38 - 0
recipes-devtools/qemu/qemu/0028-target-riscv-rvv-1.0-fix-address-index-overflow-bug-.patch

@@ -0,0 +1,38 @@
+From e5371e625fb79eab06d70b03ca410102f56fd7ca Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Thu, 9 Jul 2020 21:25:17 +0800
+Subject: [PATCH 028/107] target/riscv: rvv-1.0: fix address index overflow bug
+ of indexed load/store insns
+
+Change ETYPE from signed int to unsigned int to prevent an index overflow
+issue, which would lead to a wrong index address.
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
+---
+ target/riscv/vector_helper.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
+index 95d367489b..6030fa6fbb 100644
+--- a/target/riscv/vector_helper.c
++++ b/target/riscv/vector_helper.c
+@@ -361,10 +361,10 @@ static target_ulong NAME(target_ulong base,            \
+     return (base + *((ETYPE *)vs2 + H(idx)));          \
+ }
+ 
+-GEN_VEXT_GET_INDEX_ADDR(idx_b, int8_t,  H1)
+-GEN_VEXT_GET_INDEX_ADDR(idx_h, int16_t, H2)
+-GEN_VEXT_GET_INDEX_ADDR(idx_w, int32_t, H4)
+-GEN_VEXT_GET_INDEX_ADDR(idx_d, int64_t, H8)
++GEN_VEXT_GET_INDEX_ADDR(idx_b, uint8_t,  H1)
++GEN_VEXT_GET_INDEX_ADDR(idx_h, uint16_t, H2)
++GEN_VEXT_GET_INDEX_ADDR(idx_w, uint32_t, H4)
++GEN_VEXT_GET_INDEX_ADDR(idx_d, uint64_t, H8)
+ 
+ static inline void
+ vext_ldst_index(void *vd, void *v0, target_ulong base,
+-- 
+2.33.1
+
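A short standalone illustration of the bug fixed above: reading an offset element through a signed type sign-extends it before it is added to the base register, so any offset with its top bit set produces a lower, wrong address, while the unsigned read gives the intended zero-extended offset. The constants and names below are illustrative only.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t base = 0x80000000u;
    uint8_t raw_offset = 0xF0;   /* offset element as stored in vs2 */

    uint64_t signed_addr   = base + (int8_t)raw_offset;   /* old behaviour */
    uint64_t unsigned_addr = base + (uint8_t)raw_offset;  /* patched behaviour */

    printf("signed:   0x%" PRIx64 "\n", signed_addr);    /* 0x7ffffff0 (wrong) */
    printf("unsigned: 0x%" PRIx64 "\n", unsigned_addr);  /* 0x800000f0 */
    return 0;
}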

+ 245 - 0
recipes-devtools/qemu/qemu/0029-target-riscv-rvv-1.0-fault-only-first-unit-stride-lo.patch

@@ -0,0 +1,245 @@
+From 05823ddce2559fb9cb01fd318a619c67f05f7295 Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Wed, 8 Jul 2020 11:49:28 +0800
+Subject: [PATCH 029/107] target/riscv: rvv-1.0: fault-only-first unit stride
+ load
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
+---
+ target/riscv/helper.h                   | 27 +++---------
+ target/riscv/insn32.decode              | 14 +++----
+ target/riscv/insn_trans/trans_rvv.c.inc | 33 ++++-----------
+ target/riscv/vector_helper.c            | 56 +++++++++----------------
+ 4 files changed, 39 insertions(+), 91 deletions(-)
+
+diff --git a/target/riscv/helper.h b/target/riscv/helper.h
+index 927d28d53a..bbea5403fb 100644
+--- a/target/riscv/helper.h
++++ b/target/riscv/helper.h
+@@ -169,28 +169,11 @@ DEF_HELPER_6(vsxei64_8_v, void, ptr, ptr, tl, ptr, env, i32)
+ DEF_HELPER_6(vsxei64_16_v, void, ptr, ptr, tl, ptr, env, i32)
+ DEF_HELPER_6(vsxei64_32_v, void, ptr, ptr, tl, ptr, env, i32)
+ DEF_HELPER_6(vsxei64_64_v, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_5(vlbff_v_b, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vlbff_v_h, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vlbff_v_w, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vlbff_v_d, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vlhff_v_h, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vlhff_v_w, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vlhff_v_d, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vlwff_v_w, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vlwff_v_d, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vleff_v_b, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vleff_v_h, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vleff_v_w, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vleff_v_d, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vlbuff_v_b, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vlbuff_v_h, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vlbuff_v_w, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vlbuff_v_d, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vlhuff_v_h, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vlhuff_v_w, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vlhuff_v_d, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vlwuff_v_w, void, ptr, ptr, tl, env, i32)
+-DEF_HELPER_5(vlwuff_v_d, void, ptr, ptr, tl, env, i32)
++DEF_HELPER_5(vle8ff_v, void, ptr, ptr, tl, env, i32)
++DEF_HELPER_5(vle16ff_v, void, ptr, ptr, tl, env, i32)
++DEF_HELPER_5(vle32ff_v, void, ptr, ptr, tl, env, i32)
++DEF_HELPER_5(vle64ff_v, void, ptr, ptr, tl, env, i32)
++
+ #ifdef TARGET_RISCV64
+ DEF_HELPER_6(vamoswapw_v_d, void, ptr, ptr, tl, ptr, env, i32)
+ DEF_HELPER_6(vamoswapd_v_d, void, ptr, ptr, tl, ptr, env, i32)
+diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
+index 05c3c18028..97b9b8f5f5 100644
+--- a/target/riscv/insn32.decode
++++ b/target/riscv/insn32.decode
+@@ -260,14 +260,6 @@ vsse16_v    ... 010 . ..... ..... 101 ..... 0100111 @r_nfvm
+ vsse32_v    ... 010 . ..... ..... 110 ..... 0100111 @r_nfvm
+ vsse64_v    ... 010 . ..... ..... 111 ..... 0100111 @r_nfvm
+ 
+-vlbff_v    ... 100 . 10000 ..... 000 ..... 0000111 @r2_nfvm
+-vlhff_v    ... 100 . 10000 ..... 101 ..... 0000111 @r2_nfvm
+-vlwff_v    ... 100 . 10000 ..... 110 ..... 0000111 @r2_nfvm
+-vleff_v    ... 000 . 10000 ..... 111 ..... 0000111 @r2_nfvm
+-vlbuff_v   ... 000 . 10000 ..... 000 ..... 0000111 @r2_nfvm
+-vlhuff_v   ... 000 . 10000 ..... 101 ..... 0000111 @r2_nfvm
+-vlwuff_v   ... 000 . 10000 ..... 110 ..... 0000111 @r2_nfvm
+-
+ # Vector ordered-indexed and unordered-indexed load insns.
+ vlxei8_v      ... 0-1 . ..... ..... 000 ..... 0000111 @r_nfvm
+ vlxei16_v     ... 0-1 . ..... ..... 101 ..... 0000111 @r_nfvm
+@@ -280,6 +272,12 @@ vsxei16_v     ... 0-1 . ..... ..... 101 ..... 0100111 @r_nfvm
+ vsxei32_v     ... 0-1 . ..... ..... 110 ..... 0100111 @r_nfvm
+ vsxei64_v     ... 0-1 . ..... ..... 111 ..... 0100111 @r_nfvm
+ 
++# Vector unit-stride fault-only-first load insns.
++vle8ff_v      ... 000 . 10000 ..... 000 ..... 0000111 @r2_nfvm
++vle16ff_v     ... 000 . 10000 ..... 101 ..... 0000111 @r2_nfvm
++vle32ff_v     ... 000 . 10000 ..... 110 ..... 0000111 @r2_nfvm
++vle64ff_v     ... 000 . 10000 ..... 111 ..... 0000111 @r2_nfvm
++
+ #*** Vector AMO operations are encoded under the standard AMO major opcode ***
+ vamoswapw_v     00001 . . ..... ..... 110 ..... 0101111 @r_wdvm
+ vamoaddw_v      00000 . . ..... ..... 110 ..... 0101111 @r_wdvm
+diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
+index 74dd7ee387..14974ce288 100644
+--- a/target/riscv/insn_trans/trans_rvv.c.inc
++++ b/target/riscv/insn_trans/trans_rvv.c.inc
+@@ -943,28 +943,16 @@ static bool ldff_trans(uint32_t vd, uint32_t rs1, uint32_t data,
+     return true;
+ }
+ 
+-static bool ldff_op(DisasContext *s, arg_r2nfvm *a, uint8_t seq)
++static bool ldff_op(DisasContext *s, arg_r2nfvm *a, uint8_t eew)
+ {
+     uint32_t data = 0;
+     gen_helper_ldst_us *fn;
+-    static gen_helper_ldst_us * const fns[7][4] = {
+-        { gen_helper_vlbff_v_b,  gen_helper_vlbff_v_h,
+-          gen_helper_vlbff_v_w,  gen_helper_vlbff_v_d },
+-        { NULL,                  gen_helper_vlhff_v_h,
+-          gen_helper_vlhff_v_w,  gen_helper_vlhff_v_d },
+-        { NULL,                  NULL,
+-          gen_helper_vlwff_v_w,  gen_helper_vlwff_v_d },
+-        { gen_helper_vleff_v_b,  gen_helper_vleff_v_h,
+-          gen_helper_vleff_v_w,  gen_helper_vleff_v_d },
+-        { gen_helper_vlbuff_v_b, gen_helper_vlbuff_v_h,
+-          gen_helper_vlbuff_v_w, gen_helper_vlbuff_v_d },
+-        { NULL,                  gen_helper_vlhuff_v_h,
+-          gen_helper_vlhuff_v_w, gen_helper_vlhuff_v_d },
+-        { NULL,                  NULL,
+-          gen_helper_vlwuff_v_w, gen_helper_vlwuff_v_d }
++    static gen_helper_ldst_us * const fns[4] = {
++        gen_helper_vle8ff_v, gen_helper_vle16ff_v,
++        gen_helper_vle32ff_v, gen_helper_vle64ff_v
+     };
+ 
+-    fn =  fns[seq][s->sew];
++    fn = fns[eew];
+     if (fn == NULL) {
+         return false;
+     }
+@@ -975,13 +963,10 @@ static bool ldff_op(DisasContext *s, arg_r2nfvm *a, uint8_t seq)
+     return ldff_trans(a->rd, a->rs1, data, fn, s);
+ }
+ 
+-GEN_VEXT_TRANS(vlbff_v, 0, r2nfvm, ldff_op, ld_us_check)
+-GEN_VEXT_TRANS(vlhff_v, 1, r2nfvm, ldff_op, ld_us_check)
+-GEN_VEXT_TRANS(vlwff_v, 2, r2nfvm, ldff_op, ld_us_check)
+-GEN_VEXT_TRANS(vleff_v, 3, r2nfvm, ldff_op, ld_us_check)
+-GEN_VEXT_TRANS(vlbuff_v, 4, r2nfvm, ldff_op, ld_us_check)
+-GEN_VEXT_TRANS(vlhuff_v, 5, r2nfvm, ldff_op, ld_us_check)
+-GEN_VEXT_TRANS(vlwuff_v, 6, r2nfvm, ldff_op, ld_us_check)
++GEN_VEXT_TRANS(vle8ff_v,  MO_8,  r2nfvm, ldff_op, ld_us_check)
++GEN_VEXT_TRANS(vle16ff_v, MO_16, r2nfvm, ldff_op, ld_us_check)
++GEN_VEXT_TRANS(vle32ff_v, MO_32, r2nfvm, ldff_op, ld_us_check)
++GEN_VEXT_TRANS(vle64ff_v, MO_64, r2nfvm, ldff_op, ld_us_check)
+ 
+ /*
+  *** vector atomic operation
+diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
+index 6030fa6fbb..affe024600 100644
+--- a/target/riscv/vector_helper.c
++++ b/target/riscv/vector_helper.c
+@@ -458,7 +458,7 @@ static inline void
+ vext_ldff(void *vd, void *v0, target_ulong base,
+           CPURISCVState *env, uint32_t desc,
+           vext_ldst_elem_fn *ldst_elem,
+-          uint32_t esz, uint32_t msz, uintptr_t ra)
++          uint32_t esz, uintptr_t ra)
+ {
+     void *host;
+     uint32_t i, k, vl = 0;
+@@ -472,24 +472,24 @@ vext_ldff(void *vd, void *v0, target_ulong base,
+         if (!vm && !vext_elem_mask(v0, i)) {
+             continue;
+         }
+-        addr = base + nf * i * msz;
++        addr = base + nf * i * esz;
+         if (i == 0) {
+-            probe_pages(env, addr, nf * msz, ra, MMU_DATA_LOAD);
++            probe_pages(env, addr, nf * esz, ra, MMU_DATA_LOAD);
+         } else {
+             /* if it triggers an exception, no need to check watchpoint */
+-            remain = nf * msz;
++            remain = nf * esz;
+             while (remain > 0) {
+                 offset = -(addr | TARGET_PAGE_MASK);
+                 host = tlb_vaddr_to_host(env, addr, MMU_DATA_LOAD,
+                                          cpu_mmu_index(env, false));
+                 if (host) {
+ #ifdef CONFIG_USER_ONLY
+-                    if (page_check_range(addr, nf * msz, PAGE_READ) < 0) {
++                    if (page_check_range(addr, nf * esz, PAGE_READ) < 0) {
+                         vl = i;
+                         goto ProbeSuccess;
+                     }
+ #else
+-                    probe_pages(env, addr, nf * msz, ra, MMU_DATA_LOAD);
++                    probe_pages(env, addr, nf * esz, ra, MMU_DATA_LOAD);
+ #endif
+                 } else {
+                     vl = i;
+@@ -514,43 +514,25 @@ ProbeSuccess:
+             continue;
+         }
+         while (k < nf) {
+-            target_ulong addr = base + (i * nf + k) * msz;
++            target_ulong addr = base + (i * nf + k) * esz;
+             ldst_elem(env, addr, i + k * vlmax, vd, ra);
+             k++;
+         }
+     }
+ }
+ 
+-#define GEN_VEXT_LDFF(NAME, MTYPE, ETYPE, LOAD_FN)               \
+-void HELPER(NAME)(void *vd, void *v0, target_ulong base,         \
+-                  CPURISCVState *env, uint32_t desc)             \
+-{                                                                \
+-    vext_ldff(vd, v0, base, env, desc, LOAD_FN,                  \
+-              sizeof(ETYPE), sizeof(MTYPE), GETPC());            \
+-}
+-
+-GEN_VEXT_LDFF(vlbff_v_b,  int8_t,   int8_t,   ldb_b)
+-GEN_VEXT_LDFF(vlbff_v_h,  int8_t,   int16_t,  ldb_h)
+-GEN_VEXT_LDFF(vlbff_v_w,  int8_t,   int32_t,  ldb_w)
+-GEN_VEXT_LDFF(vlbff_v_d,  int8_t,   int64_t,  ldb_d)
+-GEN_VEXT_LDFF(vlhff_v_h,  int16_t,  int16_t,  ldh_h)
+-GEN_VEXT_LDFF(vlhff_v_w,  int16_t,  int32_t,  ldh_w)
+-GEN_VEXT_LDFF(vlhff_v_d,  int16_t,  int64_t,  ldh_d)
+-GEN_VEXT_LDFF(vlwff_v_w,  int32_t,  int32_t,  ldw_w)
+-GEN_VEXT_LDFF(vlwff_v_d,  int32_t,  int64_t,  ldw_d)
+-GEN_VEXT_LDFF(vleff_v_b,  int8_t,   int8_t,   lde_b)
+-GEN_VEXT_LDFF(vleff_v_h,  int16_t,  int16_t,  lde_h)
+-GEN_VEXT_LDFF(vleff_v_w,  int32_t,  int32_t,  lde_w)
+-GEN_VEXT_LDFF(vleff_v_d,  int64_t,  int64_t,  lde_d)
+-GEN_VEXT_LDFF(vlbuff_v_b, uint8_t,  uint8_t,  ldbu_b)
+-GEN_VEXT_LDFF(vlbuff_v_h, uint8_t,  uint16_t, ldbu_h)
+-GEN_VEXT_LDFF(vlbuff_v_w, uint8_t,  uint32_t, ldbu_w)
+-GEN_VEXT_LDFF(vlbuff_v_d, uint8_t,  uint64_t, ldbu_d)
+-GEN_VEXT_LDFF(vlhuff_v_h, uint16_t, uint16_t, ldhu_h)
+-GEN_VEXT_LDFF(vlhuff_v_w, uint16_t, uint32_t, ldhu_w)
+-GEN_VEXT_LDFF(vlhuff_v_d, uint16_t, uint64_t, ldhu_d)
+-GEN_VEXT_LDFF(vlwuff_v_w, uint32_t, uint32_t, ldwu_w)
+-GEN_VEXT_LDFF(vlwuff_v_d, uint32_t, uint64_t, ldwu_d)
++#define GEN_VEXT_LDFF(NAME, ETYPE, LOAD_FN)               \
++void HELPER(NAME)(void *vd, void *v0, target_ulong base,  \
++                  CPURISCVState *env, uint32_t desc)      \
++{                                                         \
++    vext_ldff(vd, v0, base, env, desc, LOAD_FN,           \
++              sizeof(ETYPE), GETPC());                    \
++}
++
++GEN_VEXT_LDFF(vle8ff_v,  int8_t,  lde_b)
++GEN_VEXT_LDFF(vle16ff_v, int16_t, lde_h)
++GEN_VEXT_LDFF(vle32ff_v, int32_t, lde_w)
++GEN_VEXT_LDFF(vle64ff_v, int64_t, lde_d)
+ 
+ /*
+  *** Vector AMO Operations (Zvamo)
+-- 
+2.33.1
+
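The fault-only-first behaviour implemented by vext_ldff() above can be summarised as: element 0 is accessed normally and may trap, while a fault on any later element only truncates vl. The standalone C sketch below is illustrative only; probe_ok() stands in for the real probe_pages()/tlb_vaddr_to_host() machinery and the loop body is a pretend load.

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool probe_ok(uint64_t addr, uint64_t limit)
{
    return addr < limit;   /* false: this access would fault */
}

static uint32_t ldff_sketch(uint64_t base, uint32_t vl, uint32_t esz,
                            uint64_t limit, uint64_t *vd)
{
    for (uint32_t i = 0; i < vl; i++) {
        uint64_t addr = base + (uint64_t)i * esz;
        if (i != 0 && !probe_ok(addr, limit)) {
            vl = i;           /* truncate vl at the first faulting element */
            break;
        }
        vd[i] = addr;         /* pretend load; element 0 may fault for real */
    }
    return vl;                /* new vl reported back to the guest */
}

int main(void)
{
    uint64_t vd[8] = { 0 };
    /* 8 elements of 8 bytes from 0x1000, but only 0x1000..0x101f is mapped. */
    uint32_t new_vl = ldff_sketch(0x1000, 8, 8, 0x1020, vd);
    printf("vl truncated to %" PRIu32 "\n", new_vl);   /* 4 */
    return 0;
}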

+ 763 - 0
recipes-devtools/qemu/qemu/0030-target-riscv-rvv-1.0-amo-operations.patch

@@ -0,0 +1,763 @@
+From 6bc2f0d5ff1eaa91ea2c82e9bca60c96ad4b3a29 Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Fri, 14 Aug 2020 18:07:40 +0800
+Subject: [PATCH 030/107] target/riscv: rvv-1.0: amo operations
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
+---
+ target/riscv/helper.h                   | 100 +++++++---
+ target/riscv/insn32-64.decode           |  18 +-
+ target/riscv/insn32.decode              |  36 +++-
+ target/riscv/insn_trans/trans_rvv.c.inc | 229 +++++++++++++++--------
+ target/riscv/vector_helper.c            | 232 ++++++++++++++++--------
+ 5 files changed, 414 insertions(+), 201 deletions(-)
+
+diff --git a/target/riscv/helper.h b/target/riscv/helper.h
+index bbea5403fb..f26af64d5b 100644
+--- a/target/riscv/helper.h
++++ b/target/riscv/helper.h
+@@ -174,36 +174,80 @@ DEF_HELPER_5(vle16ff_v, void, ptr, ptr, tl, env, i32)
+ DEF_HELPER_5(vle32ff_v, void, ptr, ptr, tl, env, i32)
+ DEF_HELPER_5(vle64ff_v, void, ptr, ptr, tl, env, i32)
+ 
++DEF_HELPER_6(vamoswapei8_32_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamoswapei8_64_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamoswapei16_32_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamoswapei16_64_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamoswapei32_32_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamoswapei32_64_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamoaddei8_32_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamoaddei8_64_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamoaddei16_32_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamoaddei16_64_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamoaddei32_32_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamoaddei32_64_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamoxorei8_32_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamoxorei8_64_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamoxorei16_32_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamoxorei16_64_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamoxorei32_32_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamoxorei32_64_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamoandei8_32_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamoandei8_64_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamoandei16_32_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamoandei16_64_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamoandei32_32_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamoandei32_64_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamoorei8_32_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamoorei8_64_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamoorei16_32_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamoorei16_64_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamoorei32_32_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamoorei32_64_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamominei8_32_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamominei8_64_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamominei16_32_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamominei16_64_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamominei32_32_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamominei32_64_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamomaxei8_32_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamomaxei8_64_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamomaxei16_32_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamomaxei16_64_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamomaxei32_32_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamomaxei32_64_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamominuei8_32_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamominuei8_64_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamominuei16_32_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamominuei16_64_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamominuei32_32_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamominuei32_64_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamomaxuei8_32_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamomaxuei8_64_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamomaxuei16_32_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamomaxuei16_64_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamomaxuei32_32_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamomaxuei32_64_v, void, ptr, ptr, tl, ptr, env, i32)
+ #ifdef TARGET_RISCV64
+-DEF_HELPER_6(vamoswapw_v_d, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vamoswapd_v_d, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vamoaddw_v_d,  void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vamoaddd_v_d,  void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vamoxorw_v_d,  void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vamoxord_v_d,  void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vamoandw_v_d,  void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vamoandd_v_d,  void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vamoorw_v_d,   void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vamoord_v_d,   void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vamominw_v_d,  void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vamomind_v_d,  void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vamomaxw_v_d,  void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vamomaxd_v_d,  void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vamominuw_v_d, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vamominud_v_d, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vamomaxuw_v_d, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vamomaxud_v_d, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamoswapei64_32_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamoswapei64_64_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamoaddei64_32_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamoaddei64_64_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamoxorei64_32_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamoxorei64_64_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamoandei64_32_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamoandei64_64_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamoorei64_32_v,  void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamoorei64_64_v,  void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamominei64_32_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamominei64_64_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamomaxei64_32_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamomaxei64_64_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamominuei64_32_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamominuei64_64_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamomaxuei64_32_v, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vamomaxuei64_64_v, void, ptr, ptr, tl, ptr, env, i32)
+ #endif
+-DEF_HELPER_6(vamoswapw_v_w, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vamoaddw_v_w,  void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vamoxorw_v_w,  void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vamoandw_v_w,  void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vamoorw_v_w,   void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vamominw_v_w,  void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vamomaxw_v_w,  void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vamominuw_v_w, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vamomaxuw_v_w, void, ptr, ptr, tl, ptr, env, i32)
+-
+ DEF_HELPER_6(vadd_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+ DEF_HELPER_6(vadd_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+ DEF_HELPER_6(vadd_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+diff --git a/target/riscv/insn32-64.decode b/target/riscv/insn32-64.decode
+index 1f5d0b7a5c..bf39c5064f 100644
+--- a/target/riscv/insn32-64.decode
++++ b/target/riscv/insn32-64.decode
+@@ -58,15 +58,15 @@ amominu_d  11000 . . ..... ..... 011 ..... 0101111 @atom_st
+ amomaxu_d  11100 . . ..... ..... 011 ..... 0101111 @atom_st
+ 
+ #*** Vector AMO operations (in addition to Zvamo) ***
+-vamoswapd_v     00001 . . ..... ..... 111 ..... 0101111 @r_wdvm
+-vamoaddd_v      00000 . . ..... ..... 111 ..... 0101111 @r_wdvm
+-vamoxord_v      00100 . . ..... ..... 111 ..... 0101111 @r_wdvm
+-vamoandd_v      01100 . . ..... ..... 111 ..... 0101111 @r_wdvm
+-vamoord_v       01000 . . ..... ..... 111 ..... 0101111 @r_wdvm
+-vamomind_v      10000 . . ..... ..... 111 ..... 0101111 @r_wdvm
+-vamomaxd_v      10100 . . ..... ..... 111 ..... 0101111 @r_wdvm
+-vamominud_v     11000 . . ..... ..... 111 ..... 0101111 @r_wdvm
+-vamomaxud_v     11100 . . ..... ..... 111 ..... 0101111 @r_wdvm
++vamoswapei64_v  00001 . . ..... ..... 111 ..... 0101111 @r_wdvm
++vamoaddei64_v   00000 . . ..... ..... 111 ..... 0101111 @r_wdvm
++vamoxorei64_v   00100 . . ..... ..... 111 ..... 0101111 @r_wdvm
++vamoandei64_v   01100 . . ..... ..... 111 ..... 0101111 @r_wdvm
++vamoorei64_v    01000 . . ..... ..... 111 ..... 0101111 @r_wdvm
++vamominei64_v   10000 . . ..... ..... 111 ..... 0101111 @r_wdvm
++vamomaxei64_v   10100 . . ..... ..... 111 ..... 0101111 @r_wdvm
++vamominuei64_v  11000 . . ..... ..... 111 ..... 0101111 @r_wdvm
++vamomaxuei64_v  11100 . . ..... ..... 111 ..... 0101111 @r_wdvm
+ 
+ # *** RV64F Standard Extension (in addition to RV32F) ***
+ fcvt_l_s   1100000  00010 ..... ... ..... 1010011 @r2_rm
+diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
+index 97b9b8f5f5..ae406dff3b 100644
+--- a/target/riscv/insn32.decode
++++ b/target/riscv/insn32.decode
+@@ -279,15 +279,33 @@ vle32ff_v     ... 000 . 10000 ..... 110 ..... 0000111 @r2_nfvm
+ vle64ff_v     ... 000 . 10000 ..... 111 ..... 0000111 @r2_nfvm
+ 
+ #*** Vector AMO operations are encoded under the standard AMO major opcode ***
+-vamoswapw_v     00001 . . ..... ..... 110 ..... 0101111 @r_wdvm
+-vamoaddw_v      00000 . . ..... ..... 110 ..... 0101111 @r_wdvm
+-vamoxorw_v      00100 . . ..... ..... 110 ..... 0101111 @r_wdvm
+-vamoandw_v      01100 . . ..... ..... 110 ..... 0101111 @r_wdvm
+-vamoorw_v       01000 . . ..... ..... 110 ..... 0101111 @r_wdvm
+-vamominw_v      10000 . . ..... ..... 110 ..... 0101111 @r_wdvm
+-vamomaxw_v      10100 . . ..... ..... 110 ..... 0101111 @r_wdvm
+-vamominuw_v     11000 . . ..... ..... 110 ..... 0101111 @r_wdvm
+-vamomaxuw_v     11100 . . ..... ..... 110 ..... 0101111 @r_wdvm
++vamoswapei8_v   00001 . . ..... ..... 000 ..... 0101111 @r_wdvm
++vamoswapei16_v  00001 . . ..... ..... 101 ..... 0101111 @r_wdvm
++vamoswapei32_v  00001 . . ..... ..... 110 ..... 0101111 @r_wdvm
++vamoaddei8_v    00000 . . ..... ..... 000 ..... 0101111 @r_wdvm
++vamoaddei16_v   00000 . . ..... ..... 101 ..... 0101111 @r_wdvm
++vamoaddei32_v   00000 . . ..... ..... 110 ..... 0101111 @r_wdvm
++vamoxorei8_v    00100 . . ..... ..... 000 ..... 0101111 @r_wdvm
++vamoxorei16_v   00100 . . ..... ..... 101 ..... 0101111 @r_wdvm
++vamoxorei32_v   00100 . . ..... ..... 110 ..... 0101111 @r_wdvm
++vamoandei8_v    01100 . . ..... ..... 000 ..... 0101111 @r_wdvm
++vamoandei16_v   01100 . . ..... ..... 101 ..... 0101111 @r_wdvm
++vamoandei32_v   01100 . . ..... ..... 110 ..... 0101111 @r_wdvm
++vamoorei8_v     01000 . . ..... ..... 000 ..... 0101111 @r_wdvm
++vamoorei16_v    01000 . . ..... ..... 101 ..... 0101111 @r_wdvm
++vamoorei32_v    01000 . . ..... ..... 110 ..... 0101111 @r_wdvm
++vamominei8_v    10000 . . ..... ..... 000 ..... 0101111 @r_wdvm
++vamominei16_v   10000 . . ..... ..... 101 ..... 0101111 @r_wdvm
++vamominei32_v   10000 . . ..... ..... 110 ..... 0101111 @r_wdvm
++vamomaxei8_v    10100 . . ..... ..... 000 ..... 0101111 @r_wdvm
++vamomaxei16_v   10100 . . ..... ..... 101 ..... 0101111 @r_wdvm
++vamomaxei32_v   10100 . . ..... ..... 110 ..... 0101111 @r_wdvm
++vamominuei8_v   11000 . . ..... ..... 000 ..... 0101111 @r_wdvm
++vamominuei16_v  11000 . . ..... ..... 101 ..... 0101111 @r_wdvm
++vamominuei32_v  11000 . . ..... ..... 110 ..... 0101111 @r_wdvm
++vamomaxuei8_v   11100 . . ..... ..... 000 ..... 0101111 @r_wdvm
++vamomaxuei16_v  11100 . . ..... ..... 101 ..... 0101111 @r_wdvm
++vamomaxuei32_v  11100 . . ..... ..... 110 ..... 0101111 @r_wdvm
+ 
+ # *** new major opcode OP-V ***
+ vadd_vv         000000 . ..... ..... 000 ..... 1010111 @r_vm
+diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
+index 14974ce288..5057dff5eb 100644
+--- a/target/riscv/insn_trans/trans_rvv.c.inc
++++ b/target/riscv/insn_trans/trans_rvv.c.inc
+@@ -290,6 +290,52 @@ static bool vext_check_ld_index(DisasContext *s, int vd, int vs2,
+     return ret;
+ }
+ 
++/*
++ * Vector AMO check function.
++ *
++ * Rules to be checked here:
++ *   1. RVA must be supported.
++ *   2. AMO can either operate on 64-bit (RV64 only) or 32-bit words
++ *      in memory:
++ *      For RV32: 32 <= SEW <= 32, EEW <= 32.
++ *      For RV64: 32 <= SEW <= 64, EEW <= 64.
++ *   3. Destination vector register number is multiples of LMUL.
++ *      (Section 3.3.2, 8)
++ *   4. Address vector register number is multiples of EMUL.
++ *      (Section 3.3.2, 8)
++ *   5. EMUL must be within the range: 1/8 <= EMUL <= 8. (Section 7.3)
++ *   6. If wd = 1:
++ *      6.1. Destination vector register group for a masked vector
++ *           instruction cannot overlap the source mask register (v0).
++ *           (Section 5.3)
++ *      6.2. Destination vector register cannot overlap a source vector
++ *           register (vs2) group.
++ *           (Section 5.2)
++ */
++static bool vext_check_amo(DisasContext *s, int vd, int vs2,
++                           int wd, int vm, uint8_t eew)
++{
++    int8_t emul = eew - s->sew + s->lmul;
++    bool ret = has_ext(s, RVA) &&
++               (1 << s->sew >= 4) &&
++               (1 << s->sew <= sizeof(target_ulong)) &&
++               (eew <= (sizeof(target_ulong) << 3))  &&
++               require_align(vd, s->lmul) &&
++               require_align(vs2, emul) &&
++               (emul >= -3 && emul <= 3);
++    if (wd) {
++        ret &= require_vm(vm, vd);
++        if (eew > s->sew) {
++            if (vd != vs2) {
++                ret &= require_noover(vd, s->lmul, vs2, emul);
++            }
++        } else if (eew < s->sew) {
++            ret &= require_noover(vd, s->lmul, vs2, emul);
++        }
++    }
++    return ret;
++}
++
+ static bool vext_check_ss(DisasContext *s, int vd, int vs, int vm)
+ {
+     return require_vm(vm, vd) &&
+@@ -1007,104 +1053,129 @@ static bool amo_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
+     return true;
+ }
+ 
+-static bool amo_op(DisasContext *s, arg_rwdvm *a, uint8_t seq)
++static bool amo_op(DisasContext *s, arg_rwdvm *a, uint8_t eew, uint8_t seq)
+ {
+     uint32_t data = 0;
+     gen_helper_amo *fn;
+-    static gen_helper_amo *const fnsw[9] = {
+-        /* no atomic operation */
+-        gen_helper_vamoswapw_v_w,
+-        gen_helper_vamoaddw_v_w,
+-        gen_helper_vamoxorw_v_w,
+-        gen_helper_vamoandw_v_w,
+-        gen_helper_vamoorw_v_w,
+-        gen_helper_vamominw_v_w,
+-        gen_helper_vamomaxw_v_w,
+-        gen_helper_vamominuw_v_w,
+-        gen_helper_vamomaxuw_v_w
+-    };
++    static gen_helper_amo *const fns[36][2] = {
++        { gen_helper_vamoswapei8_32_v, gen_helper_vamoswapei8_64_v },
++        { gen_helper_vamoswapei16_32_v, gen_helper_vamoswapei16_64_v },
++        { gen_helper_vamoswapei32_32_v, gen_helper_vamoswapei32_64_v },
++        { gen_helper_vamoaddei8_32_v, gen_helper_vamoaddei8_64_v },
++        { gen_helper_vamoaddei16_32_v, gen_helper_vamoaddei16_64_v },
++        { gen_helper_vamoaddei32_32_v, gen_helper_vamoaddei32_64_v },
++        { gen_helper_vamoxorei8_32_v, gen_helper_vamoxorei8_64_v },
++        { gen_helper_vamoxorei16_32_v, gen_helper_vamoxorei16_64_v },
++        { gen_helper_vamoxorei32_32_v, gen_helper_vamoxorei32_64_v },
++        { gen_helper_vamoandei8_32_v, gen_helper_vamoandei8_64_v },
++        { gen_helper_vamoandei16_32_v, gen_helper_vamoandei16_64_v },
++        { gen_helper_vamoandei32_32_v, gen_helper_vamoandei32_64_v },
++        { gen_helper_vamoorei8_32_v, gen_helper_vamoorei8_64_v },
++        { gen_helper_vamoorei16_32_v, gen_helper_vamoorei16_64_v },
++        { gen_helper_vamoorei32_32_v, gen_helper_vamoorei32_64_v },
++        { gen_helper_vamominei8_32_v, gen_helper_vamominei8_64_v },
++        { gen_helper_vamominei16_32_v, gen_helper_vamominei16_64_v },
++        { gen_helper_vamominei32_32_v, gen_helper_vamominei32_64_v },
++        { gen_helper_vamomaxei8_32_v, gen_helper_vamomaxei8_64_v },
++        { gen_helper_vamomaxei16_32_v, gen_helper_vamomaxei16_64_v },
++        { gen_helper_vamomaxei32_32_v, gen_helper_vamomaxei32_64_v },
++        { gen_helper_vamominuei8_32_v, gen_helper_vamominuei8_64_v },
++        { gen_helper_vamominuei16_32_v, gen_helper_vamominuei16_64_v },
++        { gen_helper_vamominuei32_32_v, gen_helper_vamominuei32_64_v },
++        { gen_helper_vamomaxuei8_32_v, gen_helper_vamomaxuei8_64_v },
++        { gen_helper_vamomaxuei16_32_v, gen_helper_vamomaxuei16_64_v },
++        { gen_helper_vamomaxuei32_32_v, gen_helper_vamomaxuei32_64_v },
+ #ifdef TARGET_RISCV64
+-    static gen_helper_amo *const fnsd[18] = {
+-        gen_helper_vamoswapw_v_d,
+-        gen_helper_vamoaddw_v_d,
+-        gen_helper_vamoxorw_v_d,
+-        gen_helper_vamoandw_v_d,
+-        gen_helper_vamoorw_v_d,
+-        gen_helper_vamominw_v_d,
+-        gen_helper_vamomaxw_v_d,
+-        gen_helper_vamominuw_v_d,
+-        gen_helper_vamomaxuw_v_d,
+-        gen_helper_vamoswapd_v_d,
+-        gen_helper_vamoaddd_v_d,
+-        gen_helper_vamoxord_v_d,
+-        gen_helper_vamoandd_v_d,
+-        gen_helper_vamoord_v_d,
+-        gen_helper_vamomind_v_d,
+-        gen_helper_vamomaxd_v_d,
+-        gen_helper_vamominud_v_d,
+-        gen_helper_vamomaxud_v_d
+-    };
++        { gen_helper_vamoswapei64_32_v, gen_helper_vamoswapei64_64_v },
++        { gen_helper_vamoaddei64_32_v, gen_helper_vamoaddei64_64_v },
++        { gen_helper_vamoxorei64_32_v, gen_helper_vamoxorei64_64_v },
++        { gen_helper_vamoandei64_32_v, gen_helper_vamoandei64_64_v },
++        { gen_helper_vamoorei64_32_v, gen_helper_vamoorei64_64_v },
++        { gen_helper_vamominei64_32_v, gen_helper_vamominei64_64_v },
++        { gen_helper_vamomaxei64_32_v, gen_helper_vamomaxei64_64_v },
++        { gen_helper_vamominuei64_32_v, gen_helper_vamominuei64_64_v },
++        { gen_helper_vamomaxuei64_32_v, gen_helper_vamomaxuei64_64_v }
++#else
++        { NULL, NULL }, { NULL, NULL }, { NULL, NULL }, { NULL, NULL },
++        { NULL, NULL }, { NULL, NULL }, { NULL, NULL }, { NULL, NULL },
++        { NULL, NULL }
+ #endif
++    };
+ 
+     if (tb_cflags(s->base.tb) & CF_PARALLEL) {
+         gen_helper_exit_atomic(cpu_env);
+         s->base.is_jmp = DISAS_NORETURN;
+         return true;
+-    } else {
+-        if (s->sew == 3) {
+-#ifdef TARGET_RISCV64
+-            fn = fnsd[seq];
+-#else
+-            /* Check done in amo_check(). */
+-            g_assert_not_reached();
+-#endif
+-        } else {
+-            assert(seq < ARRAY_SIZE(fnsw));
+-            fn = fnsw[seq];
+-        }
++    }
++
++    fn = fns[seq][s->sew - 2];
++    if (fn == NULL) {
++        return false;
+     }
+ 
+     data = FIELD_DP32(data, VDATA, VM, a->vm);
+-    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+     data = FIELD_DP32(data, VDATA, WD, a->wd);
+     return amo_trans(a->rd, a->rs1, a->rs2, data, fn, s);
+ }
++
++static bool amo_check(DisasContext *s, arg_rwdvm* a, uint8_t eew)
++{
++    return require_rvv(s) &&
++           vext_check_isa_ill(s) &&
++           vext_check_amo(s, a->rd, a->rs2, a->wd, a->vm, eew);
++}
++
++#define GEN_VEXT_AMO_TRANS(NAME, EEW, SEQ, ARGTYPE, OP, CHECK) \
++static bool trans_##NAME(DisasContext *s, arg_##ARGTYPE * a)   \
++{                                                              \
++    if (CHECK(s, a, EEW)) {                                    \
++        return OP(s, a, EEW, SEQ);                             \
++    }                                                          \
++    return false;                                              \
++}
++
++GEN_VEXT_AMO_TRANS(vamoswapei8_v,  MO_8,  0,  rwdvm, amo_op, amo_check)
++GEN_VEXT_AMO_TRANS(vamoswapei16_v, MO_16, 1,  rwdvm, amo_op, amo_check)
++GEN_VEXT_AMO_TRANS(vamoswapei32_v, MO_32, 2,  rwdvm, amo_op, amo_check)
++GEN_VEXT_AMO_TRANS(vamoaddei8_v,   MO_8,  3,  rwdvm, amo_op, amo_check)
++GEN_VEXT_AMO_TRANS(vamoaddei16_v,  MO_16, 4,  rwdvm, amo_op, amo_check)
++GEN_VEXT_AMO_TRANS(vamoaddei32_v,  MO_32, 5,  rwdvm, amo_op, amo_check)
++GEN_VEXT_AMO_TRANS(vamoxorei8_v,   MO_8,  6,  rwdvm, amo_op, amo_check)
++GEN_VEXT_AMO_TRANS(vamoxorei16_v,  MO_16, 7,  rwdvm, amo_op, amo_check)
++GEN_VEXT_AMO_TRANS(vamoxorei32_v,  MO_32, 8,  rwdvm, amo_op, amo_check)
++GEN_VEXT_AMO_TRANS(vamoandei8_v,   MO_8,  9,  rwdvm, amo_op, amo_check)
++GEN_VEXT_AMO_TRANS(vamoandei16_v,  MO_16, 10, rwdvm, amo_op, amo_check)
++GEN_VEXT_AMO_TRANS(vamoandei32_v,  MO_32, 11, rwdvm, amo_op, amo_check)
++GEN_VEXT_AMO_TRANS(vamoorei8_v,    MO_8,  12, rwdvm, amo_op, amo_check)
++GEN_VEXT_AMO_TRANS(vamoorei16_v,   MO_16, 13, rwdvm, amo_op, amo_check)
++GEN_VEXT_AMO_TRANS(vamoorei32_v,   MO_32, 14, rwdvm, amo_op, amo_check)
++GEN_VEXT_AMO_TRANS(vamominei8_v,   MO_8,  15, rwdvm, amo_op, amo_check)
++GEN_VEXT_AMO_TRANS(vamominei16_v,  MO_16, 16, rwdvm, amo_op, amo_check)
++GEN_VEXT_AMO_TRANS(vamominei32_v,  MO_32, 17, rwdvm, amo_op, amo_check)
++GEN_VEXT_AMO_TRANS(vamomaxei8_v,   MO_8,  18, rwdvm, amo_op, amo_check)
++GEN_VEXT_AMO_TRANS(vamomaxei16_v,  MO_16, 19, rwdvm, amo_op, amo_check)
++GEN_VEXT_AMO_TRANS(vamomaxei32_v,  MO_32, 20, rwdvm, amo_op, amo_check)
++GEN_VEXT_AMO_TRANS(vamominuei8_v,  MO_8,  21, rwdvm, amo_op, amo_check)
++GEN_VEXT_AMO_TRANS(vamominuei16_v, MO_16, 22, rwdvm, amo_op, amo_check)
++GEN_VEXT_AMO_TRANS(vamominuei32_v, MO_32, 23, rwdvm, amo_op, amo_check)
++GEN_VEXT_AMO_TRANS(vamomaxuei8_v,  MO_8,  24, rwdvm, amo_op, amo_check)
++GEN_VEXT_AMO_TRANS(vamomaxuei16_v, MO_16, 25, rwdvm, amo_op, amo_check)
++GEN_VEXT_AMO_TRANS(vamomaxuei32_v, MO_32, 26, rwdvm, amo_op, amo_check)
++
+ /*
+- * There are two rules check here.
+- *
+- * 1. SEW must be at least as wide as the AMO memory element size.
+- *
+- * 2. If SEW is greater than XLEN, an illegal instruction exception is raised.
++ * Index EEW cannot be greater than XLEN,
++ * otherwise an illegal instruction exception is raised (Section 8)
+  */
+-static bool amo_check(DisasContext *s, arg_rwdvm* a)
+-{
+-    return (!s->vill && has_ext(s, RVA) &&
+-            (!a->wd || vext_check_overlap_mask(s, a->rd, a->vm, false)) &&
+-            vext_check_reg(s, a->rd, false) &&
+-            vext_check_reg(s, a->rs2, false) &&
+-            ((1 << s->sew) <= sizeof(target_ulong)) &&
+-            ((1 << s->sew) >= 4));
+-}
+-
+-GEN_VEXT_TRANS(vamoswapw_v, 0, rwdvm, amo_op, amo_check)
+-GEN_VEXT_TRANS(vamoaddw_v, 1, rwdvm, amo_op, amo_check)
+-GEN_VEXT_TRANS(vamoxorw_v, 2, rwdvm, amo_op, amo_check)
+-GEN_VEXT_TRANS(vamoandw_v, 3, rwdvm, amo_op, amo_check)
+-GEN_VEXT_TRANS(vamoorw_v, 4, rwdvm, amo_op, amo_check)
+-GEN_VEXT_TRANS(vamominw_v, 5, rwdvm, amo_op, amo_check)
+-GEN_VEXT_TRANS(vamomaxw_v, 6, rwdvm, amo_op, amo_check)
+-GEN_VEXT_TRANS(vamominuw_v, 7, rwdvm, amo_op, amo_check)
+-GEN_VEXT_TRANS(vamomaxuw_v, 8, rwdvm, amo_op, amo_check)
+ #ifdef TARGET_RISCV64
+-GEN_VEXT_TRANS(vamoswapd_v, 9, rwdvm, amo_op, amo_check)
+-GEN_VEXT_TRANS(vamoaddd_v, 10, rwdvm, amo_op, amo_check)
+-GEN_VEXT_TRANS(vamoxord_v, 11, rwdvm, amo_op, amo_check)
+-GEN_VEXT_TRANS(vamoandd_v, 12, rwdvm, amo_op, amo_check)
+-GEN_VEXT_TRANS(vamoord_v, 13, rwdvm, amo_op, amo_check)
+-GEN_VEXT_TRANS(vamomind_v, 14, rwdvm, amo_op, amo_check)
+-GEN_VEXT_TRANS(vamomaxd_v, 15, rwdvm, amo_op, amo_check)
+-GEN_VEXT_TRANS(vamominud_v, 16, rwdvm, amo_op, amo_check)
+-GEN_VEXT_TRANS(vamomaxud_v, 17, rwdvm, amo_op, amo_check)
++GEN_VEXT_AMO_TRANS(vamoswapei64_v, MO_64, 27, rwdvm, amo_op, amo_check)
++GEN_VEXT_AMO_TRANS(vamoaddei64_v,  MO_64, 28, rwdvm, amo_op, amo_check)
++GEN_VEXT_AMO_TRANS(vamoxorei64_v,  MO_64, 29, rwdvm, amo_op, amo_check)
++GEN_VEXT_AMO_TRANS(vamoandei64_v,  MO_64, 30, rwdvm, amo_op, amo_check)
++GEN_VEXT_AMO_TRANS(vamoorei64_v,   MO_64, 31, rwdvm, amo_op, amo_check)
++GEN_VEXT_AMO_TRANS(vamominei64_v,  MO_64, 32, rwdvm, amo_op, amo_check)
++GEN_VEXT_AMO_TRANS(vamomaxei64_v,  MO_64, 33, rwdvm, amo_op, amo_check)
++GEN_VEXT_AMO_TRANS(vamominuei64_v, MO_64, 34, rwdvm, amo_op, amo_check)
++GEN_VEXT_AMO_TRANS(vamomaxuei64_v, MO_64, 35, rwdvm, amo_op, amo_check)
+ #endif
+ 
+ /*
+diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
+index affe024600..8bc3bf77a3 100644
+--- a/target/riscv/vector_helper.c
++++ b/target/riscv/vector_helper.c
+@@ -541,23 +541,22 @@ typedef void vext_amo_noatomic_fn(void *vs3, target_ulong addr,
+                                   uint32_t wd, uint32_t idx, CPURISCVState *env,
+                                   uintptr_t retaddr);
+ 
+-/* no atomic opreation for vector atomic insructions */
++/* no atomic operation for vector atomic instructions */
+ #define DO_SWAP(N, M) (M)
+ #define DO_AND(N, M)  (N & M)
+ #define DO_XOR(N, M)  (N ^ M)
+ #define DO_OR(N, M)   (N | M)
+ #define DO_ADD(N, M)  (N + M)
++#define DO_MAX(N, M)  ((N) >= (M) ? (N) : (M))
++#define DO_MIN(N, M)  ((N) >= (M) ? (M) : (N))
+ 
+-#define GEN_VEXT_AMO_NOATOMIC_OP(NAME, ESZ, MSZ, H, DO_OP, SUF) \
++#define GEN_VEXT_AMO_NOATOMIC_OP(NAME, MTYPE, H, DO_OP, SUF)    \
+ static void                                                     \
+ vext_##NAME##_noatomic_op(void *vs3, target_ulong addr,         \
+                           uint32_t wd, uint32_t idx,            \
+                           CPURISCVState *env, uintptr_t retaddr)\
+ {                                                               \
+-    typedef int##ESZ##_t ETYPE;                                 \
+-    typedef int##MSZ##_t MTYPE;                                 \
+-    typedef uint##MSZ##_t UMTYPE __attribute__((unused));       \
+-    ETYPE *pe3 = (ETYPE *)vs3 + H(idx);                         \
++    MTYPE *pe3 = (MTYPE *)vs3 + H(idx);                         \
+     MTYPE  a = cpu_ld##SUF##_data(env, addr), b = *pe3;         \
+                                                                 \
+     cpu_st##SUF##_data(env, addr, DO_OP(a, b));                 \
+@@ -566,42 +565,79 @@ vext_##NAME##_noatomic_op(void *vs3, target_ulong addr,         \
+     }                                                           \
+ }
+ 
+-/* Signed min/max */
+-#define DO_MAX(N, M)  ((N) >= (M) ? (N) : (M))
+-#define DO_MIN(N, M)  ((N) >= (M) ? (M) : (N))
+-
+-/* Unsigned min/max */
+-#define DO_MAXU(N, M) DO_MAX((UMTYPE)N, (UMTYPE)M)
+-#define DO_MINU(N, M) DO_MIN((UMTYPE)N, (UMTYPE)M)
+-
+-GEN_VEXT_AMO_NOATOMIC_OP(vamoswapw_v_w, 32, 32, H4, DO_SWAP, l)
+-GEN_VEXT_AMO_NOATOMIC_OP(vamoaddw_v_w,  32, 32, H4, DO_ADD,  l)
+-GEN_VEXT_AMO_NOATOMIC_OP(vamoxorw_v_w,  32, 32, H4, DO_XOR,  l)
+-GEN_VEXT_AMO_NOATOMIC_OP(vamoandw_v_w,  32, 32, H4, DO_AND,  l)
+-GEN_VEXT_AMO_NOATOMIC_OP(vamoorw_v_w,   32, 32, H4, DO_OR,   l)
+-GEN_VEXT_AMO_NOATOMIC_OP(vamominw_v_w,  32, 32, H4, DO_MIN,  l)
+-GEN_VEXT_AMO_NOATOMIC_OP(vamomaxw_v_w,  32, 32, H4, DO_MAX,  l)
+-GEN_VEXT_AMO_NOATOMIC_OP(vamominuw_v_w, 32, 32, H4, DO_MINU, l)
+-GEN_VEXT_AMO_NOATOMIC_OP(vamomaxuw_v_w, 32, 32, H4, DO_MAXU, l)
++GEN_VEXT_AMO_NOATOMIC_OP(vamoswapei8_32_v,  uint32_t, H4, DO_SWAP, l)
++GEN_VEXT_AMO_NOATOMIC_OP(vamoswapei8_64_v,  uint64_t, H8, DO_SWAP, q)
++GEN_VEXT_AMO_NOATOMIC_OP(vamoswapei16_32_v, uint32_t, H4, DO_SWAP, l)
++GEN_VEXT_AMO_NOATOMIC_OP(vamoswapei16_64_v, uint64_t, H8, DO_SWAP, q)
++GEN_VEXT_AMO_NOATOMIC_OP(vamoswapei32_32_v, uint32_t, H4, DO_SWAP, l)
++GEN_VEXT_AMO_NOATOMIC_OP(vamoswapei32_64_v, uint64_t, H8, DO_SWAP, q)
++GEN_VEXT_AMO_NOATOMIC_OP(vamoaddei8_32_v,   uint32_t, H4, DO_ADD,  l)
++GEN_VEXT_AMO_NOATOMIC_OP(vamoaddei8_64_v,   uint64_t, H8, DO_ADD,  q)
++GEN_VEXT_AMO_NOATOMIC_OP(vamoaddei16_32_v,  uint32_t, H4, DO_ADD,  l)
++GEN_VEXT_AMO_NOATOMIC_OP(vamoaddei16_64_v,  uint64_t, H8, DO_ADD,  q)
++GEN_VEXT_AMO_NOATOMIC_OP(vamoaddei32_32_v,  uint32_t, H4, DO_ADD,  l)
++GEN_VEXT_AMO_NOATOMIC_OP(vamoaddei32_64_v,  uint64_t, H8, DO_ADD,  q)
++GEN_VEXT_AMO_NOATOMIC_OP(vamoxorei8_32_v,   uint32_t, H4, DO_XOR,  l)
++GEN_VEXT_AMO_NOATOMIC_OP(vamoxorei8_64_v,   uint64_t, H8, DO_XOR,  q)
++GEN_VEXT_AMO_NOATOMIC_OP(vamoxorei16_32_v,  uint32_t, H4, DO_XOR,  l)
++GEN_VEXT_AMO_NOATOMIC_OP(vamoxorei16_64_v,  uint64_t, H8, DO_XOR,  q)
++GEN_VEXT_AMO_NOATOMIC_OP(vamoxorei32_32_v,  uint32_t, H4, DO_XOR,  l)
++GEN_VEXT_AMO_NOATOMIC_OP(vamoxorei32_64_v,  uint64_t, H8, DO_XOR,  q)
++GEN_VEXT_AMO_NOATOMIC_OP(vamoandei8_32_v,   uint32_t, H4, DO_AND,  l)
++GEN_VEXT_AMO_NOATOMIC_OP(vamoandei8_64_v,   uint64_t, H8, DO_AND,  q)
++GEN_VEXT_AMO_NOATOMIC_OP(vamoandei16_32_v,  uint32_t, H4, DO_AND,  l)
++GEN_VEXT_AMO_NOATOMIC_OP(vamoandei16_64_v,  uint64_t, H8, DO_AND,  q)
++GEN_VEXT_AMO_NOATOMIC_OP(vamoandei32_32_v,  uint32_t, H4, DO_AND,  l)
++GEN_VEXT_AMO_NOATOMIC_OP(vamoandei32_64_v,  uint64_t, H8, DO_AND,  q)
++GEN_VEXT_AMO_NOATOMIC_OP(vamoorei8_32_v,    uint32_t, H4, DO_OR,   l)
++GEN_VEXT_AMO_NOATOMIC_OP(vamoorei8_64_v,    uint64_t, H8, DO_OR,   q)
++GEN_VEXT_AMO_NOATOMIC_OP(vamoorei16_32_v,   uint32_t, H4, DO_OR,   l)
++GEN_VEXT_AMO_NOATOMIC_OP(vamoorei16_64_v,   uint64_t, H8, DO_OR,   q)
++GEN_VEXT_AMO_NOATOMIC_OP(vamoorei32_32_v,   uint32_t, H4, DO_OR,   l)
++GEN_VEXT_AMO_NOATOMIC_OP(vamoorei32_64_v,   uint64_t, H8, DO_OR,   q)
++GEN_VEXT_AMO_NOATOMIC_OP(vamominei8_32_v,   int32_t,  H4, DO_MIN,  l)
++GEN_VEXT_AMO_NOATOMIC_OP(vamominei8_64_v,   int64_t,  H8, DO_MIN,  q)
++GEN_VEXT_AMO_NOATOMIC_OP(vamominei16_32_v,  int32_t,  H4, DO_MIN,  l)
++GEN_VEXT_AMO_NOATOMIC_OP(vamominei16_64_v,  int64_t,  H8, DO_MIN,  q)
++GEN_VEXT_AMO_NOATOMIC_OP(vamominei32_32_v,  int32_t,  H4, DO_MIN,  l)
++GEN_VEXT_AMO_NOATOMIC_OP(vamominei32_64_v,  int64_t,  H8, DO_MIN,  q)
++GEN_VEXT_AMO_NOATOMIC_OP(vamomaxei8_32_v,   int32_t,  H4, DO_MAX,  l)
++GEN_VEXT_AMO_NOATOMIC_OP(vamomaxei8_64_v,   int64_t,  H8, DO_MAX,  q)
++GEN_VEXT_AMO_NOATOMIC_OP(vamomaxei16_32_v,  int32_t,  H4, DO_MAX,  l)
++GEN_VEXT_AMO_NOATOMIC_OP(vamomaxei16_64_v,  int64_t,  H8, DO_MAX,  q)
++GEN_VEXT_AMO_NOATOMIC_OP(vamomaxei32_32_v,  int32_t,  H4, DO_MAX,  l)
++GEN_VEXT_AMO_NOATOMIC_OP(vamomaxei32_64_v,  int64_t,  H8, DO_MAX,  q)
++GEN_VEXT_AMO_NOATOMIC_OP(vamominuei8_32_v,  uint32_t, H4, DO_MIN,  l)
++GEN_VEXT_AMO_NOATOMIC_OP(vamominuei8_64_v,  uint64_t, H8, DO_MIN,  q)
++GEN_VEXT_AMO_NOATOMIC_OP(vamominuei16_32_v, uint32_t, H4, DO_MIN,  l)
++GEN_VEXT_AMO_NOATOMIC_OP(vamominuei16_64_v, uint64_t, H8, DO_MIN,  q)
++GEN_VEXT_AMO_NOATOMIC_OP(vamominuei32_32_v, uint32_t, H4, DO_MIN,  l)
++GEN_VEXT_AMO_NOATOMIC_OP(vamominuei32_64_v, uint64_t, H8, DO_MIN,  q)
++GEN_VEXT_AMO_NOATOMIC_OP(vamomaxuei8_32_v,  uint32_t, H4, DO_MAX,  l)
++GEN_VEXT_AMO_NOATOMIC_OP(vamomaxuei8_64_v,  uint64_t, H8, DO_MAX,  q)
++GEN_VEXT_AMO_NOATOMIC_OP(vamomaxuei16_32_v, uint32_t, H4, DO_MAX,  l)
++GEN_VEXT_AMO_NOATOMIC_OP(vamomaxuei16_64_v, uint64_t, H8, DO_MAX,  q)
++GEN_VEXT_AMO_NOATOMIC_OP(vamomaxuei32_32_v, uint32_t, H4, DO_MAX,  l)
++GEN_VEXT_AMO_NOATOMIC_OP(vamomaxuei32_64_v, uint64_t, H8, DO_MAX,  q)
+ #ifdef TARGET_RISCV64
+-GEN_VEXT_AMO_NOATOMIC_OP(vamoswapw_v_d, 64, 32, H8, DO_SWAP, l)
+-GEN_VEXT_AMO_NOATOMIC_OP(vamoswapd_v_d, 64, 64, H8, DO_SWAP, q)
+-GEN_VEXT_AMO_NOATOMIC_OP(vamoaddw_v_d,  64, 32, H8, DO_ADD,  l)
+-GEN_VEXT_AMO_NOATOMIC_OP(vamoaddd_v_d,  64, 64, H8, DO_ADD,  q)
+-GEN_VEXT_AMO_NOATOMIC_OP(vamoxorw_v_d,  64, 32, H8, DO_XOR,  l)
+-GEN_VEXT_AMO_NOATOMIC_OP(vamoxord_v_d,  64, 64, H8, DO_XOR,  q)
+-GEN_VEXT_AMO_NOATOMIC_OP(vamoandw_v_d,  64, 32, H8, DO_AND,  l)
+-GEN_VEXT_AMO_NOATOMIC_OP(vamoandd_v_d,  64, 64, H8, DO_AND,  q)
+-GEN_VEXT_AMO_NOATOMIC_OP(vamoorw_v_d,   64, 32, H8, DO_OR,   l)
+-GEN_VEXT_AMO_NOATOMIC_OP(vamoord_v_d,   64, 64, H8, DO_OR,   q)
+-GEN_VEXT_AMO_NOATOMIC_OP(vamominw_v_d,  64, 32, H8, DO_MIN,  l)
+-GEN_VEXT_AMO_NOATOMIC_OP(vamomind_v_d,  64, 64, H8, DO_MIN,  q)
+-GEN_VEXT_AMO_NOATOMIC_OP(vamomaxw_v_d,  64, 32, H8, DO_MAX,  l)
+-GEN_VEXT_AMO_NOATOMIC_OP(vamomaxd_v_d,  64, 64, H8, DO_MAX,  q)
+-GEN_VEXT_AMO_NOATOMIC_OP(vamominuw_v_d, 64, 32, H8, DO_MINU, l)
+-GEN_VEXT_AMO_NOATOMIC_OP(vamominud_v_d, 64, 64, H8, DO_MINU, q)
+-GEN_VEXT_AMO_NOATOMIC_OP(vamomaxuw_v_d, 64, 32, H8, DO_MAXU, l)
+-GEN_VEXT_AMO_NOATOMIC_OP(vamomaxud_v_d, 64, 64, H8, DO_MAXU, q)
++GEN_VEXT_AMO_NOATOMIC_OP(vamoswapei64_32_v, uint32_t, H4, DO_SWAP, l)
++GEN_VEXT_AMO_NOATOMIC_OP(vamoswapei64_64_v, uint64_t, H8, DO_SWAP, q)
++GEN_VEXT_AMO_NOATOMIC_OP(vamoaddei64_32_v,  uint32_t, H4, DO_ADD,  l)
++GEN_VEXT_AMO_NOATOMIC_OP(vamoaddei64_64_v,  uint64_t, H8, DO_ADD,  q)
++GEN_VEXT_AMO_NOATOMIC_OP(vamoxorei64_32_v,  uint32_t, H4, DO_XOR,  l)
++GEN_VEXT_AMO_NOATOMIC_OP(vamoxorei64_64_v,  uint64_t, H8, DO_XOR,  q)
++GEN_VEXT_AMO_NOATOMIC_OP(vamoandei64_32_v,  uint32_t, H4, DO_AND,  l)
++GEN_VEXT_AMO_NOATOMIC_OP(vamoandei64_64_v,  uint64_t, H8, DO_AND,  q)
++GEN_VEXT_AMO_NOATOMIC_OP(vamoorei64_32_v,   uint32_t, H4, DO_OR,   l)
++GEN_VEXT_AMO_NOATOMIC_OP(vamoorei64_64_v,   uint64_t, H8, DO_OR,   q)
++GEN_VEXT_AMO_NOATOMIC_OP(vamominei64_32_v,  int32_t,  H4, DO_MIN,  l)
++GEN_VEXT_AMO_NOATOMIC_OP(vamominei64_64_v,  int64_t,  H8, DO_MIN,  q)
++GEN_VEXT_AMO_NOATOMIC_OP(vamomaxei64_32_v,  int32_t,  H4, DO_MAX,  l)
++GEN_VEXT_AMO_NOATOMIC_OP(vamomaxei64_64_v,  int64_t,  H8, DO_MAX,  q)
++GEN_VEXT_AMO_NOATOMIC_OP(vamominuei64_32_v, uint32_t, H4, DO_MIN,  l)
++GEN_VEXT_AMO_NOATOMIC_OP(vamominuei64_64_v, uint64_t, H8, DO_MIN,  q)
++GEN_VEXT_AMO_NOATOMIC_OP(vamomaxuei64_32_v, uint32_t, H4, DO_MAX,  l)
++GEN_VEXT_AMO_NOATOMIC_OP(vamomaxuei64_64_v, uint64_t, H8, DO_MAX,  q)
+ #endif
+ 
+ static inline void
+@@ -609,7 +645,7 @@ vext_amo_noatomic(void *vs3, void *v0, target_ulong base,
+                   void *vs2, CPURISCVState *env, uint32_t desc,
+                   vext_get_index_addr get_index_addr,
+                   vext_amo_noatomic_fn *noatomic_op,
+-                  uint32_t esz, uint32_t msz, uintptr_t ra)
++                  uint32_t esz, uintptr_t ra)
+ {
+     uint32_t i;
+     target_long addr;
+@@ -620,8 +656,8 @@ vext_amo_noatomic(void *vs3, void *v0, target_ulong base,
+         if (!vm && !vext_elem_mask(v0, i)) {
+             continue;
+         }
+-        probe_pages(env, get_index_addr(base, i, vs2), msz, ra, MMU_DATA_LOAD);
+-        probe_pages(env, get_index_addr(base, i, vs2), msz, ra, MMU_DATA_STORE);
++        probe_pages(env, get_index_addr(base, i, vs2), esz, ra, MMU_DATA_LOAD);
++        probe_pages(env, get_index_addr(base, i, vs2), esz, ra, MMU_DATA_STORE);
+     }
+     for (i = 0; i < env->vl; i++) {
+         if (!vm && !vext_elem_mask(v0, i)) {
+@@ -632,45 +668,89 @@ vext_amo_noatomic(void *vs3, void *v0, target_ulong base,
+     }
+ }
+ 
+-#define GEN_VEXT_AMO(NAME, MTYPE, ETYPE, INDEX_FN)              \
++#define GEN_VEXT_AMO(NAME, ETYPE, INDEX_FN)                     \
+ void HELPER(NAME)(void *vs3, void *v0, target_ulong base,       \
+                   void *vs2, CPURISCVState *env, uint32_t desc) \
+ {                                                               \
+     vext_amo_noatomic(vs3, v0, base, vs2, env, desc,            \
+                       INDEX_FN, vext_##NAME##_noatomic_op,      \
+-                      sizeof(ETYPE), sizeof(MTYPE),             \
+-                      GETPC());                                 \
+-}
+-
++                      sizeof(ETYPE), GETPC());                  \
++}
++
++GEN_VEXT_AMO(vamoswapei8_32_v,  int32_t, idx_b)
++GEN_VEXT_AMO(vamoswapei8_64_v,  int64_t, idx_b)
++GEN_VEXT_AMO(vamoswapei16_32_v, int32_t, idx_h)
++GEN_VEXT_AMO(vamoswapei16_64_v, int64_t, idx_h)
++GEN_VEXT_AMO(vamoswapei32_32_v, int32_t, idx_w)
++GEN_VEXT_AMO(vamoswapei32_64_v, int64_t, idx_w)
++GEN_VEXT_AMO(vamoaddei8_32_v,   int32_t, idx_b)
++GEN_VEXT_AMO(vamoaddei8_64_v,   int64_t, idx_b)
++GEN_VEXT_AMO(vamoaddei16_32_v,  int32_t, idx_h)
++GEN_VEXT_AMO(vamoaddei16_64_v,  int64_t, idx_h)
++GEN_VEXT_AMO(vamoaddei32_32_v,  int32_t, idx_w)
++GEN_VEXT_AMO(vamoaddei32_64_v,  int64_t, idx_w)
++GEN_VEXT_AMO(vamoxorei8_32_v,   int32_t, idx_b)
++GEN_VEXT_AMO(vamoxorei8_64_v,   int64_t, idx_b)
++GEN_VEXT_AMO(vamoxorei16_32_v,  int32_t, idx_h)
++GEN_VEXT_AMO(vamoxorei16_64_v,  int64_t, idx_h)
++GEN_VEXT_AMO(vamoxorei32_32_v,  int32_t, idx_w)
++GEN_VEXT_AMO(vamoxorei32_64_v,  int64_t, idx_w)
++GEN_VEXT_AMO(vamoandei8_32_v,   int32_t, idx_b)
++GEN_VEXT_AMO(vamoandei8_64_v,   int64_t, idx_b)
++GEN_VEXT_AMO(vamoandei16_32_v,  int32_t, idx_h)
++GEN_VEXT_AMO(vamoandei16_64_v,  int64_t, idx_h)
++GEN_VEXT_AMO(vamoandei32_32_v,  int32_t, idx_w)
++GEN_VEXT_AMO(vamoandei32_64_v,  int64_t, idx_w)
++GEN_VEXT_AMO(vamoorei8_32_v,    int32_t, idx_b)
++GEN_VEXT_AMO(vamoorei8_64_v,    int64_t, idx_b)
++GEN_VEXT_AMO(vamoorei16_32_v,   int32_t, idx_h)
++GEN_VEXT_AMO(vamoorei16_64_v,   int64_t, idx_h)
++GEN_VEXT_AMO(vamoorei32_32_v,   int32_t, idx_w)
++GEN_VEXT_AMO(vamoorei32_64_v,   int64_t, idx_w)
++GEN_VEXT_AMO(vamominei8_32_v,   int32_t, idx_b)
++GEN_VEXT_AMO(vamominei8_64_v,   int64_t, idx_b)
++GEN_VEXT_AMO(vamominei16_32_v,  int32_t, idx_h)
++GEN_VEXT_AMO(vamominei16_64_v,  int64_t, idx_h)
++GEN_VEXT_AMO(vamominei32_32_v,  int32_t, idx_w)
++GEN_VEXT_AMO(vamominei32_64_v,  int64_t, idx_w)
++GEN_VEXT_AMO(vamomaxei8_32_v,   int32_t, idx_b)
++GEN_VEXT_AMO(vamomaxei8_64_v,   int64_t, idx_b)
++GEN_VEXT_AMO(vamomaxei16_32_v,  int32_t, idx_h)
++GEN_VEXT_AMO(vamomaxei16_64_v,  int64_t, idx_h)
++GEN_VEXT_AMO(vamomaxei32_32_v,  int32_t, idx_w)
++GEN_VEXT_AMO(vamomaxei32_64_v,  int64_t, idx_w)
++GEN_VEXT_AMO(vamominuei8_32_v,  int32_t, idx_b)
++GEN_VEXT_AMO(vamominuei8_64_v,  int64_t, idx_b)
++GEN_VEXT_AMO(vamominuei16_32_v, int32_t, idx_h)
++GEN_VEXT_AMO(vamominuei16_64_v, int64_t, idx_h)
++GEN_VEXT_AMO(vamominuei32_32_v, int32_t, idx_w)
++GEN_VEXT_AMO(vamominuei32_64_v, int64_t, idx_w)
++GEN_VEXT_AMO(vamomaxuei8_32_v,  int32_t, idx_b)
++GEN_VEXT_AMO(vamomaxuei8_64_v,  int64_t, idx_b)
++GEN_VEXT_AMO(vamomaxuei16_32_v, int32_t, idx_h)
++GEN_VEXT_AMO(vamomaxuei16_64_v, int64_t, idx_h)
++GEN_VEXT_AMO(vamomaxuei32_32_v, int32_t, idx_w)
++GEN_VEXT_AMO(vamomaxuei32_64_v, int64_t, idx_w)
+ #ifdef TARGET_RISCV64
+-GEN_VEXT_AMO(vamoswapw_v_d, int32_t,  int64_t,  idx_d)
+-GEN_VEXT_AMO(vamoswapd_v_d, int64_t,  int64_t,  idx_d)
+-GEN_VEXT_AMO(vamoaddw_v_d,  int32_t,  int64_t,  idx_d)
+-GEN_VEXT_AMO(vamoaddd_v_d,  int64_t,  int64_t,  idx_d)
+-GEN_VEXT_AMO(vamoxorw_v_d,  int32_t,  int64_t,  idx_d)
+-GEN_VEXT_AMO(vamoxord_v_d,  int64_t,  int64_t,  idx_d)
+-GEN_VEXT_AMO(vamoandw_v_d,  int32_t,  int64_t,  idx_d)
+-GEN_VEXT_AMO(vamoandd_v_d,  int64_t,  int64_t,  idx_d)
+-GEN_VEXT_AMO(vamoorw_v_d,   int32_t,  int64_t,  idx_d)
+-GEN_VEXT_AMO(vamoord_v_d,   int64_t,  int64_t,  idx_d)
+-GEN_VEXT_AMO(vamominw_v_d,  int32_t,  int64_t,  idx_d)
+-GEN_VEXT_AMO(vamomind_v_d,  int64_t,  int64_t,  idx_d)
+-GEN_VEXT_AMO(vamomaxw_v_d,  int32_t,  int64_t,  idx_d)
+-GEN_VEXT_AMO(vamomaxd_v_d,  int64_t,  int64_t,  idx_d)
+-GEN_VEXT_AMO(vamominuw_v_d, uint32_t, uint64_t, idx_d)
+-GEN_VEXT_AMO(vamominud_v_d, uint64_t, uint64_t, idx_d)
+-GEN_VEXT_AMO(vamomaxuw_v_d, uint32_t, uint64_t, idx_d)
+-GEN_VEXT_AMO(vamomaxud_v_d, uint64_t, uint64_t, idx_d)
++GEN_VEXT_AMO(vamoswapei64_32_v, int32_t, idx_d)
++GEN_VEXT_AMO(vamoswapei64_64_v, int64_t, idx_d)
++GEN_VEXT_AMO(vamoaddei64_32_v,  int32_t, idx_d)
++GEN_VEXT_AMO(vamoaddei64_64_v,  int64_t, idx_d)
++GEN_VEXT_AMO(vamoxorei64_32_v,  int32_t, idx_d)
++GEN_VEXT_AMO(vamoxorei64_64_v,  int64_t, idx_d)
++GEN_VEXT_AMO(vamoandei64_32_v,  int32_t, idx_d)
++GEN_VEXT_AMO(vamoandei64_64_v,  int64_t, idx_d)
++GEN_VEXT_AMO(vamoorei64_32_v,   int32_t, idx_d)
++GEN_VEXT_AMO(vamoorei64_64_v,   int64_t, idx_d)
++GEN_VEXT_AMO(vamominei64_32_v,  int32_t, idx_d)
++GEN_VEXT_AMO(vamominei64_64_v,  int64_t, idx_d)
++GEN_VEXT_AMO(vamomaxei64_32_v,  int32_t, idx_d)
++GEN_VEXT_AMO(vamomaxei64_64_v,  int64_t, idx_d)
++GEN_VEXT_AMO(vamominuei64_32_v, int32_t, idx_d)
++GEN_VEXT_AMO(vamominuei64_64_v, int64_t, idx_d)
++GEN_VEXT_AMO(vamomaxuei64_32_v, int32_t, idx_d)
++GEN_VEXT_AMO(vamomaxuei64_64_v, int64_t, idx_d)
+ #endif
+-GEN_VEXT_AMO(vamoswapw_v_w, int32_t,  int32_t,  idx_w)
+-GEN_VEXT_AMO(vamoaddw_v_w,  int32_t,  int32_t,  idx_w)
+-GEN_VEXT_AMO(vamoxorw_v_w,  int32_t,  int32_t,  idx_w)
+-GEN_VEXT_AMO(vamoandw_v_w,  int32_t,  int32_t,  idx_w)
+-GEN_VEXT_AMO(vamoorw_v_w,   int32_t,  int32_t,  idx_w)
+-GEN_VEXT_AMO(vamominw_v_w,  int32_t,  int32_t,  idx_w)
+-GEN_VEXT_AMO(vamomaxw_v_w,  int32_t,  int32_t,  idx_w)
+-GEN_VEXT_AMO(vamominuw_v_w, uint32_t, uint32_t, idx_w)
+-GEN_VEXT_AMO(vamomaxuw_v_w, uint32_t, uint32_t, idx_w)
+ 
+ /*
+  *** Vector Integer Arithmetic Instructions
+-- 
+2.33.1
+
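The rewritten amo_op() above selects its helper from a two-dimensional table, indexed first by the per-instruction sequence number passed through GEN_VEXT_AMO_TRANS and then by SEW (only SEW = 32 and SEW = 64, i.e. s->sew of 2 or 3, are legal for Zvamo), with NULL marking helpers that do not exist on the current build. The following minimal standalone sketch models that lookup; all names are hypothetical stand-ins, not the QEMU source.

    /* Minimal standalone model of the fns[seq][sew - 2] lookup used by
     * amo_op() above. Column 0 holds the 32-bit-SEW helper, column 1 the
     * 64-bit one; a NULL entry means the helper is absent on this build
     * (e.g. the ei64 helpers on an RV32 target). All names here are
     * hypothetical, not the real gen_helper_* symbols. */
    #include <stddef.h>
    #include <stdio.h>

    typedef void amo_helper_fn(void);

    static void helper_swap_sew32(void) { puts("vamoswap, SEW=32"); }
    static void helper_swap_sew64(void) { puts("vamoswap, SEW=64"); }

    static amo_helper_fn *const fns[][2] = {
        { helper_swap_sew32, helper_swap_sew64 },  /* seq 0: vamoswapei8_v */
        /* ... one row per seq value, NULL where a helper does not exist */
    };

    static int dispatch(unsigned seq, unsigned sew)
    {
        amo_helper_fn *fn;

        if (sew < 2 || sew > 3) {      /* amo_check() already rejects SEW < 32 */
            return 0;
        }
        fn = fns[seq][sew - 2];
        if (fn == NULL) {              /* mirrors the "return false" path above */
            return 0;
        }
        fn();
        return 1;
    }

    int main(void)
    {
        return dispatch(0, 2) ? 0 : 1; /* calls helper_swap_sew32 */
    }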

+ 244 - 0
recipes-devtools/qemu/qemu/0031-target-riscv-rvv-1.0-load-store-whole-register-instr.patch

@@ -0,0 +1,244 @@
+From fd907cac2c360bd2442ffd809212e0c7428b41e5 Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Mon, 17 Aug 2020 09:41:07 +0800
+Subject: [PATCH 031/107] target/riscv: rvv-1.0: load/store whole register
+ instructions
+
+Add the following instructions:
+
+* vl<nf>re<eew>.v
+* vs<nf>r.v
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
+---
+ target/riscv/helper.h                   | 21 ++++++++
+ target/riscv/insn32.decode              | 22 ++++++++
+ target/riscv/insn_trans/trans_rvv.c.inc | 69 +++++++++++++++++++++++++
+ target/riscv/vector_helper.c            | 65 +++++++++++++++++++++++
+ 4 files changed, 177 insertions(+)
+
+diff --git a/target/riscv/helper.h b/target/riscv/helper.h
+index f26af64d5b..f35cd987ee 100644
+--- a/target/riscv/helper.h
++++ b/target/riscv/helper.h
+@@ -174,6 +174,27 @@ DEF_HELPER_5(vle16ff_v, void, ptr, ptr, tl, env, i32)
+ DEF_HELPER_5(vle32ff_v, void, ptr, ptr, tl, env, i32)
+ DEF_HELPER_5(vle64ff_v, void, ptr, ptr, tl, env, i32)
+ 
++DEF_HELPER_4(vl1re8_v, void, ptr, tl, env, i32)
++DEF_HELPER_4(vl1re16_v, void, ptr, tl, env, i32)
++DEF_HELPER_4(vl1re32_v, void, ptr, tl, env, i32)
++DEF_HELPER_4(vl1re64_v, void, ptr, tl, env, i32)
++DEF_HELPER_4(vl2re8_v, void, ptr, tl, env, i32)
++DEF_HELPER_4(vl2re16_v, void, ptr, tl, env, i32)
++DEF_HELPER_4(vl2re32_v, void, ptr, tl, env, i32)
++DEF_HELPER_4(vl2re64_v, void, ptr, tl, env, i32)
++DEF_HELPER_4(vl4re8_v, void, ptr, tl, env, i32)
++DEF_HELPER_4(vl4re16_v, void, ptr, tl, env, i32)
++DEF_HELPER_4(vl4re32_v, void, ptr, tl, env, i32)
++DEF_HELPER_4(vl4re64_v, void, ptr, tl, env, i32)
++DEF_HELPER_4(vl8re8_v, void, ptr, tl, env, i32)
++DEF_HELPER_4(vl8re16_v, void, ptr, tl, env, i32)
++DEF_HELPER_4(vl8re32_v, void, ptr, tl, env, i32)
++DEF_HELPER_4(vl8re64_v, void, ptr, tl, env, i32)
++DEF_HELPER_4(vs1r_v, void, ptr, tl, env, i32)
++DEF_HELPER_4(vs2r_v, void, ptr, tl, env, i32)
++DEF_HELPER_4(vs4r_v, void, ptr, tl, env, i32)
++DEF_HELPER_4(vs8r_v, void, ptr, tl, env, i32)
++
+ DEF_HELPER_6(vamoswapei8_32_v, void, ptr, ptr, tl, ptr, env, i32)
+ DEF_HELPER_6(vamoswapei8_64_v, void, ptr, ptr, tl, ptr, env, i32)
+ DEF_HELPER_6(vamoswapei16_32_v, void, ptr, ptr, tl, ptr, env, i32)
+diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
+index ae406dff3b..dec3fe1f34 100644
+--- a/target/riscv/insn32.decode
++++ b/target/riscv/insn32.decode
+@@ -278,6 +278,28 @@ vle16ff_v     ... 000 . 10000 ..... 101 ..... 0000111 @r2_nfvm
+ vle32ff_v     ... 000 . 10000 ..... 110 ..... 0000111 @r2_nfvm
+ vle64ff_v     ... 000 . 10000 ..... 111 ..... 0000111 @r2_nfvm
+ 
++# Vector whole register insns
++vl1re8_v      000 000 1 01000 ..... 000 ..... 0000111 @r2
++vl1re16_v     000 000 1 01000 ..... 101 ..... 0000111 @r2
++vl1re32_v     000 000 1 01000 ..... 110 ..... 0000111 @r2
++vl1re64_v     000 000 1 01000 ..... 111 ..... 0000111 @r2
++vl2re8_v      001 000 1 01000 ..... 000 ..... 0000111 @r2
++vl2re16_v     001 000 1 01000 ..... 101 ..... 0000111 @r2
++vl2re32_v     001 000 1 01000 ..... 110 ..... 0000111 @r2
++vl2re64_v     001 000 1 01000 ..... 111 ..... 0000111 @r2
++vl4re8_v      011 000 1 01000 ..... 000 ..... 0000111 @r2
++vl4re16_v     011 000 1 01000 ..... 101 ..... 0000111 @r2
++vl4re32_v     011 000 1 01000 ..... 110 ..... 0000111 @r2
++vl4re64_v     011 000 1 01000 ..... 111 ..... 0000111 @r2
++vl8re8_v      111 000 1 01000 ..... 000 ..... 0000111 @r2
++vl8re16_v     111 000 1 01000 ..... 101 ..... 0000111 @r2
++vl8re32_v     111 000 1 01000 ..... 110 ..... 0000111 @r2
++vl8re64_v     111 000 1 01000 ..... 111 ..... 0000111 @r2
++vs1r_v        000 000 1 01000 ..... 000 ..... 0100111 @r2
++vs2r_v        001 000 1 01000 ..... 000 ..... 0100111 @r2
++vs4r_v        011 000 1 01000 ..... 000 ..... 0100111 @r2
++vs8r_v        111 000 1 01000 ..... 000 ..... 0100111 @r2
++
+ #*** Vector AMO operations are encoded under the standard AMO major opcode ***
+ vamoswapei8_v   00001 . . ..... ..... 000 ..... 0101111 @r_wdvm
+ vamoswapei16_v  00001 . . ..... ..... 101 ..... 0101111 @r_wdvm
+diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
+index 5057dff5eb..146d330894 100644
+--- a/target/riscv/insn_trans/trans_rvv.c.inc
++++ b/target/riscv/insn_trans/trans_rvv.c.inc
+@@ -1014,6 +1014,75 @@ GEN_VEXT_TRANS(vle16ff_v, MO_16, r2nfvm, ldff_op, ld_us_check)
+ GEN_VEXT_TRANS(vle32ff_v, MO_32, r2nfvm, ldff_op, ld_us_check)
+ GEN_VEXT_TRANS(vle64ff_v, MO_64, r2nfvm, ldff_op, ld_us_check)
+ 
++/*
++ * load and store whole register instructions
++ */
++typedef void gen_helper_ldst_whole(TCGv_ptr, TCGv, TCGv_env, TCGv_i32);
++
++static bool ldst_whole_trans(uint32_t vd, uint32_t rs1, uint32_t nf,
++                             gen_helper_ldst_whole *fn, DisasContext *s,
++                             bool is_store)
++{
++    TCGv_ptr dest;
++    TCGv base;
++    TCGv_i32 desc;
++
++    uint32_t data = FIELD_DP32(0, VDATA, NF, nf);
++    dest = tcg_temp_new_ptr();
++    base = tcg_temp_new();
++    desc = tcg_const_i32(simd_desc(0, s->vlen / 8, data));
++
++    gen_get_gpr(base, rs1);
++    tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
++
++    fn(dest, base, cpu_env, desc);
++
++    tcg_temp_free_ptr(dest);
++    tcg_temp_free(base);
++    tcg_temp_free_i32(desc);
++    if (!is_store) {
++        mark_vs_dirty(s);
++    }
++    return true;
++}
++
++/*
++ * Load and store whole register instructions ignore the vtype and vl settings.
++ * Thus, we don't need to check the vill bit. (Section 7.9)
++ */
++#define GEN_LDST_WHOLE_TRANS(NAME, ARG_NF, IS_STORE)                      \
++static bool trans_##NAME(DisasContext *s, arg_##NAME * a)                 \
++{                                                                         \
++    if (require_rvv(s) &&                                                 \
++        QEMU_IS_ALIGNED(a->rd, ARG_NF)) {                                 \
++        return ldst_whole_trans(a->rd, a->rs1, ARG_NF, gen_helper_##NAME, \
++                                s, IS_STORE);                             \
++    }                                                                     \
++    return false;                                                         \
++}
++
++GEN_LDST_WHOLE_TRANS(vl1re8_v,  1, false)
++GEN_LDST_WHOLE_TRANS(vl1re16_v, 1, false)
++GEN_LDST_WHOLE_TRANS(vl1re32_v, 1, false)
++GEN_LDST_WHOLE_TRANS(vl1re64_v, 1, false)
++GEN_LDST_WHOLE_TRANS(vl2re8_v,  2, false)
++GEN_LDST_WHOLE_TRANS(vl2re16_v, 2, false)
++GEN_LDST_WHOLE_TRANS(vl2re32_v, 2, false)
++GEN_LDST_WHOLE_TRANS(vl2re64_v, 2, false)
++GEN_LDST_WHOLE_TRANS(vl4re8_v,  4, false)
++GEN_LDST_WHOLE_TRANS(vl4re16_v, 4, false)
++GEN_LDST_WHOLE_TRANS(vl4re32_v, 4, false)
++GEN_LDST_WHOLE_TRANS(vl4re64_v, 4, false)
++GEN_LDST_WHOLE_TRANS(vl8re8_v,  8, false)
++GEN_LDST_WHOLE_TRANS(vl8re16_v, 8, false)
++GEN_LDST_WHOLE_TRANS(vl8re32_v, 8, false)
++GEN_LDST_WHOLE_TRANS(vl8re64_v, 8, false)
++
++GEN_LDST_WHOLE_TRANS(vs1r_v, 1, true)
++GEN_LDST_WHOLE_TRANS(vs2r_v, 2, true)
++GEN_LDST_WHOLE_TRANS(vs4r_v, 4, true)
++GEN_LDST_WHOLE_TRANS(vs8r_v, 8, true)
++
+ /*
+  *** vector atomic operation
+  */
+diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
+index 8bc3bf77a3..05ec6e040c 100644
+--- a/target/riscv/vector_helper.c
++++ b/target/riscv/vector_helper.c
+@@ -534,6 +534,71 @@ GEN_VEXT_LDFF(vle16ff_v, int16_t, lde_h)
+ GEN_VEXT_LDFF(vle32ff_v, int32_t, lde_w)
+ GEN_VEXT_LDFF(vle64ff_v, int64_t, lde_d)
+ 
++/*
++ *** load and store whole register instructions
++ */
++static void
++vext_ldst_whole(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc,
++                vext_ldst_elem_fn *ldst_elem, uint32_t esz, uintptr_t ra,
++                MMUAccessType access_type)
++{
++    uint32_t i, k;
++    uint32_t nf = vext_nf(desc);
++    uint32_t vlenb = env_archcpu(env)->cfg.vlen >> 3;
++    uint32_t max_elems = vlenb >> esz;
++
++    /* probe every access */
++    probe_pages(env, base, vlenb * nf, ra, access_type);
++
++    /* load bytes from guest memory */
++    for (k = 0; k < nf; k++) {
++        for (i = 0; i < max_elems; i++) {
++            target_ulong addr = base + ((i + k * max_elems) << esz);
++            ldst_elem(env, addr, i + k * max_elems, vd, ra);
++        }
++    }
++}
++
++#define GEN_VEXT_LD_WHOLE(NAME, ETYPE, LOAD_FN)      \
++void HELPER(NAME)(void *vd, target_ulong base,       \
++                  CPURISCVState *env, uint32_t desc) \
++{                                                    \
++    vext_ldst_whole(vd, base, env, desc, LOAD_FN,    \
++                    ctzl(sizeof(ETYPE)), GETPC(),    \
++                    MMU_DATA_LOAD);                  \
++}
++
++GEN_VEXT_LD_WHOLE(vl1re8_v,  int8_t,  lde_b)
++GEN_VEXT_LD_WHOLE(vl1re16_v, int16_t, lde_h)
++GEN_VEXT_LD_WHOLE(vl1re32_v, int32_t, lde_w)
++GEN_VEXT_LD_WHOLE(vl1re64_v, int64_t, lde_d)
++GEN_VEXT_LD_WHOLE(vl2re8_v,  int8_t,  lde_b)
++GEN_VEXT_LD_WHOLE(vl2re16_v, int16_t, lde_h)
++GEN_VEXT_LD_WHOLE(vl2re32_v, int32_t, lde_w)
++GEN_VEXT_LD_WHOLE(vl2re64_v, int64_t, lde_d)
++GEN_VEXT_LD_WHOLE(vl4re8_v,  int8_t,  lde_b)
++GEN_VEXT_LD_WHOLE(vl4re16_v, int16_t, lde_h)
++GEN_VEXT_LD_WHOLE(vl4re32_v, int32_t, lde_w)
++GEN_VEXT_LD_WHOLE(vl4re64_v, int64_t, lde_d)
++GEN_VEXT_LD_WHOLE(vl8re8_v,  int8_t,  lde_b)
++GEN_VEXT_LD_WHOLE(vl8re16_v, int16_t, lde_h)
++GEN_VEXT_LD_WHOLE(vl8re32_v, int32_t, lde_w)
++GEN_VEXT_LD_WHOLE(vl8re64_v, int64_t, lde_d)
++
++#define GEN_VEXT_ST_WHOLE(NAME, ETYPE, STORE_FN)     \
++void HELPER(NAME)(void *vd, target_ulong base,       \
++                  CPURISCVState *env, uint32_t desc) \
++{                                                    \
++    vext_ldst_whole(vd, base, env, desc, STORE_FN,   \
++                    ctzl(sizeof(ETYPE)), GETPC(),    \
++                    MMU_DATA_STORE);                 \
++}
++
++GEN_VEXT_ST_WHOLE(vs1r_v, int8_t, ste_b)
++GEN_VEXT_ST_WHOLE(vs2r_v, int8_t, ste_b)
++GEN_VEXT_ST_WHOLE(vs4r_v, int8_t, ste_b)
++GEN_VEXT_ST_WHOLE(vs8r_v, int8_t, ste_b)
++
+ /*
+  *** Vector AMO Operations (Zvamo)
+  */
+-- 
+2.33.1
+
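The core of the new helpers is the address arithmetic in vext_ldst_whole(): nf vector registers of vlenb bytes each are moved as one contiguous block starting at base, element by element. Below is a small self-contained C model of that addressing, using plain memcpy instead of the TCG load/store callbacks; the buffer layout and names are assumptions for illustration only. Note also that GEN_LDST_WHOLE_TRANS only accepts a destination register number aligned to the register-group size (QEMU_IS_ALIGNED(a->rd, ARG_NF)).

    /* Toy model of the whole-register transfer in vext_ldst_whole():
     * max_elems = vlenb >> esz elements per register, and element (k, i)
     * lives at byte offset (i + k * max_elems) << esz from base. */
    #include <stdint.h>
    #include <string.h>

    static void ldst_whole_model(uint8_t *dst, const uint8_t *base,
                                 uint32_t nf, uint32_t vlenb, uint32_t esz)
    {
        uint32_t max_elems = vlenb >> esz;        /* elements per vector register */

        for (uint32_t k = 0; k < nf; k++) {
            for (uint32_t i = 0; i < max_elems; i++) {
                uint32_t idx = i + k * max_elems;
                /* copy one element of (1 << esz) bytes */
                memcpy(dst + (idx << esz), base + (idx << esz), 1u << esz);
            }
        }
    }

    int main(void)
    {
        uint8_t src[64] = { 1, 2, 3 }, dst[64] = { 0 };
        /* e.g. vl2re32.v: nf = 2 registers, vlenb = 32 bytes, esz = 2 */
        ldst_whole_model(dst, src, 2, 32, 2);
        return dst[0] == 1 ? 0 : 1;
    }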

+ 411 - 0
recipes-devtools/qemu/qemu/0032-target-riscv-rvv-1.0-update-vext_max_elems-for-load-.patch

@@ -0,0 +1,411 @@
+From 1c1e08410bf1c6f1f8fe14aaa369b9cb57192293 Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Tue, 29 Sep 2020 23:25:37 +0800
+Subject: [PATCH 032/107] target/riscv: rvv-1.0: update vext_max_elems() for
+ load/store insns
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
+---
+ target/riscv/insn_trans/trans_rvv.c.inc | 32 +++++++--
+ target/riscv/vector_helper.c            | 90 ++++++++++++++-----------
+ 2 files changed, 74 insertions(+), 48 deletions(-)
+
+diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
+index 146d330894..a992bd170d 100644
+--- a/target/riscv/insn_trans/trans_rvv.c.inc
++++ b/target/riscv/insn_trans/trans_rvv.c.inc
+@@ -586,6 +586,12 @@ static bool trans_##NAME(DisasContext *s, arg_##ARGTYPE * a) \
+     return false;                                            \
+ }
+ 
++static uint8_t vext_get_emul(DisasContext *s, uint8_t eew)
++{
++    int8_t emul = eew - s->sew + s->lmul;
++    return emul < 0 ? 0 : emul;
++}
++
+ /*
+  *** unit stride load and store
+  */
+@@ -651,8 +657,14 @@ static bool ld_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t eew)
+         return false;
+     }
+ 
++    /*
++     * Vector load/store instructions have the EEW encoded
++     * directly in the instructions. The maximum vector size is
++     * calculated with EMUL rather than LMUL.
++     */
++    uint8_t emul = vext_get_emul(s, eew);
+     data = FIELD_DP32(data, VDATA, VM, a->vm);
+-    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
++    data = FIELD_DP32(data, VDATA, LMUL, emul);
+     data = FIELD_DP32(data, VDATA, NF, a->nf);
+     return ldst_us_trans(a->rd, a->rs1, data, fn, s, false);
+ }
+@@ -687,8 +699,9 @@ static bool st_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t eew)
+         return false;
+     }
+ 
++    uint8_t emul = vext_get_emul(s, eew);
+     data = FIELD_DP32(data, VDATA, VM, a->vm);
+-    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
++    data = FIELD_DP32(data, VDATA, LMUL, emul);
+     data = FIELD_DP32(data, VDATA, NF, a->nf);
+     return ldst_us_trans(a->rd, a->rs1, data, fn, s, true);
+ }
+@@ -761,8 +774,9 @@ static bool ld_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
+         return false;
+     }
+ 
++    uint8_t emul = vext_get_emul(s, eew);
+     data = FIELD_DP32(data, VDATA, VM, a->vm);
+-    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
++    data = FIELD_DP32(data, VDATA, LMUL, emul);
+     data = FIELD_DP32(data, VDATA, NF, a->nf);
+     return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s, false);
+ }
+@@ -789,8 +803,9 @@ static bool st_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
+         gen_helper_vsse32_v,  gen_helper_vsse64_v
+     };
+ 
++    uint8_t emul = vext_get_emul(s, eew);
+     data = FIELD_DP32(data, VDATA, VM, a->vm);
+-    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
++    data = FIELD_DP32(data, VDATA, LMUL, emul);
+     data = FIELD_DP32(data, VDATA, NF, a->nf);
+     fn = fns[eew];
+     if (fn == NULL) {
+@@ -887,8 +902,9 @@ static bool ld_index_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
+ 
+     fn = fns[eew][s->sew];
+ 
++    uint8_t emul = vext_get_emul(s, s->sew);
+     data = FIELD_DP32(data, VDATA, VM, a->vm);
+-    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
++    data = FIELD_DP32(data, VDATA, LMUL, emul);
+     data = FIELD_DP32(data, VDATA, NF, a->nf);
+     return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s, false);
+ }
+@@ -938,8 +954,9 @@ static bool st_index_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
+ 
+     fn = fns[eew][s->sew];
+ 
++    uint8_t emul = vext_get_emul(s, s->sew);
+     data = FIELD_DP32(data, VDATA, VM, a->vm);
+-    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
++    data = FIELD_DP32(data, VDATA, LMUL, emul);
+     data = FIELD_DP32(data, VDATA, NF, a->nf);
+     return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s, true);
+ }
+@@ -1003,8 +1020,9 @@ static bool ldff_op(DisasContext *s, arg_r2nfvm *a, uint8_t eew)
+         return false;
+     }
+ 
++    uint8_t emul = vext_get_emul(s, eew);
+     data = FIELD_DP32(data, VDATA, VM, a->vm);
+-    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
++    data = FIELD_DP32(data, VDATA, LMUL, emul);
+     data = FIELD_DP32(data, VDATA, NF, a->nf);
+     return ldff_trans(a->rd, a->rs1, data, fn, s);
+ }
+diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
+index 05ec6e040c..0a9fb898a1 100644
+--- a/target/riscv/vector_helper.c
++++ b/target/riscv/vector_helper.c
+@@ -17,6 +17,7 @@
+  */
+ 
+ #include "qemu/osdep.h"
++#include "qemu/host-utils.h"
+ #include "cpu.h"
+ #include "exec/memop.h"
+ #include "exec/exec-all.h"
+@@ -121,14 +122,21 @@ static uint32_t vext_wd(uint32_t desc)
+ }
+ 
+ /*
+- * Get vector group length in bytes. Its range is [64, 2048].
++ * Get the maximum number of elements that can be operated on.
+  *
+- * As simd_desc support at most 256, the max vlen is 512 bits.
+- * So vlen in bytes is encoded as maxsz.
++ * esz: log2 of element size in bytes.
+  */
+-static inline uint32_t vext_maxsz(uint32_t desc)
++static inline uint32_t vext_max_elems(uint32_t desc, uint32_t esz)
+ {
+-    return simd_maxsz(desc) << vext_lmul(desc);
++    /*
++     * As simd_desc supports at most 256 bytes, the max vlen is 256 bits,
++     * so vlen in bytes (vlenb) is encoded as maxsz.
++     */
++    uint32_t vlenb = simd_maxsz(desc);
++
++    /* Return VLMAX */
++    int scale = vext_lmul(desc) - esz;
++    return scale < 0 ? vlenb >> -scale : vlenb << scale;
+ }
+ 
+ /*
+@@ -221,14 +229,14 @@ vext_ldst_stride(void *vd, void *v0, target_ulong base,
+ {
+     uint32_t i, k;
+     uint32_t nf = vext_nf(desc);
+-    uint32_t vlmax = vext_maxsz(desc) / esz;
++    uint32_t max_elems = vext_max_elems(desc, esz);
+ 
+     /* probe every access*/
+     for (i = 0; i < env->vl; i++) {
+         if (!vm && !vext_elem_mask(v0, i)) {
+             continue;
+         }
+-        probe_pages(env, base + stride * i, nf * esz, ra, access_type);
++        probe_pages(env, base + stride * i, nf << esz, ra, access_type);
+     }
+     /* do real access */
+     for (i = 0; i < env->vl; i++) {
+@@ -237,8 +245,8 @@ vext_ldst_stride(void *vd, void *v0, target_ulong base,
+             continue;
+         }
+         while (k < nf) {
+-            target_ulong addr = base + stride * i + k * esz;
+-            ldst_elem(env, addr, i + k * vlmax, vd, ra);
++            target_ulong addr = base + stride * i + (k << esz);
++            ldst_elem(env, addr, i + k * max_elems, vd, ra);
+             k++;
+         }
+     }
+@@ -251,7 +259,7 @@ void HELPER(NAME)(void *vd, void * v0, target_ulong base,               \
+ {                                                                       \
+     uint32_t vm = vext_vm(desc);                                        \
+     vext_ldst_stride(vd, v0, base, stride, env, desc, vm, LOAD_FN,      \
+-                     sizeof(ETYPE), GETPC(), MMU_DATA_LOAD);            \
++                     ctzl(sizeof(ETYPE)), GETPC(), MMU_DATA_LOAD);      \
+ }
+ 
+ GEN_VEXT_LD_STRIDE(vlse8_v,  int8_t,  lde_b)
+@@ -266,7 +274,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong base,                \
+ {                                                                       \
+     uint32_t vm = vext_vm(desc);                                        \
+     vext_ldst_stride(vd, v0, base, stride, env, desc, vm, STORE_FN,     \
+-                     sizeof(ETYPE), GETPC(), MMU_DATA_STORE);           \
++                     ctzl(sizeof(ETYPE)), GETPC(), MMU_DATA_STORE);     \
+ }
+ 
+ GEN_VEXT_ST_STRIDE(vsse8_v,  int8_t,  ste_b)
+@@ -286,16 +294,16 @@ vext_ldst_us(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc,
+ {
+     uint32_t i, k;
+     uint32_t nf = vext_nf(desc);
+-    uint32_t vlmax = vext_maxsz(desc) / esz;
++    uint32_t max_elems = vext_max_elems(desc, esz);
+ 
+     /* probe every access */
+-    probe_pages(env, base, env->vl * nf * esz, ra, access_type);
++    probe_pages(env, base, env->vl * (nf << esz), ra, access_type);
+     /* load bytes from guest memory */
+     for (i = 0; i < env->vl; i++) {
+         k = 0;
+         while (k < nf) {
+-            target_ulong addr = base + (i * nf + k) * esz;
+-            ldst_elem(env, addr, i + k * vlmax, vd, ra);
++            target_ulong addr = base + ((i * nf + k) << esz);
++            ldst_elem(env, addr, i + k * max_elems, vd, ra);
+             k++;
+         }
+     }
+@@ -310,16 +318,16 @@ vext_ldst_us(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc,
+ void HELPER(NAME##_mask)(void *vd, void *v0, target_ulong base,         \
+                          CPURISCVState *env, uint32_t desc)             \
+ {                                                                       \
+-    uint32_t stride = vext_nf(desc) * sizeof(ETYPE);                    \
++    uint32_t stride = vext_nf(desc) << ctzl(sizeof(ETYPE));             \
+     vext_ldst_stride(vd, v0, base, stride, env, desc, false, LOAD_FN,   \
+-                     sizeof(ETYPE), GETPC(), MMU_DATA_LOAD);            \
++                     ctzl(sizeof(ETYPE)), GETPC(), MMU_DATA_LOAD);      \
+ }                                                                       \
+                                                                         \
+ void HELPER(NAME)(void *vd, void *v0, target_ulong base,                \
+                   CPURISCVState *env, uint32_t desc)                    \
+ {                                                                       \
+     vext_ldst_us(vd, base, env, desc, LOAD_FN,                          \
+-                 sizeof(ETYPE), GETPC(), MMU_DATA_LOAD);                \
++                 ctzl(sizeof(ETYPE)), GETPC(), MMU_DATA_LOAD);          \
+ }
+ 
+ GEN_VEXT_LD_US(vle8_v,  int8_t,  lde_b)
+@@ -331,16 +339,16 @@ GEN_VEXT_LD_US(vle64_v, int64_t, lde_d)
+ void HELPER(NAME##_mask)(void *vd, void *v0, target_ulong base,         \
+                          CPURISCVState *env, uint32_t desc)             \
+ {                                                                       \
+-    uint32_t stride = vext_nf(desc) * sizeof(ETYPE);                    \
++    uint32_t stride = vext_nf(desc) << ctzl(sizeof(ETYPE));             \
+     vext_ldst_stride(vd, v0, base, stride, env, desc, false, STORE_FN,  \
+-                     sizeof(ETYPE), GETPC(), MMU_DATA_STORE);           \
++                     ctzl(sizeof(ETYPE)), GETPC(), MMU_DATA_STORE);     \
+ }                                                                       \
+                                                                         \
+ void HELPER(NAME)(void *vd, void *v0, target_ulong base,                \
+                   CPURISCVState *env, uint32_t desc)                    \
+ {                                                                       \
+     vext_ldst_us(vd, base, env, desc, STORE_FN,                         \
+-                 sizeof(ETYPE), GETPC(), MMU_DATA_STORE);               \
++                 ctzl(sizeof(ETYPE)), GETPC(), MMU_DATA_STORE);         \
+ }
+ 
+ GEN_VEXT_ST_US(vse8_v,  int8_t,  ste_b)
+@@ -376,14 +384,14 @@ vext_ldst_index(void *vd, void *v0, target_ulong base,
+     uint32_t i, k;
+     uint32_t nf = vext_nf(desc);
+     uint32_t vm = vext_vm(desc);
+-    uint32_t vlmax = vext_maxsz(desc) / esz;
++    uint32_t max_elems = vext_max_elems(desc, esz);
+ 
+     /* probe every access*/
+     for (i = 0; i < env->vl; i++) {
+         if (!vm && !vext_elem_mask(v0, i)) {
+             continue;
+         }
+-        probe_pages(env, get_index_addr(base, i, vs2), nf * esz, ra,
++        probe_pages(env, get_index_addr(base, i, vs2), nf << esz, ra,
+                     access_type);
+     }
+     /* load bytes from guest memory */
+@@ -393,8 +401,8 @@ vext_ldst_index(void *vd, void *v0, target_ulong base,
+             continue;
+         }
+         while (k < nf) {
+-            abi_ptr addr = get_index_addr(base, i, vs2) + k * esz;
+-            ldst_elem(env, addr, i + k * vlmax, vd, ra);
++            abi_ptr addr = get_index_addr(base, i, vs2) + (k << esz);
++            ldst_elem(env, addr, i + k * max_elems, vd, ra);
+             k++;
+         }
+     }
+@@ -405,7 +413,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong base,                   \
+                   void *vs2, CPURISCVState *env, uint32_t desc)            \
+ {                                                                          \
+     vext_ldst_index(vd, v0, base, vs2, env, desc, INDEX_FN,                \
+-                    LOAD_FN, sizeof(ETYPE), GETPC(), MMU_DATA_LOAD);       \
++                    LOAD_FN, ctzl(sizeof(ETYPE)), GETPC(), MMU_DATA_LOAD); \
+ }
+ 
+ GEN_VEXT_LD_INDEX(vlxei8_8_v,   int8_t,  idx_b, lde_b)
+@@ -430,7 +438,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong base,         \
+                   void *vs2, CPURISCVState *env, uint32_t desc)  \
+ {                                                                \
+     vext_ldst_index(vd, v0, base, vs2, env, desc, INDEX_FN,      \
+-                    STORE_FN, sizeof(ETYPE),                     \
++                    STORE_FN, ctzl(sizeof(ETYPE)),               \
+                     GETPC(), MMU_DATA_STORE);                    \
+ }
+ 
+@@ -464,7 +472,7 @@ vext_ldff(void *vd, void *v0, target_ulong base,
+     uint32_t i, k, vl = 0;
+     uint32_t nf = vext_nf(desc);
+     uint32_t vm = vext_vm(desc);
+-    uint32_t vlmax = vext_maxsz(desc) / esz;
++    uint32_t max_elems = vext_max_elems(desc, esz);
+     target_ulong addr, offset, remain;
+ 
+     /* probe every access*/
+@@ -472,24 +480,24 @@ vext_ldff(void *vd, void *v0, target_ulong base,
+         if (!vm && !vext_elem_mask(v0, i)) {
+             continue;
+         }
+-        addr = base + nf * i * esz;
++        addr = base + i * (nf << esz);
+         if (i == 0) {
+-            probe_pages(env, addr, nf * esz, ra, MMU_DATA_LOAD);
++            probe_pages(env, addr, nf << esz, ra, MMU_DATA_LOAD);
+         } else {
+             /* if it triggers an exception, no need to check watchpoint */
+-            remain = nf * esz;
++            remain = nf << esz;
+             while (remain > 0) {
+                 offset = -(addr | TARGET_PAGE_MASK);
+                 host = tlb_vaddr_to_host(env, addr, MMU_DATA_LOAD,
+                                          cpu_mmu_index(env, false));
+                 if (host) {
+ #ifdef CONFIG_USER_ONLY
+-                    if (page_check_range(addr, nf * esz, PAGE_READ) < 0) {
++                    if (page_check_range(addr, nf << esz, PAGE_READ) < 0) {
+                         vl = i;
+                         goto ProbeSuccess;
+                     }
+ #else
+-                    probe_pages(env, addr, nf * esz, ra, MMU_DATA_LOAD);
++                    probe_pages(env, addr, nf << esz, ra, MMU_DATA_LOAD);
+ #endif
+                 } else {
+                     vl = i;
+@@ -514,8 +522,8 @@ ProbeSuccess:
+             continue;
+         }
+         while (k < nf) {
+-            target_ulong addr = base + (i * nf + k) * esz;
+-            ldst_elem(env, addr, i + k * vlmax, vd, ra);
++            target_ulong addr = base + ((i * nf + k) << esz);
++            ldst_elem(env, addr, i + k * max_elems, vd, ra);
+             k++;
+         }
+     }
+@@ -526,7 +534,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong base,  \
+                   CPURISCVState *env, uint32_t desc)      \
+ {                                                         \
+     vext_ldff(vd, v0, base, env, desc, LOAD_FN,           \
+-              sizeof(ETYPE), GETPC());                    \
++              ctzl(sizeof(ETYPE)), GETPC());              \
+ }
+ 
+ GEN_VEXT_LDFF(vle8ff_v,  int8_t,  lde_b)
+@@ -739,7 +747,7 @@ void HELPER(NAME)(void *vs3, void *v0, target_ulong base,       \
+ {                                                               \
+     vext_amo_noatomic(vs3, v0, base, vs2, env, desc,            \
+                       INDEX_FN, vext_##NAME##_noatomic_op,      \
+-                      sizeof(ETYPE), GETPC());                  \
++                      ctzl(sizeof(ETYPE)), GETPC());            \
+ }
+ 
+ GEN_VEXT_AMO(vamoswapei8_32_v,  int32_t, idx_b)
+@@ -1225,7 +1233,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1,          \
+                   void *vs2, CPURISCVState *env, uint32_t desc) \
+ {                                                               \
+     uint32_t vl = env->vl;                                      \
+-    uint32_t vlmax = vext_maxsz(desc) / sizeof(ETYPE);          \
++    uint32_t vlmax = vext_max_elems(desc, ctzl(sizeof(ETYPE))); \
+     uint32_t i;                                                 \
+                                                                 \
+     for (i = 0; i < vl; i++) {                                  \
+@@ -3880,7 +3888,7 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2,       \
+ {                                                                   \
+     uint32_t vm = vext_vm(desc);                                    \
+     uint32_t vl = env->vl;                                          \
+-    uint32_t vlmax = vext_maxsz(desc) / sizeof(ETYPE);              \
++    uint32_t vlmax = vext_max_elems(desc, ctzl(sizeof(ETYPE)));     \
+     uint32_t i;                                                     \
+                                                                     \
+     for (i = 0; i < vl; i++) {                                      \
+@@ -4666,7 +4674,7 @@ GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_d, uint64_t, H8)
+ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2,               \
+                   CPURISCVState *env, uint32_t desc)                      \
+ {                                                                         \
+-    uint32_t vlmax = env_archcpu(env)->cfg.vlen;                          \
++    uint32_t vlmax = vext_max_elems(desc, ctzl(sizeof(ETYPE)));           \
+     uint32_t vm = vext_vm(desc);                                          \
+     uint32_t vl = env->vl;                                                \
+     uint64_t index;                                                       \
+@@ -4695,7 +4703,7 @@ GEN_VEXT_VRGATHER_VV(vrgather_vv_d, uint64_t, H8)
+ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2,         \
+                   CPURISCVState *env, uint32_t desc)                      \
+ {                                                                         \
+-    uint32_t vlmax = env_archcpu(env)->cfg.vlen;                          \
++    uint32_t vlmax = vext_max_elems(desc, ctzl(sizeof(ETYPE)));           \
+     uint32_t vm = vext_vm(desc);                                          \
+     uint32_t vl = env->vl;                                                \
+     uint64_t index = s1;                                                  \
+-- 
+2.33.1
+
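Two small pieces of arithmetic carry most of this patch: vext_get_emul() derives EMUL from EEW, SEW and LMUL (clamping negative results to 0 before storing it in the VDATA field), and vext_max_elems() turns the log2 element size plus the signed LMUL into the number of elements a register group can hold. The self-contained sketch below walks through both with example values; it is illustrative only and not tied to any particular CPU configuration.

    /* Stand-alone model of the EMUL/VLMAX arithmetic from this patch.
     * sew, eew and esz are log2 of the element width in bytes (0..3);
     * lmul is the signed register-group multiplier (-3..3). */
    #include <assert.h>
    #include <stdint.h>

    static int8_t vext_get_emul_model(int8_t sew, int8_t lmul, int8_t eew)
    {
        int8_t emul = eew - sew + lmul;
        return emul < 0 ? 0 : emul;               /* clamped as in the patch */
    }

    static uint32_t vext_max_elems_model(uint32_t vlenb, int8_t lmul, uint32_t esz)
    {
        int scale = lmul - (int)esz;
        return scale < 0 ? vlenb >> -scale : vlenb << scale;
    }

    int main(void)
    {
        /* VLEN = 128 bits -> vlenb = 16; SEW = 32 bits -> esz = 2; LMUL = 1 */
        assert(vext_max_elems_model(16, 0, 2) == 4);
        /* EEW = 8 (log2 0), SEW = 32 (log2 2), LMUL = 1 -> EMUL clamps to 0 */
        assert(vext_get_emul_model(2, 0, 0) == 0);
        return 0;
    }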

+ 110 - 0
recipes-devtools/qemu/qemu/0033-target-riscv-rvv-1.0-take-fractional-LMUL-into-vecto.patch

@@ -0,0 +1,110 @@
+From bf3c1bd265917cfcfd93215d5eebfa9d154816da Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Fri, 14 Aug 2020 17:31:34 +0800
+Subject: [PATCH 033/107] target/riscv: rvv-1.0: take fractional LMUL into
+ vector max elements calculation
+
+Update vext_get_vlmax() and MAXSZ() to take fractional LMUL into
+account in the calculation for RVV 1.0.
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
+---
+ target/riscv/cpu.h                      | 43 ++++++++++++++++++-------
+ target/riscv/insn_trans/trans_rvv.c.inc | 12 ++++++-
+ 2 files changed, 42 insertions(+), 13 deletions(-)
+
+diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
+index 3835d22ca1..f773bae96b 100644
+--- a/target/riscv/cpu.h
++++ b/target/riscv/cpu.h
+@@ -404,18 +404,27 @@ FIELD(TB_FLAGS, HLSX, 15, 1)
+ bool riscv_cpu_is_32bit(CPURISCVState *env);
+ 
+ /*
+- * A simplification for VLMAX
+- * = (1 << LMUL) * VLEN / (8 * (1 << SEW))
+- * = (VLEN << LMUL) / (8 << SEW)
+- * = (VLEN << LMUL) >> (SEW + 3)
+- * = VLEN >> (SEW + 3 - LMUL)
++ * Encode LMUL to lmul as follows:
++ *     LMUL    vlmul    lmul
++ *      1       000       0
++ *      2       001       1
++ *      4       010       2
++ *      8       011       3
++ *      -       100       -
++ *     1/8      101      -3
++ *     1/4      110      -2
++ *     1/2      111      -1
++ *
++ * then, we can calculate VLMAX = vlen >> (vsew + 3 - lmul)
++ * e.g. vlen = 256 bits, SEW = 16, LMUL = 1/8
++ *      => VLMAX = vlen >> (1 + 3 - (-3))
++ *               = 256 >> 7
++ *               = 2
+  */
+ static inline uint32_t vext_get_vlmax(RISCVCPU *cpu, target_ulong vtype)
+ {
+-    uint8_t sew, lmul;
+-
+-    sew = FIELD_EX64(vtype, VTYPE, VSEW);
+-    lmul = FIELD_EX64(vtype, VTYPE, VLMUL);
++    uint8_t sew = FIELD_EX64(vtype, VTYPE, VSEW);
++    int8_t lmul = sextract32(FIELD_EX64(vtype, VTYPE, VLMUL), 0, 3);
+     return cpu->cfg.vlen >> (sew + 3 - lmul);
+ }
+ 
+@@ -428,12 +437,22 @@ static inline void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc,
+     *cs_base = 0;
+ 
+     if (riscv_has_ext(env, RVV)) {
++        /*
++         * If env->vl equals VLMAX, we can use generic vector operation
++         * expanders (GVEC) to accelerate the vector operations.
++         * However, as LMUL could be a fractional number, the maximum
++         * vector size that can be operated on might be less than 8 bytes,
++         * which is not supported by GVEC. So we set the vl_eq_vlmax flag to
++         * true only when maxsz >= 8 bytes.
++         */
+         uint32_t vlmax = vext_get_vlmax(env_archcpu(env), env->vtype);
+-        bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl);
++        uint32_t sew = FIELD_EX64(env->vtype, VTYPE, VSEW);
++        uint32_t maxsz = vlmax << sew;
++        bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl)
++                           && (maxsz >= 8);
+         flags = FIELD_DP32(flags, TB_FLAGS, VILL,
+                     FIELD_EX64(env->vtype, VTYPE, VILL));
+-        flags = FIELD_DP32(flags, TB_FLAGS, SEW,
+-                    FIELD_EX64(env->vtype, VTYPE, VSEW));
++        flags = FIELD_DP32(flags, TB_FLAGS, SEW, sew);
+         flags = FIELD_DP32(flags, TB_FLAGS, LMUL,
+                     FIELD_EX64(env->vtype, VTYPE, VLMUL));
+         flags = FIELD_DP32(flags, TB_FLAGS, VL_EQ_VLMAX, vl_eq_vlmax);
+diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
+index a992bd170d..71f2343d4d 100644
+--- a/target/riscv/insn_trans/trans_rvv.c.inc
++++ b/target/riscv/insn_trans/trans_rvv.c.inc
+@@ -1268,7 +1268,17 @@ GEN_VEXT_AMO_TRANS(vamomaxuei64_v, MO_64, 35, rwdvm, amo_op, amo_check)
+ /*
+  *** Vector Integer Arithmetic Instructions
+  */
+-#define MAXSZ(s) (s->vlen >> (3 - s->lmul))
++
++/*
++ * MAXSZ returns the maximum vector size that can be operated on, in bytes,
++ * which is used in GVEC IR when the vl_eq_vlmax flag is set to true
++ * to accelerate vector operations.
++ */
++static inline uint32_t MAXSZ(DisasContext *s)
++{
++    int scale = s->lmul - 3;
++    return scale < 0 ? s->vlen >> -scale : s->vlen << scale;
++}
+ 
+ static bool opivv_check(DisasContext *s, arg_rmrr *a)
+ {
+-- 
+2.33.1
+
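The table in the cpu.h comment hinges on sign-extending the three-bit vlmul field, so that fractional LMUL values become negative shifts in VLMAX = vlen >> (sew + 3 - lmul). Here is a compact, purely illustrative check of that encoding — not the QEMU helpers themselves, and assuming arithmetic right shift of signed values.

    /* Model of the vlmul -> lmul mapping and the VLMAX formula above.
     * Reproduces the worked example from the patch comment:
     * vlen = 256, SEW = 16 (sew = 1), LMUL = 1/8 (vlmul = 0b101, lmul = -3)
     * -> VLMAX = 256 >> 7 = 2. */
    #include <assert.h>
    #include <stdint.h>

    static int8_t sext3(uint8_t vlmul)
    {
        return (int8_t)(vlmul << 5) >> 5;          /* sign-extend bits [2:0] */
    }

    static uint32_t vlmax_model(uint32_t vlen, uint8_t sew, uint8_t vlmul)
    {
        int8_t lmul = sext3(vlmul);
        return vlen >> (sew + 3 - lmul);
    }

    int main(void)
    {
        assert(sext3(0x5) == -3);                  /* 0b101 -> LMUL = 1/8 */
        assert(vlmax_model(256, 1, 0x5) == 2);     /* example from the comment */
        assert(vlmax_model(256, 1, 0x0) == 16);    /* LMUL = 1: 256 >> 4 */
        return 0;
    }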

+ 29 - 0
recipes-devtools/qemu/qemu/0034-target-riscv-rvv-1.0-floating-point-square-root-inst.patch

@@ -0,0 +1,29 @@
+From 8dbf85414916b53047dd762cf3fa7f501d6db6f0 Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Thu, 2 Jul 2020 11:44:23 +0800
+Subject: [PATCH 034/107] target/riscv: rvv-1.0: floating-point square-root
+ instruction
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
+---
+ target/riscv/insn32.decode | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
+index dec3fe1f34..c41965f1d8 100644
+--- a/target/riscv/insn32.decode
++++ b/target/riscv/insn32.decode
+@@ -538,7 +538,7 @@ vfwmsac_vv      111110 . ..... ..... 001 ..... 1010111 @r_vm
+ vfwmsac_vf      111110 . ..... ..... 101 ..... 1010111 @r_vm
+ vfwnmsac_vv     111111 . ..... ..... 001 ..... 1010111 @r_vm
+ vfwnmsac_vf     111111 . ..... ..... 101 ..... 1010111 @r_vm
+-vfsqrt_v        100011 . ..... 00000 001 ..... 1010111 @r2_vm
++vfsqrt_v        010011 . ..... 00000 001 ..... 1010111 @r2_vm
+ vfmin_vv        000100 . ..... ..... 001 ..... 1010111 @r_vm
+ vfmin_vf        000100 . ..... ..... 101 ..... 1010111 @r_vm
+ vfmax_vv        000110 . ..... ..... 001 ..... 1010111 @r_vm
+-- 
+2.33.1
+

+ 29 - 0
recipes-devtools/qemu/qemu/0035-target-riscv-rvv-1.0-floating-point-classify-instruc.patch

@@ -0,0 +1,29 @@
+From 5cbca8fd51a876580ca8b59eaa45ae3748cba4e9 Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Fri, 10 Jul 2020 14:11:31 +0800
+Subject: [PATCH 035/107] target/riscv: rvv-1.0: floating-point classify
+ instructions
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
+---
+ target/riscv/insn32.decode | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
+index c41965f1d8..ac04311bca 100644
+--- a/target/riscv/insn32.decode
++++ b/target/riscv/insn32.decode
+@@ -561,7 +561,7 @@ vmfgt_vf        011101 . ..... ..... 101 ..... 1010111 @r_vm
+ vmfge_vf        011111 . ..... ..... 101 ..... 1010111 @r_vm
+ vmford_vv       011010 . ..... ..... 001 ..... 1010111 @r_vm
+ vmford_vf       011010 . ..... ..... 101 ..... 1010111 @r_vm
+-vfclass_v       100011 . ..... 10000 001 ..... 1010111 @r2_vm
++vfclass_v       010011 . ..... 10000 001 ..... 1010111 @r2_vm
+ vfmerge_vfm     010111 0 ..... ..... 101 ..... 1010111 @r_vm_0
+ vfmv_v_f        010111 1 00000 ..... 101 ..... 1010111 @r2
+ vfcvt_xu_f_v    100010 . ..... 00000 001 ..... 1010111 @r2_vm
+-- 
+2.33.1
+

+ 92 - 0
recipes-devtools/qemu/qemu/0036-target-riscv-rvv-1.0-mask-population-count-instructi.patch

@@ -0,0 +1,92 @@
+From 0c65425aa1d9040a0f65452ecf80f86f2f0889da Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Thu, 2 Jul 2020 11:49:38 +0800
+Subject: [PATCH 036/107] target/riscv: rvv-1.0: mask population count
+ instruction
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+---
+ target/riscv/helper.h                   | 2 +-
+ target/riscv/insn32.decode              | 2 +-
+ target/riscv/insn_trans/trans_rvv.c.inc | 7 ++++---
+ target/riscv/vector_helper.c            | 6 +++---
+ 4 files changed, 9 insertions(+), 8 deletions(-)
+
+diff --git a/target/riscv/helper.h b/target/riscv/helper.h
+index f35cd987ee..741f69dc44 100644
+--- a/target/riscv/helper.h
++++ b/target/riscv/helper.h
+@@ -1095,7 +1095,7 @@ DEF_HELPER_6(vmnor_mm, void, ptr, ptr, ptr, ptr, env, i32)
+ DEF_HELPER_6(vmornot_mm, void, ptr, ptr, ptr, ptr, env, i32)
+ DEF_HELPER_6(vmxnor_mm, void, ptr, ptr, ptr, ptr, env, i32)
+ 
+-DEF_HELPER_4(vmpopc_m, tl, ptr, ptr, env, i32)
++DEF_HELPER_4(vpopc_m, tl, ptr, ptr, env, i32)
+ 
+ DEF_HELPER_4(vmfirst_m, tl, ptr, ptr, env, i32)
+ 
+diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
+index ac04311bca..618ddf963c 100644
+--- a/target/riscv/insn32.decode
++++ b/target/riscv/insn32.decode
+@@ -602,7 +602,7 @@ vmor_mm         011010 - ..... ..... 010 ..... 1010111 @r
+ vmnor_mm        011110 - ..... ..... 010 ..... 1010111 @r
+ vmornot_mm      011100 - ..... ..... 010 ..... 1010111 @r
+ vmxnor_mm       011111 - ..... ..... 010 ..... 1010111 @r
+-vmpopc_m        010100 . ..... ----- 010 ..... 1010111 @r2_vm
++vpopc_m         010000 . ..... 10000 010 ..... 1010111 @r2_vm
+ vmfirst_m       010101 . ..... ----- 010 ..... 1010111 @r2_vm
+ vmsbf_m         010110 . ..... 00001 010 ..... 1010111 @r2_vm
+ vmsif_m         010110 . ..... 00011 010 ..... 1010111 @r2_vm
+diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
+index 71f2343d4d..57ef3ed183 100644
+--- a/target/riscv/insn_trans/trans_rvv.c.inc
++++ b/target/riscv/insn_trans/trans_rvv.c.inc
+@@ -2893,8 +2893,8 @@ GEN_MM_TRANS(vmnor_mm)
+ GEN_MM_TRANS(vmornot_mm)
+ GEN_MM_TRANS(vmxnor_mm)
+ 
+-/* Vector mask population count vmpopc */
+-static bool trans_vmpopc_m(DisasContext *s, arg_rmr *a)
++/* Vector mask population count vpopc */
++static bool trans_vpopc_m(DisasContext *s, arg_rmr *a)
+ {
+     if (require_rvv(s) &&
+         vext_check_isa_ill(s)) {
+@@ -2913,13 +2913,14 @@ static bool trans_vmpopc_m(DisasContext *s, arg_rmr *a)
+         tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, a->rs2));
+         tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
+ 
+-        gen_helper_vmpopc_m(dst, mask, src2, cpu_env, desc);
++        gen_helper_vpopc_m(dst, mask, src2, cpu_env, desc);
+         gen_set_gpr(a->rd, dst);
+ 
+         tcg_temp_free_ptr(mask);
+         tcg_temp_free_ptr(src2);
+         tcg_temp_free(dst);
+         tcg_temp_free_i32(desc);
++
+         return true;
+     }
+     return false;
+diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
+index 0a9fb898a1..cc76b60470 100644
+--- a/target/riscv/vector_helper.c
++++ b/target/riscv/vector_helper.c
+@@ -4420,9 +4420,9 @@ GEN_VEXT_MASK_VV(vmnor_mm, DO_NOR)
+ GEN_VEXT_MASK_VV(vmornot_mm, DO_ORNOT)
+ GEN_VEXT_MASK_VV(vmxnor_mm, DO_XNOR)
+ 
+-/* Vector mask population count vmpopc */
+-target_ulong HELPER(vmpopc_m)(void *v0, void *vs2, CPURISCVState *env,
+-                              uint32_t desc)
++/* Vector mask population count vpopc */
++target_ulong HELPER(vpopc_m)(void *v0, void *vs2, CPURISCVState *env,
++                             uint32_t desc)
+ {
+     target_ulong cnt = 0;
+     uint32_t vm = vext_vm(desc);
+-- 
+2.33.1
+
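
The rename from vmpopc.m to vpopc.m does not change the operation: the helper counts the set bits of the source mask register, restricted to the first vl elements and, when masked, to elements where v0 is set. A minimal standalone C model of that loop (storing one mask bit per byte for simplicity, an assumption of this sketch rather than QEMU's packed mask layout):

    #include <stdint.h>
    #include <stddef.h>

    /* Count active, set mask bits in vs2[0..vl), honoring the v0 mask when vm == 0. */
    static uint64_t vpopc_m_model(const uint8_t *v0, const uint8_t *vs2,
                                  size_t vl, int vm)
    {
        uint64_t cnt = 0;
        for (size_t i = 0; i < vl; i++) {
            if (!vm && !v0[i]) {
                continue;               /* element is masked off */
            }
            if (vs2[i]) {
                cnt++;                  /* mask bit i is set */
            }
        }
        return cnt;
    }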

+ 83 - 0
recipes-devtools/qemu/qemu/0037-target-riscv-rvv-1.0-find-first-set-mask-bit-instruc.patch

@@ -0,0 +1,83 @@
+From 874cd3e6085af5927a69591edc48734224a7677c Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Thu, 2 Jul 2020 11:51:56 +0800
+Subject: [PATCH 037/107] target/riscv: rvv-1.0: find-first-set mask bit
+ instruction
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+---
+ target/riscv/helper.h                   | 2 +-
+ target/riscv/insn32.decode              | 2 +-
+ target/riscv/insn_trans/trans_rvv.c.inc | 4 ++--
+ target/riscv/vector_helper.c            | 6 +++---
+ 4 files changed, 7 insertions(+), 7 deletions(-)
+
+diff --git a/target/riscv/helper.h b/target/riscv/helper.h
+index 741f69dc44..8f2d41c610 100644
+--- a/target/riscv/helper.h
++++ b/target/riscv/helper.h
+@@ -1097,7 +1097,7 @@ DEF_HELPER_6(vmxnor_mm, void, ptr, ptr, ptr, ptr, env, i32)
+ 
+ DEF_HELPER_4(vpopc_m, tl, ptr, ptr, env, i32)
+ 
+-DEF_HELPER_4(vmfirst_m, tl, ptr, ptr, env, i32)
++DEF_HELPER_4(vfirst_m, tl, ptr, ptr, env, i32)
+ 
+ DEF_HELPER_5(vmsbf_m, void, ptr, ptr, ptr, env, i32)
+ DEF_HELPER_5(vmsif_m, void, ptr, ptr, ptr, env, i32)
+diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
+index 618ddf963c..c66bc4fad1 100644
+--- a/target/riscv/insn32.decode
++++ b/target/riscv/insn32.decode
+@@ -603,7 +603,7 @@ vmnor_mm        011110 - ..... ..... 010 ..... 1010111 @r
+ vmornot_mm      011100 - ..... ..... 010 ..... 1010111 @r
+ vmxnor_mm       011111 - ..... ..... 010 ..... 1010111 @r
+ vpopc_m         010000 . ..... 10000 010 ..... 1010111 @r2_vm
+-vmfirst_m       010101 . ..... ----- 010 ..... 1010111 @r2_vm
++vfirst_m        010000 . ..... 10001 010 ..... 1010111 @r2_vm
+ vmsbf_m         010110 . ..... 00001 010 ..... 1010111 @r2_vm
+ vmsif_m         010110 . ..... 00011 010 ..... 1010111 @r2_vm
+ vmsof_m         010110 . ..... 00010 010 ..... 1010111 @r2_vm
+diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
+index 57ef3ed183..52ad12bf43 100644
+--- a/target/riscv/insn_trans/trans_rvv.c.inc
++++ b/target/riscv/insn_trans/trans_rvv.c.inc
+@@ -2927,7 +2927,7 @@ static bool trans_vpopc_m(DisasContext *s, arg_rmr *a)
+ }
+ 
+ /* vmfirst find-first-set mask bit */
+-static bool trans_vmfirst_m(DisasContext *s, arg_rmr *a)
++static bool trans_vfirst_m(DisasContext *s, arg_rmr *a)
+ {
+     if (require_rvv(s) &&
+         vext_check_isa_ill(s)) {
+@@ -2946,7 +2946,7 @@ static bool trans_vmfirst_m(DisasContext *s, arg_rmr *a)
+         tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, a->rs2));
+         tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
+ 
+-        gen_helper_vmfirst_m(dst, mask, src2, cpu_env, desc);
++        gen_helper_vfirst_m(dst, mask, src2, cpu_env, desc);
+         gen_set_gpr(a->rd, dst);
+ 
+         tcg_temp_free_ptr(mask);
+diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
+index cc76b60470..fbde77326e 100644
+--- a/target/riscv/vector_helper.c
++++ b/target/riscv/vector_helper.c
+@@ -4439,9 +4439,9 @@ target_ulong HELPER(vpopc_m)(void *v0, void *vs2, CPURISCVState *env,
+     return cnt;
+ }
+ 
+-/* vmfirst find-first-set mask bit*/
+-target_ulong HELPER(vmfirst_m)(void *v0, void *vs2, CPURISCVState *env,
+-                               uint32_t desc)
++/* vfirst find-first-set mask bit*/
++target_ulong HELPER(vfirst_m)(void *v0, void *vs2, CPURISCVState *env,
++                              uint32_t desc)
+ {
+     uint32_t vm = vext_vm(desc);
+     uint32_t vl = env->vl;
+-- 
+2.33.1
+
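
vfirst.m is the companion of vpopc.m: it returns the index of the first active, set mask bit, or -1 if none exists within vl. A standalone sketch with the same loop structure and the same one-byte-per-mask-bit assumption as the previous example:

    #include <stdint.h>
    #include <stddef.h>

    /* Return the lowest i < vl with vs2[i] set (and v0[i] set when masked), else -1. */
    static int64_t vfirst_m_model(const uint8_t *v0, const uint8_t *vs2,
                                  size_t vl, int vm)
    {
        for (size_t i = 0; i < vl; i++) {
            if (!vm && !v0[i]) {
                continue;               /* skip masked-off elements */
            }
            if (vs2[i]) {
                return (int64_t)i;      /* first active set bit */
            }
        }
        return -1;                      /* no active set bit found */
    }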

+ 72 - 0
recipes-devtools/qemu/qemu/0038-target-riscv-rvv-1.0-set-X-first-mask-bit-instructio.patch

@@ -0,0 +1,72 @@
+From b31244216af0f936b1839b917e1f0e11d189c760 Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Thu, 2 Jul 2020 11:54:41 +0800
+Subject: [PATCH 038/107] target/riscv: rvv-1.0: set-X-first mask bit
+ instructions
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+---
+ target/riscv/insn32.decode              | 6 +++---
+ target/riscv/insn_trans/trans_rvv.c.inc | 5 ++++-
+ target/riscv/vector_helper.c            | 4 ----
+ 3 files changed, 7 insertions(+), 8 deletions(-)
+
+diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
+index c66bc4fad1..58ca0a7638 100644
+--- a/target/riscv/insn32.decode
++++ b/target/riscv/insn32.decode
+@@ -604,9 +604,9 @@ vmornot_mm      011100 - ..... ..... 010 ..... 1010111 @r
+ vmxnor_mm       011111 - ..... ..... 010 ..... 1010111 @r
+ vpopc_m         010000 . ..... 10000 010 ..... 1010111 @r2_vm
+ vfirst_m        010000 . ..... 10001 010 ..... 1010111 @r2_vm
+-vmsbf_m         010110 . ..... 00001 010 ..... 1010111 @r2_vm
+-vmsif_m         010110 . ..... 00011 010 ..... 1010111 @r2_vm
+-vmsof_m         010110 . ..... 00010 010 ..... 1010111 @r2_vm
++vmsbf_m         010100 . ..... 00001 010 ..... 1010111 @r2_vm
++vmsif_m         010100 . ..... 00011 010 ..... 1010111 @r2_vm
++vmsof_m         010100 . ..... 00010 010 ..... 1010111 @r2_vm
+ viota_m         010110 . ..... 10000 010 ..... 1010111 @r2_vm
+ vid_v           010110 . 00000 10001 010 ..... 1010111 @r1_vm
+ vext_x_v        001100 1 ..... ..... 010 ..... 1010111 @r
+diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
+index 52ad12bf43..ae53c83a8d 100644
+--- a/target/riscv/insn_trans/trans_rvv.c.inc
++++ b/target/riscv/insn_trans/trans_rvv.c.inc
+@@ -2964,7 +2964,10 @@ static bool trans_vfirst_m(DisasContext *s, arg_rmr *a)
+ #define GEN_M_TRANS(NAME)                                          \
+ static bool trans_##NAME(DisasContext *s, arg_rmr *a)              \
+ {                                                                  \
+-    if (vext_check_isa_ill(s)) {                                   \
++    if (require_rvv(s) &&                                          \
++        vext_check_isa_ill(s) &&                                   \
++        require_vm(a->vm, a->rd) &&                                \
++        (a->rd != a->rs2)) {                                       \
+         uint32_t data = 0;                                         \
+         gen_helper_gvec_3_ptr *fn = gen_helper_##NAME;             \
+         TCGLabel *over = gen_new_label();                          \
+diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
+index fbde77326e..cb12585956 100644
+--- a/target/riscv/vector_helper.c
++++ b/target/riscv/vector_helper.c
+@@ -4466,7 +4466,6 @@ enum set_mask_type {
+ static void vmsetm(void *vd, void *v0, void *vs2, CPURISCVState *env,
+                    uint32_t desc, enum set_mask_type type)
+ {
+-    uint32_t vlmax = env_archcpu(env)->cfg.vlen;
+     uint32_t vm = vext_vm(desc);
+     uint32_t vl = env->vl;
+     int i;
+@@ -4496,9 +4495,6 @@ static void vmsetm(void *vd, void *v0, void *vs2, CPURISCVState *env,
+             }
+         }
+     }
+-    for (; i < vlmax; i++) {
+-        vext_set_elem_mask(vd, i, 0);
+-    }
+ }
+ 
+ void HELPER(vmsbf_m)(void *vd, void *v0, void *vs2, CPURISCVState *env,
+-- 
+2.33.1
+
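
The deleted tail loop reflects an RVV 1.0 behavioural change: mask-producing instructions now leave tail elements (indices >= vl) undisturbed instead of clearing them, so only the first vl destination bits are written. The three variants differ only in how they treat positions relative to the first active set bit. A compact standalone model of the shared vmsetm helper, again using one byte per mask bit and only sketching the semantics:

    #include <stdint.h>
    #include <stddef.h>

    enum set_mask_type { ONLY_FIRST, INCLUDE_FIRST, BEFORE_FIRST };

    /* vmsof.m (ONLY_FIRST), vmsif.m (INCLUDE_FIRST), vmsbf.m (BEFORE_FIRST).
     * Tail elements (i >= vl) are deliberately left untouched. */
    static void vmsetm_model(uint8_t *vd, const uint8_t *v0, const uint8_t *vs2,
                             size_t vl, int vm, enum set_mask_type type)
    {
        int first_seen = 0;
        for (size_t i = 0; i < vl; i++) {
            if (!vm && !v0[i]) {
                continue;                       /* masked-off elements stay unchanged */
            }
            if (!first_seen) {
                if (vs2[i]) {
                    first_seen = 1;
                    vd[i] = (type == BEFORE_FIRST) ? 0 : 1;  /* at the first set bit */
                } else {
                    vd[i] = (type == ONLY_FIRST) ? 0 : 1;    /* before the first set bit */
                }
            } else {
                vd[i] = 0;                      /* everything after the first set bit */
            }
        }
    }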

+ 53 - 0
recipes-devtools/qemu/qemu/0039-target-riscv-rvv-1.0-iota-instruction.patch

@@ -0,0 +1,53 @@
+From 1ba2b152d26eb428c30514fc965ed8f1d2947ea7 Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Thu, 2 Jul 2020 11:58:38 +0800
+Subject: [PATCH 039/107] target/riscv: rvv-1.0: iota instruction
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+---
+ target/riscv/insn32.decode              |  2 +-
+ target/riscv/insn_trans/trans_rvv.c.inc | 10 ++++++++--
+ 2 files changed, 9 insertions(+), 3 deletions(-)
+
+diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
+index 58ca0a7638..fde1aa4088 100644
+--- a/target/riscv/insn32.decode
++++ b/target/riscv/insn32.decode
+@@ -607,7 +607,7 @@ vfirst_m        010000 . ..... 10001 010 ..... 1010111 @r2_vm
+ vmsbf_m         010100 . ..... 00001 010 ..... 1010111 @r2_vm
+ vmsif_m         010100 . ..... 00011 010 ..... 1010111 @r2_vm
+ vmsof_m         010100 . ..... 00010 010 ..... 1010111 @r2_vm
+-viota_m         010110 . ..... 10000 010 ..... 1010111 @r2_vm
++viota_m         010100 . ..... 10000 010 ..... 1010111 @r2_vm
+ vid_v           010110 . 00000 10001 010 ..... 1010111 @r1_vm
+ vext_x_v        001100 1 ..... ..... 010 ..... 1010111 @r
+ vmv_s_x         001101 1 00000 ..... 110 ..... 1010111 @r2
+diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
+index ae53c83a8d..77f2a8552b 100644
+--- a/target/riscv/insn_trans/trans_rvv.c.inc
++++ b/target/riscv/insn_trans/trans_rvv.c.inc
+@@ -2989,12 +2989,18 @@ GEN_M_TRANS(vmsbf_m)
+ GEN_M_TRANS(vmsif_m)
+ GEN_M_TRANS(vmsof_m)
+ 
+-/* Vector Iota Instruction */
++/*
++ * Vector Iota Instruction
++ *
++ * 1. The destination register cannot overlap the source register.
++ * 2. If masked, cannot overlap the mask register ('v0').
++ * 3. An illegal instruction exception is raised if vstart is non-zero.
++ */
+ static bool trans_viota_m(DisasContext *s, arg_viota_m *a)
+ {
+     if (require_rvv(s) &&
+         vext_check_isa_ill(s) &&
+-        require_noover(a->rd, s->lmul, a->rs2, 0) &&
++        !is_overlapped(a->rd, 1 << MAX(s->lmul, 0), a->rs2, 1) &&
+         require_vm(a->vm, a->rd) &&
+         require_align(a->rd, s->lmul)) {
+         uint32_t data = 0;
+-- 
+2.33.1
+
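
viota.m writes into each active destination element the count of set mask bits at lower indices, i.e. a running prefix sum of the source mask; the tightened check above additionally forbids the destination register group from overlapping the single source mask register. A standalone sketch of the computation, with 32-bit destination elements chosen arbitrarily for illustration:

    #include <stdint.h>
    #include <stddef.h>

    /* vd[i] = number of set bits of vs2 at active indices below i (prefix sum of the mask). */
    static void viota_m_model(uint32_t *vd, const uint8_t *v0, const uint8_t *vs2,
                              size_t vl, int vm)
    {
        uint32_t sum = 0;
        for (size_t i = 0; i < vl; i++) {
            if (!vm && !v0[i]) {
                continue;             /* masked-off: vd[i] unchanged, sum not advanced */
            }
            vd[i] = sum;
            if (vs2[i]) {
                sum++;
            }
        }
    }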

+ 27 - 0
recipes-devtools/qemu/qemu/0040-target-riscv-rvv-1.0-element-index-instruction.patch

@@ -0,0 +1,27 @@
+From a7826d9b805c2e985ef9705ec4f17a46de00a612 Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Thu, 2 Jul 2020 11:59:34 +0800
+Subject: [PATCH 040/107] target/riscv: rvv-1.0: element index instruction
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+---
+ target/riscv/insn32.decode | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
+index fde1aa4088..05ccfe9356 100644
+--- a/target/riscv/insn32.decode
++++ b/target/riscv/insn32.decode
+@@ -608,7 +608,7 @@ vmsbf_m         010100 . ..... 00001 010 ..... 1010111 @r2_vm
+ vmsif_m         010100 . ..... 00011 010 ..... 1010111 @r2_vm
+ vmsof_m         010100 . ..... 00010 010 ..... 1010111 @r2_vm
+ viota_m         010100 . ..... 10000 010 ..... 1010111 @r2_vm
+-vid_v           010110 . 00000 10001 010 ..... 1010111 @r1_vm
++vid_v           010100 . 00000 10001 010 ..... 1010111 @r1_vm
+ vext_x_v        001100 1 ..... ..... 010 ..... 1010111 @r
+ vmv_s_x         001101 1 00000 ..... 110 ..... 1010111 @r2
+ vfmv_f_s        001100 1 ..... 00000 001 ..... 1010111 @r2rd
+-- 
+2.33.1
+

+ 104 - 0
recipes-devtools/qemu/qemu/0041-target-riscv-rvv-1.0-allow-load-element-with-sign-ex.patch

@@ -0,0 +1,104 @@
+From 03e2b117dd0ead5a60d98120bbb066db4168bd84 Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Tue, 21 Jul 2020 00:40:11 +0800
+Subject: [PATCH 041/107] target/riscv: rvv-1.0: allow load element with
+ sign-extended
+
+For some vector instructions (e.g. vmv.s.x), the element is loaded with
+sign extension.
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+---
+ target/riscv/insn_trans/trans_rvv.c.inc | 32 +++++++++++++++++--------
+ 1 file changed, 22 insertions(+), 10 deletions(-)
+
+diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
+index 77f2a8552b..a4365e3a19 100644
+--- a/target/riscv/insn_trans/trans_rvv.c.inc
++++ b/target/riscv/insn_trans/trans_rvv.c.inc
+@@ -3056,17 +3056,29 @@ static bool trans_vid_v(DisasContext *s, arg_vid_v *a)
+ /* Integer Extract Instruction */
+ 
+ static void load_element(TCGv_i64 dest, TCGv_ptr base,
+-                         int ofs, int sew)
++                         int ofs, int sew, bool sign)
+ {
+     switch (sew) {
+     case MO_8:
+-        tcg_gen_ld8u_i64(dest, base, ofs);
++        if (!sign) {
++            tcg_gen_ld8u_i64(dest, base, ofs);
++        } else {
++            tcg_gen_ld8s_i64(dest, base, ofs);
++        }
+         break;
+     case MO_16:
+-        tcg_gen_ld16u_i64(dest, base, ofs);
++        if (!sign) {
++            tcg_gen_ld16u_i64(dest, base, ofs);
++        } else {
++            tcg_gen_ld16s_i64(dest, base, ofs);
++        }
+         break;
+     case MO_32:
+-        tcg_gen_ld32u_i64(dest, base, ofs);
++        if (!sign) {
++            tcg_gen_ld32u_i64(dest, base, ofs);
++        } else {
++            tcg_gen_ld32s_i64(dest, base, ofs);
++        }
+         break;
+     case MO_64:
+         tcg_gen_ld_i64(dest, base, ofs);
+@@ -3121,7 +3133,7 @@ static void vec_element_loadx(DisasContext *s, TCGv_i64 dest,
+ 
+     /* Perform the load. */
+     load_element(dest, base,
+-                 vreg_ofs(s, vreg), s->sew);
++                 vreg_ofs(s, vreg), s->sew, false);
+     tcg_temp_free_ptr(base);
+     tcg_temp_free_i32(ofs);
+ 
+@@ -3139,9 +3151,9 @@ static void vec_element_loadx(DisasContext *s, TCGv_i64 dest,
+ }
+ 
+ static void vec_element_loadi(DisasContext *s, TCGv_i64 dest,
+-                              int vreg, int idx)
++                              int vreg, int idx, bool sign)
+ {
+-    load_element(dest, cpu_env, endian_ofs(s, vreg, idx), s->sew);
++    load_element(dest, cpu_env, endian_ofs(s, vreg, idx), s->sew, sign);
+ }
+ 
+ static bool trans_vext_x_v(DisasContext *s, arg_r *a)
+@@ -3151,7 +3163,7 @@ static bool trans_vext_x_v(DisasContext *s, arg_r *a)
+ 
+     if (a->rs1 == 0) {
+         /* Special case vmv.x.s rd, vs2. */
+-        vec_element_loadi(s, tmp, a->rs2, 0);
++        vec_element_loadi(s, tmp, a->rs2, 0, false);
+     } else {
+         /* This instruction ignores LMUL and vector register groups */
+         int vlmax = s->vlen >> (3 + s->sew);
+@@ -3233,7 +3245,7 @@ static bool trans_vfmv_f_s(DisasContext *s, arg_vfmv_f_s *a)
+         (s->mstatus_fs != 0) && (s->sew != 0)) {
+         unsigned int len = 8 << s->sew;
+ 
+-        vec_element_loadi(s, cpu_fpr[a->rd], a->rs2, 0);
++        vec_element_loadi(s, cpu_fpr[a->rd], a->rs2, 0, false);
+         if (len < 64) {
+             tcg_gen_ori_i64(cpu_fpr[a->rd], cpu_fpr[a->rd],
+                             MAKE_64BIT_MASK(len, 64 - len));
+@@ -3335,7 +3347,7 @@ static bool trans_vrgather_vx(DisasContext *s, arg_rmrr *a)
+         TCGv_i64 dest = tcg_temp_new_i64();
+ 
+         if (a->rs1 == 0) {
+-            vec_element_loadi(s, dest, a->rs2, 0);
++            vec_element_loadi(s, dest, a->rs2, 0, false);
+         } else {
+             vec_element_loadx(s, dest, a->rs2, cpu_gpr[a->rs1], vlmax);
+         }
+-- 
+2.33.1
+
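
The added sign flag selects whether an element narrower than 64 bits is zero- or sign-extended when it is pulled out of a vector register into a scalar temporary. Stripped of TCG, the operation on a raw byte buffer is just a width switch; a hedged standalone model:

    #include <stdint.h>
    #include <string.h>

    /* Load one SEW-wide element at byte offset ofs, widening to int64_t.
     * sew is log2 of the element size in bytes: 0=8-bit, 1=16-bit, 2=32-bit, 3=64-bit. */
    static int64_t load_element_model(const uint8_t *base, size_t ofs, int sew, int sign)
    {
        switch (sew) {
        case 0: {
            uint8_t u; memcpy(&u, base + ofs, 1);
            return sign ? (int64_t)(int8_t)u : (int64_t)u;
        }
        case 1: {
            uint16_t u; memcpy(&u, base + ofs, 2);
            return sign ? (int64_t)(int16_t)u : (int64_t)u;
        }
        case 2: {
            uint32_t u; memcpy(&u, base + ofs, 4);
            return sign ? (int64_t)(int32_t)u : (int64_t)u;
        }
        default: {
            int64_t v; memcpy(&v, base + ofs, 8);
            return v;                 /* 64-bit loads need no extension */
        }
        }
    }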

+ 150 - 0
recipes-devtools/qemu/qemu/0042-target-riscv-rvv-1.0-register-gather-instructions.patch

@@ -0,0 +1,150 @@
+From 76d0e5eb1bf05a113604735c0eb4e25500a7fac2 Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Mon, 17 Aug 2020 14:25:17 +0800
+Subject: [PATCH 042/107] target/riscv: rvv-1.0: register gather instructions
+
+* Add vrgatherei16.vv instruction.
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
+---
+ target/riscv/helper.h                   |  4 ++++
+ target/riscv/insn32.decode              |  1 +
+ target/riscv/insn_trans/trans_rvv.c.inc | 27 ++++++++++++++++++++++---
+ target/riscv/vector_helper.c            | 23 ++++++++++++---------
+ 4 files changed, 43 insertions(+), 12 deletions(-)
+
+diff --git a/target/riscv/helper.h b/target/riscv/helper.h
+index 8f2d41c610..3e1150d5eb 100644
+--- a/target/riscv/helper.h
++++ b/target/riscv/helper.h
+@@ -1134,6 +1134,10 @@ DEF_HELPER_6(vrgather_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+ DEF_HELPER_6(vrgather_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+ DEF_HELPER_6(vrgather_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+ DEF_HELPER_6(vrgather_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
++DEF_HELPER_6(vrgatherei16_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
++DEF_HELPER_6(vrgatherei16_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
++DEF_HELPER_6(vrgatherei16_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
++DEF_HELPER_6(vrgatherei16_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+ DEF_HELPER_6(vrgather_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+ DEF_HELPER_6(vrgather_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+ DEF_HELPER_6(vrgather_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
+index 05ccfe9356..e1b20b68e7 100644
+--- a/target/riscv/insn32.decode
++++ b/target/riscv/insn32.decode
+@@ -620,6 +620,7 @@ vslidedown_vx   001111 . ..... ..... 100 ..... 1010111 @r_vm
+ vslidedown_vi   001111 . ..... ..... 011 ..... 1010111 @r_vm
+ vslide1down_vx  001111 . ..... ..... 110 ..... 1010111 @r_vm
+ vrgather_vv     001100 . ..... ..... 000 ..... 1010111 @r_vm
++vrgatherei16_vv 001110 . ..... ..... 000 ..... 1010111 @r_vm
+ vrgather_vx     001100 . ..... ..... 100 ..... 1010111 @r_vm
+ vrgather_vi     001100 . ..... ..... 011 ..... 1010111 @r_vm
+ vcompress_vm    010111 - ..... ..... 010 ..... 1010111 @r
+diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
+index a4365e3a19..c517e5302a 100644
+--- a/target/riscv/insn_trans/trans_rvv.c.inc
++++ b/target/riscv/insn_trans/trans_rvv.c.inc
+@@ -3323,7 +3323,25 @@ static bool vrgather_vv_check(DisasContext *s, arg_rmrr *a)
+            require_vm(a->vm, a->rd);
+ }
+ 
++static bool vrgatherei16_vv_check(DisasContext *s, arg_rmrr *a)
++{
++    int8_t emul = MO_16 - s->sew + s->lmul;
++    return require_rvv(s) &&
++           vext_check_isa_ill(s) &&
++           (emul >= -3 && emul <= 3) &&
++           require_align(a->rd, s->lmul) &&
++           require_align(a->rs1, emul) &&
++           require_align(a->rs2, s->lmul) &&
++           (a->rd != a->rs2 && a->rd != a->rs1) &&
++           !is_overlapped(a->rd, 1 << MAX(s->lmul, 0),
++                          a->rs1, 1 << MAX(emul, 0)) &&
++           !is_overlapped(a->rd, 1 << MAX(s->lmul, 0),
++                          a->rs2, 1 << MAX(s->lmul, 0)) &&
++           require_vm(a->vm, a->rd);
++}
++
+ GEN_OPIVV_TRANS(vrgather_vv, vrgather_vv_check)
++GEN_OPIVV_TRANS(vrgatherei16_vv, vrgatherei16_vv_check)
+ 
+ static bool vrgather_vx_check(DisasContext *s, arg_rmrr *a)
+ {
+@@ -3343,7 +3361,8 @@ static bool trans_vrgather_vx(DisasContext *s, arg_rmrr *a)
+     }
+ 
+     if (a->vm && s->vl_eq_vlmax) {
+-        int vlmax = s->vlen;
++        int scale = s->lmul - (s->sew + 3);
++        int vlmax = scale < 0 ? s->vlen >> -scale : s->vlen << scale;
+         TCGv_i64 dest = tcg_temp_new_i64();
+ 
+         if (a->rs1 == 0) {
+@@ -3374,8 +3393,10 @@ static bool trans_vrgather_vi(DisasContext *s, arg_rmrr *a)
+     }
+ 
+     if (a->vm && s->vl_eq_vlmax) {
+-        if (a->rs1 >= s->vlen) {
+-            tcg_gen_gvec_dup_imm(SEW64, vreg_ofs(s, a->rd),
++        int scale = s->lmul - (s->sew + 3);
++        int vlmax = scale < 0 ? s->vlen >> -scale : s->vlen << scale;
++        if (a->rs1 >= vlmax) {
++            tcg_gen_gvec_dup_imm(MO_64, vreg_ofs(s, a->rd),
+                                  MAXSZ(s), MAXSZ(s), 0);
+         } else {
+             tcg_gen_gvec_dup_mem(s->sew, vreg_ofs(s, a->rd),
+diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
+index cb12585956..9291f5c9ca 100644
+--- a/target/riscv/vector_helper.c
++++ b/target/riscv/vector_helper.c
+@@ -4666,11 +4666,11 @@ GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_w, uint32_t, H4)
+ GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_d, uint64_t, H8)
+ 
+ /* Vector Register Gather Instruction */
+-#define GEN_VEXT_VRGATHER_VV(NAME, ETYPE, H)                              \
++#define GEN_VEXT_VRGATHER_VV(NAME, TS1, TS2, HS1, HS2)                    \
+ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2,               \
+                   CPURISCVState *env, uint32_t desc)                      \
+ {                                                                         \
+-    uint32_t vlmax = vext_max_elems(desc, ctzl(sizeof(ETYPE)));           \
++    uint32_t vlmax = vext_max_elems(desc, ctzl(sizeof(TS1)));             \
+     uint32_t vm = vext_vm(desc);                                          \
+     uint32_t vl = env->vl;                                                \
+     uint64_t index;                                                       \
+@@ -4680,20 +4680,25 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2,               \
+         if (!vm && !vext_elem_mask(v0, i)) {                              \
+             continue;                                                     \
+         }                                                                 \
+-        index = *((ETYPE *)vs1 + H(i));                                   \
++        index = *((TS1 *)vs1 + HS1(i));                                   \
+         if (index >= vlmax) {                                             \
+-            *((ETYPE *)vd + H(i)) = 0;                                    \
++            *((TS2 *)vd + HS2(i)) = 0;                                    \
+         } else {                                                          \
+-            *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(index));           \
++            *((TS2 *)vd + HS2(i)) = *((TS2 *)vs2 + HS2(index));           \
+         }                                                                 \
+     }                                                                     \
+ }
+ 
+ /* vd[i] = (vs1[i] >= VLMAX) ? 0 : vs2[vs1[i]]; */
+-GEN_VEXT_VRGATHER_VV(vrgather_vv_b, uint8_t,  H1)
+-GEN_VEXT_VRGATHER_VV(vrgather_vv_h, uint16_t, H2)
+-GEN_VEXT_VRGATHER_VV(vrgather_vv_w, uint32_t, H4)
+-GEN_VEXT_VRGATHER_VV(vrgather_vv_d, uint64_t, H8)
++GEN_VEXT_VRGATHER_VV(vrgather_vv_b, uint8_t,  uint8_t,  H1, H1)
++GEN_VEXT_VRGATHER_VV(vrgather_vv_h, uint16_t, uint16_t, H2, H2)
++GEN_VEXT_VRGATHER_VV(vrgather_vv_w, uint32_t, uint32_t, H4, H4)
++GEN_VEXT_VRGATHER_VV(vrgather_vv_d, uint64_t, uint64_t, H8, H8)
++
++GEN_VEXT_VRGATHER_VV(vrgatherei16_vv_b, uint16_t, uint8_t,  H2, H1)
++GEN_VEXT_VRGATHER_VV(vrgatherei16_vv_h, uint16_t, uint16_t, H2, H2)
++GEN_VEXT_VRGATHER_VV(vrgatherei16_vv_w, uint16_t, uint32_t, H2, H4)
++GEN_VEXT_VRGATHER_VV(vrgatherei16_vv_d, uint16_t, uint64_t, H2, H8)
+ 
+ #define GEN_VEXT_VRGATHER_VX(NAME, ETYPE, H)                              \
+ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2,         \
+-- 
+2.33.1
+
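
The new check computes an effective LMUL (EMUL) for the 16-bit index operand of vrgatherei16.vv: in log2 terms, emul = MO_16 - sew + lmul, where MO_16 is 1 in QEMU's memop encoding and sew/lmul are the log2-encoded SEW and LMUL from vtype. A standalone sketch of that arithmetic and the legality window, under those encoding assumptions:

    #include <stdio.h>

    /* log2 encodings as assumed here: sew 0..3 -> SEW 8/16/32/64 bits,
     * lmul -3..3 -> LMUL 1/8 .. 8. */
    static int vrgatherei16_emul(int sew, int lmul)
    {
        /* Index EEW is fixed at 16 bits, so log2(EMUL) = 1 - sew + lmul. */
        return 1 - sew + lmul;
    }

    int main(void)
    {
        /* Example: SEW=64 (sew=3), LMUL=8 (lmul=3) -> log2(EMUL) = 1, i.e. the index
         * operand occupies a 2-register group; legal because -3 <= 1 <= 3. */
        int emul = vrgatherei16_emul(3, 3);
        printf("log2(EMUL) = %d -> %s\n", emul,
               (emul >= -3 && emul <= 3) ? "legal" : "reserved");
        return 0;
    }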

+ 103 - 0
recipes-devtools/qemu/qemu/0043-target-riscv-rvv-1.0-integer-scalar-move-instruction.patch

@@ -0,0 +1,103 @@
+From ec37910c0fbfda7dc82b9c032714e3bad3f9ef46 Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Tue, 14 Jul 2020 01:16:44 +0800
+Subject: [PATCH 043/107] target/riscv: rvv-1.0: integer scalar move
+ instructions
+
+* Remove "vmv.s.x: do nothing if rs1 == 0" constraint.
+* Add vmv.x.s instruction.
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+---
+ target/riscv/insn32.decode              |  3 +-
+ target/riscv/insn_trans/trans_rvv.c.inc | 45 ++++++++++++++++++++-----
+ 2 files changed, 39 insertions(+), 9 deletions(-)
+
+diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
+index e1b20b68e7..0b17eed559 100644
+--- a/target/riscv/insn32.decode
++++ b/target/riscv/insn32.decode
+@@ -609,8 +609,9 @@ vmsif_m         010100 . ..... 00011 010 ..... 1010111 @r2_vm
+ vmsof_m         010100 . ..... 00010 010 ..... 1010111 @r2_vm
+ viota_m         010100 . ..... 10000 010 ..... 1010111 @r2_vm
+ vid_v           010100 . 00000 10001 010 ..... 1010111 @r1_vm
++vmv_x_s         010000 1 ..... 00000 010 ..... 1010111 @r2rd
++vmv_s_x         010000 1 00000 ..... 110 ..... 1010111 @r2
+ vext_x_v        001100 1 ..... ..... 010 ..... 1010111 @r
+-vmv_s_x         001101 1 00000 ..... 110 ..... 1010111 @r2
+ vfmv_f_s        001100 1 ..... 00000 001 ..... 1010111 @r2rd
+ vfmv_s_f        001101 1 00000 ..... 101 ..... 1010111 @r2
+ vslideup_vx     001110 . ..... ..... 100 ..... 1010111 @r_vm
+diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
+index c517e5302a..d83d30c101 100644
+--- a/target/riscv/insn_trans/trans_rvv.c.inc
++++ b/target/riscv/insn_trans/trans_rvv.c.inc
+@@ -3211,27 +3211,56 @@ static void vec_element_storei(DisasContext *s, int vreg,
+     store_element(val, cpu_env, endian_ofs(s, vreg, idx), s->sew);
+ }
+ 
++/* vmv.x.s rd, vs2 # x[rd] = vs2[0] */
++static bool trans_vmv_x_s(DisasContext *s, arg_vmv_x_s *a)
++{
++    if (require_rvv(s) &&
++        vext_check_isa_ill(s)) {
++        TCGv_i64 t1;
++        TCGv dest;
++
++        t1 = tcg_temp_new_i64();
++        dest = tcg_temp_new();
++        /*
++         * load vreg and sign-extend to 64 bits,
++         * then truncate to XLEN bits before storing to gpr.
++         */
++        vec_element_loadi(s, t1, a->rs2, 0, true);
++        tcg_gen_trunc_i64_tl(dest, t1);
++        gen_set_gpr(a->rd, dest);
++        tcg_temp_free_i64(t1);
++        tcg_temp_free(dest);
++
++        return true;
++    }
++    return false;
++}
++
+ /* vmv.s.x vd, rs1 # vd[0] = rs1 */
+ static bool trans_vmv_s_x(DisasContext *s, arg_vmv_s_x *a)
+ {
+-    if (vext_check_isa_ill(s)) {
++    if (require_rvv(s) &&
++        vext_check_isa_ill(s)) {
+         /* This instruction ignores LMUL and vector register groups */
+-        int maxsz = s->vlen >> 3;
+         TCGv_i64 t1;
++        TCGv s1;
+         TCGLabel *over = gen_new_label();
+ 
+         tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
+-        tcg_gen_gvec_dup_imm(SEW64, vreg_ofs(s, a->rd), maxsz, maxsz, 0);
+-        if (a->rs1 == 0) {
+-            goto done;
+-        }
+ 
+         t1 = tcg_temp_new_i64();
+-        tcg_gen_extu_tl_i64(t1, cpu_gpr[a->rs1]);
++        s1 = tcg_temp_new();
++
++        /*
++         * load gpr and sign-extend to 64 bits,
++         * then truncate to SEW bits when storing to vreg.
++         */
++        gen_get_gpr(s1, a->rs1);
++        tcg_gen_ext_tl_i64(t1, s1);
+         vec_element_storei(s, a->rd, 0, t1);
+         tcg_temp_free_i64(t1);
++        tcg_temp_free(s1);
+         mark_vs_dirty(s);
+-    done:
+         gen_set_label(over);
+         return true;
+     }
+-- 
+2.33.1
+

+ 62 - 0
recipes-devtools/qemu/qemu/0044-target-riscv-rvv-1.0-floating-point-move-instruction.patch

@@ -0,0 +1,62 @@
+From 943f59cdb3255cf6e353565ee1d9360e67f9c5c5 Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Tue, 4 Aug 2020 10:27:28 +0800
+Subject: [PATCH 044/107] target/riscv: rvv-1.0: floating-point move
+ instruction
+
+NaN-boxed the scalar floating-point register based on RVV 1.0's rules.
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
+---
+ target/riscv/insn_trans/trans_rvv.c.inc | 16 ++++++++++++++--
+ 1 file changed, 14 insertions(+), 2 deletions(-)
+
+diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
+index d83d30c101..0c8b5e232d 100644
+--- a/target/riscv/insn_trans/trans_rvv.c.inc
++++ b/target/riscv/insn_trans/trans_rvv.c.inc
+@@ -2691,9 +2691,15 @@ static bool trans_vfmv_v_f(DisasContext *s, arg_vfmv_v_f *a)
+         require_rvf(s) &&
+         vext_check_isa_ill(s) &&
+         require_align(a->rd, s->lmul)) {
++        TCGv_i64 t1;
++
+         if (s->vl_eq_vlmax) {
++            t1 = tcg_temp_new_i64();
++            /* NaN-box f[rs1] */
++            do_nanbox(s, t1, cpu_fpr[a->rs1]);
++
+             tcg_gen_gvec_dup_i64(s->sew, vreg_ofs(s, a->rd),
+-                                 MAXSZ(s), MAXSZ(s), cpu_fpr[a->rs1]);
++                                 MAXSZ(s), MAXSZ(s), t1);
+             mark_vs_dirty(s);
+         } else {
+             TCGv_ptr dest;
+@@ -2707,16 +2713,22 @@ static bool trans_vfmv_v_f(DisasContext *s, arg_vfmv_v_f *a)
+             TCGLabel *over = gen_new_label();
+             tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
+ 
++            t1 = tcg_temp_new_i64();
++            /* NaN-box f[rs1] */
++            do_nanbox(s, t1, cpu_fpr[a->rs1]);
++
+             dest = tcg_temp_new_ptr();
+             desc = tcg_const_i32(simd_desc(0, s->vlen / 8, data));
+             tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, a->rd));
+-            fns[s->sew - 1](dest, cpu_fpr[a->rs1], cpu_env, desc);
++
++            fns[s->sew - 1](dest, t1, cpu_env, desc);
+ 
+             tcg_temp_free_ptr(dest);
+             tcg_temp_free_i32(desc);
+             mark_vs_dirty(s);
+             gen_set_label(over);
+         }
++        tcg_temp_free_i64(t1);
+         return true;
+     }
+     return false;
+-- 
+2.33.1
+
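
NaN-boxing means a scalar float narrower than the FP register must have all higher-order bits set to one; RVV 1.0 requires the scalar operand to be NaN-boxed to SEW before it is splatted into the vector. A standalone sketch of the single-precision case (a 32-bit value boxed into a 64-bit register image, which is the only case illustrated here):

    #include <stdint.h>

    /* NaN-box a 32-bit scalar into a 64-bit FP register image: upper 32 bits all ones,
     * so a consumer expecting a wider value sees a canonical NaN rather than garbage. */
    static uint64_t nanbox_s(uint32_t f32_bits)
    {
        return UINT64_C(0xffffffff00000000) | f32_bits;
    }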

+ 114 - 0
recipes-devtools/qemu/qemu/0045-target-riscv-rvv-1.0-floating-point-scalar-move-inst.patch

@@ -0,0 +1,114 @@
+From f33ffb6ecdc0b978b0980c3117fab6a276f902ff Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Tue, 1 Dec 2020 22:30:18 +0800
+Subject: [PATCH 045/107] target/riscv: rvv-1.0: floating-point scalar move
+ instructions
+
+NaN-boxed the scalar floating-point register based on RVV 1.0's rules.
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+---
+ target/riscv/insn32.decode              |  4 +--
+ target/riscv/insn_trans/trans_rvv.c.inc | 39 +++++++++++++------------
+ target/riscv/internals.h                |  5 ----
+ 3 files changed, 22 insertions(+), 26 deletions(-)
+
+diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
+index 0b17eed559..41eb9628d3 100644
+--- a/target/riscv/insn32.decode
++++ b/target/riscv/insn32.decode
+@@ -612,8 +612,8 @@ vid_v           010100 . 00000 10001 010 ..... 1010111 @r1_vm
+ vmv_x_s         010000 1 ..... 00000 010 ..... 1010111 @r2rd
+ vmv_s_x         010000 1 00000 ..... 110 ..... 1010111 @r2
+ vext_x_v        001100 1 ..... ..... 010 ..... 1010111 @r
+-vfmv_f_s        001100 1 ..... 00000 001 ..... 1010111 @r2rd
+-vfmv_s_f        001101 1 00000 ..... 101 ..... 1010111 @r2
++vfmv_f_s        010000 1 ..... 00000 001 ..... 1010111 @r2rd
++vfmv_s_f        010000 1 00000 ..... 101 ..... 1010111 @r2
+ vslideup_vx     001110 . ..... ..... 100 ..... 1010111 @r_vm
+ vslideup_vi     001110 . ..... ..... 011 ..... 1010111 @r_vm
+ vslide1up_vx    001110 . ..... ..... 110 ..... 1010111 @r_vm
+diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
+index 0c8b5e232d..39b5d9a064 100644
+--- a/target/riscv/insn_trans/trans_rvv.c.inc
++++ b/target/riscv/insn_trans/trans_rvv.c.inc
+@@ -3282,14 +3282,20 @@ static bool trans_vmv_s_x(DisasContext *s, arg_vmv_s_x *a)
+ /* Floating-Point Scalar Move Instructions */
+ static bool trans_vfmv_f_s(DisasContext *s, arg_vfmv_f_s *a)
+ {
+-    if (!s->vill && has_ext(s, RVF) &&
+-        (s->mstatus_fs != 0) && (s->sew != 0)) {
+-        unsigned int len = 8 << s->sew;
++    if (require_rvv(s) &&
++        require_rvf(s) &&
++        vext_check_isa_ill(s)) {
++        unsigned int ofs = (8 << s->sew);
++        unsigned int len = 64 - ofs;
++        TCGv_i64 t_nan;
+ 
+         vec_element_loadi(s, cpu_fpr[a->rd], a->rs2, 0, false);
+-        if (len < 64) {
+-            tcg_gen_ori_i64(cpu_fpr[a->rd], cpu_fpr[a->rd],
+-                            MAKE_64BIT_MASK(len, 64 - len));
++        /* NaN-box f[rd] as necessary for SEW */
++        if (len) {
++            t_nan = tcg_const_i64(UINT64_MAX);
++            tcg_gen_deposit_i64(cpu_fpr[a->rd], cpu_fpr[a->rd],
++                                t_nan, ofs, len);
++            tcg_temp_free_i64(t_nan);
+         }
+ 
+         mark_fs_dirty(s);
+@@ -3301,25 +3307,20 @@ static bool trans_vfmv_f_s(DisasContext *s, arg_vfmv_f_s *a)
+ /* vfmv.s.f vd, rs1 # vd[0] = rs1 (vs2=0) */
+ static bool trans_vfmv_s_f(DisasContext *s, arg_vfmv_s_f *a)
+ {
+-    if (!s->vill && has_ext(s, RVF) && (s->sew != 0)) {
+-        TCGv_i64 t1;
++    if (require_rvv(s) &&
++        require_rvf(s) &&
++        vext_check_isa_ill(s)) {
+         /* The instructions ignore LMUL and vector register group. */
+-        uint32_t vlmax = s->vlen >> 3;
++        TCGv_i64 t1;
++        TCGLabel *over = gen_new_label();
+ 
+         /* if vl == 0, skip vector register write back */
+-        TCGLabel *over = gen_new_label();
+         tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
+ 
+-        /* zeroed all elements */
+-        tcg_gen_gvec_dup_imm(SEW64, vreg_ofs(s, a->rd), vlmax, vlmax, 0);
+-
+-        /* NaN-box f[rs1] as necessary for SEW */
++        /* NaN-box f[rs1] */
+         t1 = tcg_temp_new_i64();
+-        if (s->sew == MO_64 && !has_ext(s, RVD)) {
+-            tcg_gen_ori_i64(t1, cpu_fpr[a->rs1], MAKE_64BIT_MASK(32, 32));
+-        } else {
+-            tcg_gen_mov_i64(t1, cpu_fpr[a->rs1]);
+-        }
++        do_nanbox(s, t1, cpu_fpr[a->rs1]);
++
+         vec_element_storei(s, a->rd, 0, t1);
+         tcg_temp_free_i64(t1);
+         mark_vs_dirty(s);
+diff --git a/target/riscv/internals.h b/target/riscv/internals.h
+index 81f5dfa477..ac062dc0b4 100644
+--- a/target/riscv/internals.h
++++ b/target/riscv/internals.h
+@@ -32,11 +32,6 @@ target_ulong fclass_h(uint64_t frs1);
+ target_ulong fclass_s(uint64_t frs1);
+ target_ulong fclass_d(uint64_t frs1);
+ 
+-#define SEW8  0
+-#define SEW16 1
+-#define SEW32 2
+-#define SEW64 3
+-
+ #ifndef CONFIG_USER_ONLY
+ extern const VMStateDescription vmstate_riscv_cpu;
+ #endif
+-- 
+2.33.1
+

+ 70 - 0
recipes-devtools/qemu/qemu/0046-target-riscv-rvv-1.0-whole-register-move-instruction.patch

@@ -0,0 +1,70 @@
+From 0c30075fced0c87205d9bec510cade4b421b14b5 Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Fri, 14 Aug 2020 17:02:24 +0800
+Subject: [PATCH 046/107] target/riscv: rvv-1.0: whole register move
+ instructions
+
+Add the following instructions:
+
+* vmv1r.v
+* vmv2r.v
+* vmv4r.v
+* vmv8r.v
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+---
+ target/riscv/insn32.decode              |  4 ++++
+ target/riscv/insn_trans/trans_rvv.c.inc | 25 +++++++++++++++++++++++++
+ 2 files changed, 29 insertions(+)
+
+diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
+index 41eb9628d3..230aa74c51 100644
+--- a/target/riscv/insn32.decode
++++ b/target/riscv/insn32.decode
+@@ -625,6 +625,10 @@ vrgatherei16_vv 001110 . ..... ..... 000 ..... 1010111 @r_vm
+ vrgather_vx     001100 . ..... ..... 100 ..... 1010111 @r_vm
+ vrgather_vi     001100 . ..... ..... 011 ..... 1010111 @r_vm
+ vcompress_vm    010111 - ..... ..... 010 ..... 1010111 @r
++vmv1r_v         100111 1 ..... 00000 011 ..... 1010111 @r2rd
++vmv2r_v         100111 1 ..... 00001 011 ..... 1010111 @r2rd
++vmv4r_v         100111 1 ..... 00011 011 ..... 1010111 @r2rd
++vmv8r_v         100111 1 ..... 00111 011 ..... 1010111 @r2rd
+ 
+ vsetvli         0 ........... ..... 111 ..... 1010111  @r2_zimm
+ vsetvl          1000000 ..... ..... 111 ..... 1010111  @r
+diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
+index 39b5d9a064..d6c68587ab 100644
+--- a/target/riscv/insn_trans/trans_rvv.c.inc
++++ b/target/riscv/insn_trans/trans_rvv.c.inc
+@@ -3494,3 +3494,28 @@ static bool trans_vcompress_vm(DisasContext *s, arg_r *a)
+     }
+     return false;
+ }
++
++/*
++ * Whole Vector Register Move Instructions ignore vtype and vl setting.
++ * Thus, we don't need to check vill bit. (Section 17.6)
++ */
++#define GEN_VMV_WHOLE_TRANS(NAME, LEN)                          \
++static bool trans_##NAME(DisasContext *s, arg_##NAME * a)       \
++{                                                               \
++    if (require_rvv(s) &&                                       \
++        QEMU_IS_ALIGNED(a->rd, LEN) &&                          \
++        QEMU_IS_ALIGNED(a->rs2, LEN)) {                         \
++        /* EEW = 8 */                                           \
++        tcg_gen_gvec_mov(MO_8, vreg_ofs(s, a->rd),              \
++                         vreg_ofs(s, a->rs2),                   \
++                         s->vlen / 8 * LEN, s->vlen / 8 * LEN); \
++        mark_vs_dirty(s);                                       \
++        return true;                                            \
++    }                                                           \
++    return false;                                               \
++}
++
++GEN_VMV_WHOLE_TRANS(vmv1r_v, 1)
++GEN_VMV_WHOLE_TRANS(vmv2r_v, 2)
++GEN_VMV_WHOLE_TRANS(vmv4r_v, 4)
++GEN_VMV_WHOLE_TRANS(vmv8r_v, 8)
+-- 
+2.33.1
+
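
vmv<nr>r.v copies whole register groups and, as the comment in the patch notes, ignores vtype and vl entirely; the only requirement is that the source and destination register numbers are aligned to the group size. Stripped of TCG, the data movement is a plain copy of nr * VLENB bytes. A standalone sketch, modeling the register file as 32 contiguous VLENB-byte registers (an assumption of this sketch only):

    #include <stdint.h>
    #include <string.h>

    /* Copy an nr-register group (nr in {1,2,4,8}); vlenb is VLEN in bytes. */
    static int vmv_nr_r_model(uint8_t *regfile, size_t vlenb, int rd, int rs2, int nr)
    {
        if ((rd % nr) || (rs2 % nr)) {
            return -1;                      /* misaligned register group: illegal */
        }
        memmove(regfile + (size_t)rd * vlenb,
                regfile + (size_t)rs2 * vlenb,
                (size_t)nr * vlenb);        /* EEW=8 copy, independent of vtype/vl */
        return 0;
    }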

+ 194 - 0
recipes-devtools/qemu/qemu/0047-target-riscv-rvv-1.0-integer-extension-instructions.patch

@@ -0,0 +1,194 @@
+From 0ab96f313a2f424416b0d33bbb148ab1ccc68788 Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Mon, 17 Aug 2020 13:38:42 +0800
+Subject: [PATCH 047/107] target/riscv: rvv-1.0: integer extension instructions
+
+Add the following instructions:
+
+* vzext.vf2
+* vzext.vf4
+* vzext.vf8
+* vsext.vf2
+* vsext.vf4
+* vsext.vf8
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+---
+ target/riscv/helper.h                   | 14 +++++
+ target/riscv/insn32.decode              |  8 +++
+ target/riscv/insn_trans/trans_rvv.c.inc | 80 +++++++++++++++++++++++++
+ target/riscv/vector_helper.c            | 31 ++++++++++
+ 4 files changed, 133 insertions(+)
+
+diff --git a/target/riscv/helper.h b/target/riscv/helper.h
+index 3e1150d5eb..6d67b67311 100644
+--- a/target/riscv/helper.h
++++ b/target/riscv/helper.h
+@@ -1147,3 +1147,17 @@ DEF_HELPER_6(vcompress_vm_b, void, ptr, ptr, ptr, ptr, env, i32)
+ DEF_HELPER_6(vcompress_vm_h, void, ptr, ptr, ptr, ptr, env, i32)
+ DEF_HELPER_6(vcompress_vm_w, void, ptr, ptr, ptr, ptr, env, i32)
+ DEF_HELPER_6(vcompress_vm_d, void, ptr, ptr, ptr, ptr, env, i32)
++
++DEF_HELPER_5(vzext_vf2_h, void, ptr, ptr, ptr, env, i32)
++DEF_HELPER_5(vzext_vf2_w, void, ptr, ptr, ptr, env, i32)
++DEF_HELPER_5(vzext_vf2_d, void, ptr, ptr, ptr, env, i32)
++DEF_HELPER_5(vzext_vf4_w, void, ptr, ptr, ptr, env, i32)
++DEF_HELPER_5(vzext_vf4_d, void, ptr, ptr, ptr, env, i32)
++DEF_HELPER_5(vzext_vf8_d, void, ptr, ptr, ptr, env, i32)
++
++DEF_HELPER_5(vsext_vf2_h, void, ptr, ptr, ptr, env, i32)
++DEF_HELPER_5(vsext_vf2_w, void, ptr, ptr, ptr, env, i32)
++DEF_HELPER_5(vsext_vf2_d, void, ptr, ptr, ptr, env, i32)
++DEF_HELPER_5(vsext_vf4_w, void, ptr, ptr, ptr, env, i32)
++DEF_HELPER_5(vsext_vf4_d, void, ptr, ptr, ptr, env, i32)
++DEF_HELPER_5(vsext_vf8_d, void, ptr, ptr, ptr, env, i32)
+diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
+index 230aa74c51..3c735b866d 100644
+--- a/target/riscv/insn32.decode
++++ b/target/riscv/insn32.decode
+@@ -630,6 +630,14 @@ vmv2r_v         100111 1 ..... 00001 011 ..... 1010111 @r2rd
+ vmv4r_v         100111 1 ..... 00011 011 ..... 1010111 @r2rd
+ vmv8r_v         100111 1 ..... 00111 011 ..... 1010111 @r2rd
+ 
++# Vector Integer Extension
++vzext_vf2       010010 . ..... 00110 010 ..... 1010111 @r2_vm
++vzext_vf4       010010 . ..... 00100 010 ..... 1010111 @r2_vm
++vzext_vf8       010010 . ..... 00010 010 ..... 1010111 @r2_vm
++vsext_vf2       010010 . ..... 00111 010 ..... 1010111 @r2_vm
++vsext_vf4       010010 . ..... 00101 010 ..... 1010111 @r2_vm
++vsext_vf8       010010 . ..... 00011 010 ..... 1010111 @r2_vm
++
+ vsetvli         0 ........... ..... 111 ..... 1010111  @r2_zimm
+ vsetvl          1000000 ..... ..... 111 ..... 1010111  @r
+ 
+diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
+index d6c68587ab..58f0edc031 100644
+--- a/target/riscv/insn_trans/trans_rvv.c.inc
++++ b/target/riscv/insn_trans/trans_rvv.c.inc
+@@ -3519,3 +3519,83 @@ GEN_VMV_WHOLE_TRANS(vmv1r_v, 1)
+ GEN_VMV_WHOLE_TRANS(vmv2r_v, 2)
+ GEN_VMV_WHOLE_TRANS(vmv4r_v, 4)
+ GEN_VMV_WHOLE_TRANS(vmv8r_v, 8)
++
++static bool int_ext_check(DisasContext *s, arg_rmr *a, uint8_t div)
++{
++    uint8_t from = (s->sew + 3) - div;
++    bool ret = require_rvv(s) &&
++        (from >= 3 && from <= 8) &&
++        (a->rd != a->rs2) &&
++        require_align(a->rd, s->lmul) &&
++        require_align(a->rs2, s->lmul - div) &&
++        require_vm(a->vm, a->rd) &&
++        require_noover(a->rd, s->lmul, a->rs2, s->lmul - div);
++    return ret;
++}
++
++static bool int_ext_op(DisasContext *s, arg_rmr *a, uint8_t seq)
++{
++    uint32_t data = 0;
++    gen_helper_gvec_3_ptr *fn;
++    TCGLabel *over = gen_new_label();
++    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
++
++    static gen_helper_gvec_3_ptr * const fns[6][4] = {
++        {
++            NULL, gen_helper_vzext_vf2_h,
++            gen_helper_vzext_vf2_w, gen_helper_vzext_vf2_d
++        },
++        {
++            NULL, NULL,
++            gen_helper_vzext_vf4_w, gen_helper_vzext_vf4_d,
++        },
++        {
++            NULL, NULL,
++            NULL, gen_helper_vzext_vf8_d
++        },
++        {
++            NULL, gen_helper_vsext_vf2_h,
++            gen_helper_vsext_vf2_w, gen_helper_vsext_vf2_d
++        },
++        {
++            NULL, NULL,
++            gen_helper_vsext_vf4_w, gen_helper_vsext_vf4_d,
++        },
++        {
++            NULL, NULL,
++            NULL, gen_helper_vsext_vf8_d
++        }
++    };
++
++    fn = fns[seq][s->sew];
++    if (fn == NULL) {
++        return false;
++    }
++
++    data = FIELD_DP32(data, VDATA, VM, a->vm);
++
++    tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
++                       vreg_ofs(s, a->rs2), cpu_env, 0,
++                       s->vlen / 8, data, fn);
++
++    mark_vs_dirty(s);
++    gen_set_label(over);
++    return true;
++}
++
++/* Vector Integer Extension */
++#define GEN_INT_EXT_TRANS(NAME, DIV, SEQ)             \
++static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
++{                                                     \
++    if (int_ext_check(s, a, DIV)) {                   \
++        return int_ext_op(s, a, SEQ);                 \
++    }                                                 \
++    return false;                                     \
++}
++
++GEN_INT_EXT_TRANS(vzext_vf2, 1, 0)
++GEN_INT_EXT_TRANS(vzext_vf4, 2, 1)
++GEN_INT_EXT_TRANS(vzext_vf8, 3, 2)
++GEN_INT_EXT_TRANS(vsext_vf2, 1, 3)
++GEN_INT_EXT_TRANS(vsext_vf4, 2, 4)
++GEN_INT_EXT_TRANS(vsext_vf8, 3, 5)
+diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
+index 9291f5c9ca..ebbd76c885 100644
+--- a/target/riscv/vector_helper.c
++++ b/target/riscv/vector_helper.c
+@@ -4750,3 +4750,34 @@ GEN_VEXT_VCOMPRESS_VM(vcompress_vm_b, uint8_t,  H1)
+ GEN_VEXT_VCOMPRESS_VM(vcompress_vm_h, uint16_t, H2)
+ GEN_VEXT_VCOMPRESS_VM(vcompress_vm_w, uint32_t, H4)
+ GEN_VEXT_VCOMPRESS_VM(vcompress_vm_d, uint64_t, H8)
++
++/* Vector Integer Extension */
++#define GEN_VEXT_INT_EXT(NAME, ETYPE, DTYPE, HD, HS1)            \
++void HELPER(NAME)(void *vd, void *v0, void *vs2,                 \
++                  CPURISCVState *env, uint32_t desc)             \
++{                                                                \
++    uint32_t vl = env->vl;                                       \
++    uint32_t vm = vext_vm(desc);                                 \
++    uint32_t i;                                                  \
++                                                                 \
++    for (i = 0; i < vl; i++) {                                   \
++        if (!vm && !vext_elem_mask(v0, i)) {                     \
++            continue;                                            \
++        }                                                        \
++        *((ETYPE *)vd + HD(i)) = *((DTYPE *)vs2 + HS1(i));       \
++    }                                                            \
++}
++
++GEN_VEXT_INT_EXT(vzext_vf2_h, uint16_t, uint8_t,  H2, H1)
++GEN_VEXT_INT_EXT(vzext_vf2_w, uint32_t, uint16_t, H4, H2)
++GEN_VEXT_INT_EXT(vzext_vf2_d, uint64_t, uint32_t, H8, H4)
++GEN_VEXT_INT_EXT(vzext_vf4_w, uint32_t, uint8_t,  H4, H1)
++GEN_VEXT_INT_EXT(vzext_vf4_d, uint64_t, uint16_t, H8, H2)
++GEN_VEXT_INT_EXT(vzext_vf8_d, uint64_t, uint8_t,  H8, H1)
++
++GEN_VEXT_INT_EXT(vsext_vf2_h, int16_t, int8_t,  H2, H1)
++GEN_VEXT_INT_EXT(vsext_vf2_w, int32_t, int16_t, H4, H2)
++GEN_VEXT_INT_EXT(vsext_vf2_d, int64_t, int32_t, H8, H4)
++GEN_VEXT_INT_EXT(vsext_vf4_w, int32_t, int8_t,  H4, H1)
++GEN_VEXT_INT_EXT(vsext_vf4_d, int64_t, int16_t, H8, H2)
++GEN_VEXT_INT_EXT(vsext_vf8_d, int64_t, int8_t,  H8, H1)
+-- 
+2.33.1
+
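
Each vzext/vsext variant widens every active source element by a factor of 2, 4 or 8 with zero or sign extension, leaving masked-off and tail elements untouched. A standalone sketch of the vzext.vf2 case with SEW=16 (the vsext variant would simply cast through int8_t instead):

    #include <stdint.h>
    #include <stddef.h>

    /* vzext.vf2, SEW=16: each active destination element is the zero-extended
     * 8-bit source element with the same index. */
    static void vzext_vf2_h_model(uint16_t *vd, const uint8_t *v0, const uint8_t *vs2,
                                  size_t vl, int vm)
    {
        for (size_t i = 0; i < vl; i++) {
            if (!vm && !v0[i]) {
                continue;                 /* masked-off: destination element unchanged */
            }
            vd[i] = (uint16_t)vs2[i];     /* zero extension from 8 to 16 bits */
        }
    }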

+ 203 - 0
recipes-devtools/qemu/qemu/0048-target-riscv-rvv-1.0-single-width-averaging-add-and-.patch

@@ -0,0 +1,203 @@
+From 1a086615faea4f46c8ba625b72c0b6e886536989 Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Thu, 30 Jul 2020 21:23:01 +0800
+Subject: [PATCH 048/107] target/riscv: rvv-1.0: single-width averaging add and
+ subtract instructions
+
+Add the following instructions:
+
+* vaaddu.vv
+* vaaddu.vx
+* vasubu.vv
+* vasubu.vx
+
+Remove the following instructions:
+
+* vadd.vi
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+---
+ target/riscv/helper.h                   | 16 ++++++
+ target/riscv/insn32.decode              | 13 +++--
+ target/riscv/insn_trans/trans_rvv.c.inc |  5 +-
+ target/riscv/vector_helper.c            | 74 +++++++++++++++++++++++++
+ 4 files changed, 102 insertions(+), 6 deletions(-)
+
+diff --git a/target/riscv/helper.h b/target/riscv/helper.h
+index 6d67b67311..9ff330e6d8 100644
+--- a/target/riscv/helper.h
++++ b/target/riscv/helper.h
+@@ -723,18 +723,34 @@ DEF_HELPER_6(vaadd_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+ DEF_HELPER_6(vaadd_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+ DEF_HELPER_6(vaadd_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+ DEF_HELPER_6(vaadd_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
++DEF_HELPER_6(vaaddu_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
++DEF_HELPER_6(vaaddu_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
++DEF_HELPER_6(vaaddu_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
++DEF_HELPER_6(vaaddu_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+ DEF_HELPER_6(vasub_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+ DEF_HELPER_6(vasub_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+ DEF_HELPER_6(vasub_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+ DEF_HELPER_6(vasub_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
++DEF_HELPER_6(vasubu_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
++DEF_HELPER_6(vasubu_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
++DEF_HELPER_6(vasubu_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
++DEF_HELPER_6(vasubu_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+ DEF_HELPER_6(vaadd_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+ DEF_HELPER_6(vaadd_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+ DEF_HELPER_6(vaadd_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+ DEF_HELPER_6(vaadd_vx_d, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vaaddu_vx_b, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vaaddu_vx_h, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vaaddu_vx_w, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vaaddu_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+ DEF_HELPER_6(vasub_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+ DEF_HELPER_6(vasub_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+ DEF_HELPER_6(vasub_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+ DEF_HELPER_6(vasub_vx_d, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vasubu_vx_b, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vasubu_vx_h, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vasubu_vx_w, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vasubu_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+ 
+ DEF_HELPER_6(vsmul_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+ DEF_HELPER_6(vsmul_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
+index 3c735b866d..3806810f4f 100644
+--- a/target/riscv/insn32.decode
++++ b/target/riscv/insn32.decode
+@@ -468,11 +468,14 @@ vssubu_vv       100010 . ..... ..... 000 ..... 1010111 @r_vm
+ vssubu_vx       100010 . ..... ..... 100 ..... 1010111 @r_vm
+ vssub_vv        100011 . ..... ..... 000 ..... 1010111 @r_vm
+ vssub_vx        100011 . ..... ..... 100 ..... 1010111 @r_vm
+-vaadd_vv        100100 . ..... ..... 000 ..... 1010111 @r_vm
+-vaadd_vx        100100 . ..... ..... 100 ..... 1010111 @r_vm
+-vaadd_vi        100100 . ..... ..... 011 ..... 1010111 @r_vm
+-vasub_vv        100110 . ..... ..... 000 ..... 1010111 @r_vm
+-vasub_vx        100110 . ..... ..... 100 ..... 1010111 @r_vm
++vaadd_vv        001001 . ..... ..... 010 ..... 1010111 @r_vm
++vaadd_vx        001001 . ..... ..... 110 ..... 1010111 @r_vm
++vaaddu_vv       001000 . ..... ..... 010 ..... 1010111 @r_vm
++vaaddu_vx       001000 . ..... ..... 110 ..... 1010111 @r_vm
++vasub_vv        001011 . ..... ..... 010 ..... 1010111 @r_vm
++vasub_vx        001011 . ..... ..... 110 ..... 1010111 @r_vm
++vasubu_vv       001010 . ..... ..... 010 ..... 1010111 @r_vm
++vasubu_vx       001010 . ..... ..... 110 ..... 1010111 @r_vm
+ vsmul_vv        100111 . ..... ..... 000 ..... 1010111 @r_vm
+ vsmul_vx        100111 . ..... ..... 100 ..... 1010111 @r_vm
+ vwsmaccu_vv     111100 . ..... ..... 000 ..... 1010111 @r_vm
+diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
+index 58f0edc031..d3b71a2bb2 100644
+--- a/target/riscv/insn_trans/trans_rvv.c.inc
++++ b/target/riscv/insn_trans/trans_rvv.c.inc
+@@ -2237,10 +2237,13 @@ GEN_OPIVI_TRANS(vsadd_vi, IMM_SX, vsadd_vx, opivx_check)
+ 
+ /* Vector Single-Width Averaging Add and Subtract */
+ GEN_OPIVV_TRANS(vaadd_vv, opivv_check)
++GEN_OPIVV_TRANS(vaaddu_vv, opivv_check)
+ GEN_OPIVV_TRANS(vasub_vv, opivv_check)
++GEN_OPIVV_TRANS(vasubu_vv, opivv_check)
+ GEN_OPIVX_TRANS(vaadd_vx,  opivx_check)
++GEN_OPIVX_TRANS(vaaddu_vx,  opivx_check)
+ GEN_OPIVX_TRANS(vasub_vx,  opivx_check)
+-GEN_OPIVI_TRANS(vaadd_vi, 0, vaadd_vx, opivx_check)
++GEN_OPIVX_TRANS(vasubu_vx,  opivx_check)
+ 
+ /* Vector Single-Width Fractional Multiply with Rounding and Saturation */
+ GEN_OPIVV_TRANS(vsmul_vv, opivv_check)
+diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
+index ebbd76c885..55b55e5b59 100644
+--- a/target/riscv/vector_helper.c
++++ b/target/riscv/vector_helper.c
+@@ -2502,6 +2502,43 @@ GEN_VEXT_VX_RM(vaadd_vx_h, 2, 2)
+ GEN_VEXT_VX_RM(vaadd_vx_w, 4, 4)
+ GEN_VEXT_VX_RM(vaadd_vx_d, 8, 8)
+ 
++static inline uint32_t aaddu32(CPURISCVState *env, int vxrm,
++                               uint32_t a, uint32_t b)
++{
++    uint64_t res = (uint64_t)a + b;
++    uint8_t round = get_round(vxrm, res, 1);
++
++    return (res >> 1) + round;
++}
++
++static inline uint64_t aaddu64(CPURISCVState *env, int vxrm,
++                               uint64_t a, uint64_t b)
++{
++    uint64_t res = a + b;
++    uint8_t round = get_round(vxrm, res, 1);
++    uint64_t over = (uint64_t)(res < a) << 63;
++
++    return ((res >> 1) | over) + round;
++}
++
++RVVCALL(OPIVV2_RM, vaaddu_vv_b, OP_UUU_B, H1, H1, H1, aaddu32)
++RVVCALL(OPIVV2_RM, vaaddu_vv_h, OP_UUU_H, H2, H2, H2, aaddu32)
++RVVCALL(OPIVV2_RM, vaaddu_vv_w, OP_UUU_W, H4, H4, H4, aaddu32)
++RVVCALL(OPIVV2_RM, vaaddu_vv_d, OP_UUU_D, H8, H8, H8, aaddu64)
++GEN_VEXT_VV_RM(vaaddu_vv_b, 1, 1)
++GEN_VEXT_VV_RM(vaaddu_vv_h, 2, 2)
++GEN_VEXT_VV_RM(vaaddu_vv_w, 4, 4)
++GEN_VEXT_VV_RM(vaaddu_vv_d, 8, 8)
++
++RVVCALL(OPIVX2_RM, vaaddu_vx_b, OP_UUU_B, H1, H1, aaddu32)
++RVVCALL(OPIVX2_RM, vaaddu_vx_h, OP_UUU_H, H2, H2, aaddu32)
++RVVCALL(OPIVX2_RM, vaaddu_vx_w, OP_UUU_W, H4, H4, aaddu32)
++RVVCALL(OPIVX2_RM, vaaddu_vx_d, OP_UUU_D, H8, H8, aaddu64)
++GEN_VEXT_VX_RM(vaaddu_vx_b, 1, 1)
++GEN_VEXT_VX_RM(vaaddu_vx_h, 2, 2)
++GEN_VEXT_VX_RM(vaaddu_vx_w, 4, 4)
++GEN_VEXT_VX_RM(vaaddu_vx_d, 8, 8)
++
+ static inline int32_t asub32(CPURISCVState *env, int vxrm, int32_t a, int32_t b)
+ {
+     int64_t res = (int64_t)a - b;
+@@ -2538,6 +2575,43 @@ GEN_VEXT_VX_RM(vasub_vx_h, 2, 2)
+ GEN_VEXT_VX_RM(vasub_vx_w, 4, 4)
+ GEN_VEXT_VX_RM(vasub_vx_d, 8, 8)
+ 
++static inline uint32_t asubu32(CPURISCVState *env, int vxrm,
++                               uint32_t a, uint32_t b)
++{
++    int64_t res = (int64_t)a - b;
++    uint8_t round = get_round(vxrm, res, 1);
++
++    return (res >> 1) + round;
++}
++
++static inline uint64_t asubu64(CPURISCVState *env, int vxrm,
++                               uint64_t a, uint64_t b)
++{
++    uint64_t res = (uint64_t)a - b;
++    uint8_t round = get_round(vxrm, res, 1);
++    uint64_t over = (uint64_t)(res > a) << 63;
++
++    return ((res >> 1) | over) + round;
++}
++
++RVVCALL(OPIVV2_RM, vasubu_vv_b, OP_UUU_B, H1, H1, H1, asubu32)
++RVVCALL(OPIVV2_RM, vasubu_vv_h, OP_UUU_H, H2, H2, H2, asubu32)
++RVVCALL(OPIVV2_RM, vasubu_vv_w, OP_UUU_W, H4, H4, H4, asubu32)
++RVVCALL(OPIVV2_RM, vasubu_vv_d, OP_UUU_D, H8, H8, H8, asubu64)
++GEN_VEXT_VV_RM(vasubu_vv_b, 1, 1)
++GEN_VEXT_VV_RM(vasubu_vv_h, 2, 2)
++GEN_VEXT_VV_RM(vasubu_vv_w, 4, 4)
++GEN_VEXT_VV_RM(vasubu_vv_d, 8, 8)
++
++RVVCALL(OPIVX2_RM, vasubu_vx_b, OP_UUU_B, H1, H1, asubu32)
++RVVCALL(OPIVX2_RM, vasubu_vx_h, OP_UUU_H, H2, H2, asubu32)
++RVVCALL(OPIVX2_RM, vasubu_vx_w, OP_UUU_W, H4, H4, asubu32)
++RVVCALL(OPIVX2_RM, vasubu_vx_d, OP_UUU_D, H8, H8, asubu64)
++GEN_VEXT_VX_RM(vasubu_vx_b, 1, 1)
++GEN_VEXT_VX_RM(vasubu_vx_h, 2, 2)
++GEN_VEXT_VX_RM(vasubu_vx_w, 4, 4)
++GEN_VEXT_VX_RM(vasubu_vx_d, 8, 8)
++
+ /* Vector Single-Width Fractional Multiply with Rounding and Saturation */
+ static inline int8_t vsmul8(CPURISCVState *env, int vxrm, int8_t a, int8_t b)
+ {
+-- 
+2.33.1
+
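Editor's note: the unsigned averaging helpers added in the hunk above split the work into a wrapping add, a recovered carry bit, and a vxrm-dependent rounding increment. Below is a minimal stand-alone sketch of the 64-bit case, assuming only the round-to-nearest-up (rnu) rounding mode; get_round_rnu is a simplified stand-in for QEMU's get_round, not the real helper.

    #include <stdint.h>
    #include <stdio.h>

    static uint8_t get_round_rnu(uint64_t v)
    {
        return v & 1;                               /* rnu: round up when the dropped bit is 1 */
    }

    static uint64_t aaddu64_model(uint64_t a, uint64_t b)
    {
        uint64_t res = a + b;                       /* may wrap around */
        uint64_t over = (uint64_t)(res < a) << 63;  /* re-insert the lost carry as bit 63 */
        return ((res >> 1) | over) + get_round_rnu(res);
    }

    int main(void)
    {
        /* average of UINT64_MAX and 3 is 0x8000000000000001; without 'over' it would be 1 */
        printf("%#llx\n", (unsigned long long)aaddu64_model(UINT64_MAX, 3));
        return 0;
    }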

+ 34 - 0
recipes-devtools/qemu/qemu/0049-target-riscv-rvv-1.0-single-width-bit-shift-instruct.patch

@@ -0,0 +1,34 @@
+From 249b8790558e018fdf8c1d3c585222e00ac08afd Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Mon, 20 Jul 2020 22:36:34 +0800
+Subject: [PATCH 049/107] target/riscv: rvv-1.0: single-width bit shift
+ instructions
+
+Truncate vsll.vi, vsrl.vi, vsra.vi's immediate values to lg2(SEW) bits.
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+---
+ target/riscv/insn_trans/trans_rvv.c.inc | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
+index d3b71a2bb2..75a5d6f7a5 100644
+--- a/target/riscv/insn_trans/trans_rvv.c.inc
++++ b/target/riscv/insn_trans/trans_rvv.c.inc
+@@ -1915,9 +1915,9 @@ GEN_OPIVX_GVEC_SHIFT_TRANS(vsll_vx,  shls)
+ GEN_OPIVX_GVEC_SHIFT_TRANS(vsrl_vx,  shrs)
+ GEN_OPIVX_GVEC_SHIFT_TRANS(vsra_vx,  sars)
+ 
+-GEN_OPIVI_GVEC_TRANS(vsll_vi, IMM_ZX, vsll_vx, shli)
+-GEN_OPIVI_GVEC_TRANS(vsrl_vi, IMM_ZX, vsrl_vx, shri)
+-GEN_OPIVI_GVEC_TRANS(vsra_vi, IMM_ZX, vsra_vx, sari)
++GEN_OPIVI_GVEC_TRANS(vsll_vi, IMM_TRUNC_SEW, vsll_vx, shli)
++GEN_OPIVI_GVEC_TRANS(vsrl_vi, IMM_TRUNC_SEW, vsrl_vx, shri)
++GEN_OPIVI_GVEC_TRANS(vsra_vi, IMM_TRUNC_SEW, vsra_vx, sari)
+ 
+ /* Vector Narrowing Integer Right Shift Instructions */
+ static bool opivv_narrow_check(DisasContext *s, arg_rmrr *a)
+-- 
+2.33.1
+
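Editor's note: the IMM_TRUNC_SEW change above makes the 5-bit shift immediate be reduced modulo SEW before use (it only has an effect for SEW of 8 or 16). A small sketch of that reduction; trunc_sew is an illustrative name, not a QEMU symbol.

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t trunc_sew(uint32_t imm, uint32_t sew)
    {
        return imm & (sew - 1);     /* keep only the low log2(SEW) bits */
    }

    int main(void)
    {
        /* imm = 20 on a SEW=16 element shifts by 20 & 0xf = 4 */
        printf("%u\n", trunc_sew(20, 16));
        return 0;
    }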

+ 130 - 0
recipes-devtools/qemu/qemu/0050-target-riscv-rvv-1.0-integer-add-with-carry-subtract.patch

@@ -0,0 +1,130 @@
+From 7435c8b5574ec0d31b3db9730945057dbc1dee9b Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Thu, 30 Jul 2020 21:32:13 +0800
+Subject: [PATCH 050/107] target/riscv: rvv-1.0: integer
+ add-with-carry/subtract-with-borrow
+
+* Only do carry-in or borrow-in if the operation is masked (vm=0).
+* Remove clear function from helper functions as the tail elements
+  are unchanged in RVV 1.0.
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+---
+ target/riscv/insn32.decode              | 20 ++++++++++----------
+ target/riscv/insn_trans/trans_rvv.c.inc |  2 +-
+ target/riscv/vector_helper.c            | 20 ++++++--------------
+ 3 files changed, 17 insertions(+), 25 deletions(-)
+
+diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
+index 3806810f4f..462ec504b3 100644
+--- a/target/riscv/insn32.decode
++++ b/target/riscv/insn32.decode
+@@ -353,16 +353,16 @@ vwsubu_wv       110110 . ..... ..... 010 ..... 1010111 @r_vm
+ vwsubu_wx       110110 . ..... ..... 110 ..... 1010111 @r_vm
+ vwsub_wv        110111 . ..... ..... 010 ..... 1010111 @r_vm
+ vwsub_wx        110111 . ..... ..... 110 ..... 1010111 @r_vm
+-vadc_vvm        010000 1 ..... ..... 000 ..... 1010111 @r_vm_1
+-vadc_vxm        010000 1 ..... ..... 100 ..... 1010111 @r_vm_1
+-vadc_vim        010000 1 ..... ..... 011 ..... 1010111 @r_vm_1
+-vmadc_vvm       010001 1 ..... ..... 000 ..... 1010111 @r_vm_1
+-vmadc_vxm       010001 1 ..... ..... 100 ..... 1010111 @r_vm_1
+-vmadc_vim       010001 1 ..... ..... 011 ..... 1010111 @r_vm_1
+-vsbc_vvm        010010 1 ..... ..... 000 ..... 1010111 @r_vm_1
+-vsbc_vxm        010010 1 ..... ..... 100 ..... 1010111 @r_vm_1
+-vmsbc_vvm       010011 1 ..... ..... 000 ..... 1010111 @r_vm_1
+-vmsbc_vxm       010011 1 ..... ..... 100 ..... 1010111 @r_vm_1
++vadc_vvm        010000 0 ..... ..... 000 ..... 1010111 @r_vm_1
++vadc_vxm        010000 0 ..... ..... 100 ..... 1010111 @r_vm_1
++vadc_vim        010000 0 ..... ..... 011 ..... 1010111 @r_vm_1
++vmadc_vvm       010001 . ..... ..... 000 ..... 1010111 @r_vm
++vmadc_vxm       010001 . ..... ..... 100 ..... 1010111 @r_vm
++vmadc_vim       010001 . ..... ..... 011 ..... 1010111 @r_vm
++vsbc_vvm        010010 0 ..... ..... 000 ..... 1010111 @r_vm_1
++vsbc_vxm        010010 0 ..... ..... 100 ..... 1010111 @r_vm_1
++vmsbc_vvm       010011 . ..... ..... 000 ..... 1010111 @r_vm
++vmsbc_vxm       010011 . ..... ..... 100 ..... 1010111 @r_vm
+ vand_vv         001001 . ..... ..... 000 ..... 1010111 @r_vm
+ vand_vx         001001 . ..... ..... 100 ..... 1010111 @r_vm
+ vand_vi         001001 . ..... ..... 011 ..... 1010111 @r_vm
+diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
+index 75a5d6f7a5..6ffbf5ea08 100644
+--- a/target/riscv/insn_trans/trans_rvv.c.inc
++++ b/target/riscv/insn_trans/trans_rvv.c.inc
+@@ -1774,7 +1774,7 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a)             \
+ 
+ /*
+  * For vadc and vsbc, an illegal instruction exception is raised if the
+- * destination vector register is v0 and LMUL > 1. (Section 12.3)
++ * destination vector register is v0 and LMUL > 1. (Section 12.4)
+  */
+ static bool opivv_vadc_check(DisasContext *s, arg_rmrr *a)
+ {
+diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
+index 55b55e5b59..f351743114 100644
+--- a/target/riscv/vector_helper.c
++++ b/target/riscv/vector_helper.c
+@@ -1153,7 +1153,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2,   \
+     for (i = 0; i < vl; i++) {                                \
+         ETYPE s1 = *((ETYPE *)vs1 + H(i));                    \
+         ETYPE s2 = *((ETYPE *)vs2 + H(i));                    \
+-        uint8_t carry = vext_elem_mask(v0, i);                \
++        ETYPE carry = vext_elem_mask(v0, i);                  \
+                                                               \
+         *((ETYPE *)vd + H(i)) = DO_OP(s2, s1, carry);         \
+     }                                                         \
+@@ -1178,7 +1178,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2,        \
+                                                                          \
+     for (i = 0; i < vl; i++) {                                           \
+         ETYPE s2 = *((ETYPE *)vs2 + H(i));                               \
+-        uint8_t carry = vext_elem_mask(v0, i);                           \
++        ETYPE carry = vext_elem_mask(v0, i);                             \
+                                                                          \
+         *((ETYPE *)vd + H(i)) = DO_OP(s2, (ETYPE)(target_long)s1, carry);\
+     }                                                                    \
+@@ -1203,19 +1203,15 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2,   \
+                   CPURISCVState *env, uint32_t desc)          \
+ {                                                             \
+     uint32_t vl = env->vl;                                    \
+-    uint32_t vlmax = vext_maxsz(desc) / sizeof(ETYPE);        \
++    uint32_t vm = vext_vm(desc);                              \
+     uint32_t i;                                               \
+                                                               \
+     for (i = 0; i < vl; i++) {                                \
+         ETYPE s1 = *((ETYPE *)vs1 + H(i));                    \
+         ETYPE s2 = *((ETYPE *)vs2 + H(i));                    \
+-        uint8_t carry = vext_elem_mask(v0, i);                \
+-                                                              \
++        ETYPE carry = !vm && vext_elem_mask(v0, i);           \
+         vext_set_elem_mask(vd, i, DO_OP(s2, s1, carry));      \
+     }                                                         \
+-    for (; i < vlmax; i++) {                                  \
+-        vext_set_elem_mask(vd, i, 0);                         \
+-    }                                                         \
+ }
+ 
+ GEN_VEXT_VMADC_VVM(vmadc_vvm_b, uint8_t,  H1, DO_MADC)
+@@ -1233,19 +1229,15 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1,          \
+                   void *vs2, CPURISCVState *env, uint32_t desc) \
+ {                                                               \
+     uint32_t vl = env->vl;                                      \
+-    uint32_t vlmax = vext_max_elems(desc, ctzl(sizeof(ETYPE))); \
++    uint32_t vm = vext_vm(desc);                                \
+     uint32_t i;                                                 \
+                                                                 \
+     for (i = 0; i < vl; i++) {                                  \
+         ETYPE s2 = *((ETYPE *)vs2 + H(i));                      \
+-        uint8_t carry = vext_elem_mask(v0, i);                  \
+-                                                                \
++        ETYPE carry = !vm && vext_elem_mask(v0, i);             \
+         vext_set_elem_mask(vd, i,                               \
+                 DO_OP(s2, (ETYPE)(target_long)s1, carry));      \
+     }                                                           \
+-    for (; i < vlmax; i++) {                                    \
+-        vext_set_elem_mask(vd, i, 0);                           \
+-    }                                                           \
+ }
+ 
+ GEN_VEXT_VMADC_VXM(vmadc_vxm_b, uint8_t,  H1, DO_MADC)
+-- 
+2.33.1
+
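Editor's note: the behavioural change in the patch above is that vmadc/vmsbc now take a vm bit and only consume the mask register as a carry/borrow input when vm == 0. A toy 8-bit model of one vmadc element follows; it is not the QEMU helper macro, just the same decision shape.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool madc8(uint8_t s2, uint8_t s1, bool vm, bool v0_bit)
    {
        uint8_t carry = !vm && v0_bit;                  /* no carry-in for the unmasked form */
        return (uint16_t)s2 + s1 + carry > UINT8_MAX;   /* carry-out becomes the mask bit */
    }

    int main(void)
    {
        printf("%d %d\n", madc8(0xff, 0x00, true, true),    /* vmadc.vv:  0 */
                          madc8(0xff, 0x00, false, true));  /* vmadc.vvm: 1 */
        return 0;
    }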

+ 195 - 0
recipes-devtools/qemu/qemu/0051-target-riscv-rvv-1.0-narrowing-integer-right-shift-i.patch

@@ -0,0 +1,195 @@
+From 2e5f58888215b897d65552d1ecbca7110724fb16 Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Thu, 2 Jul 2020 12:17:33 +0800
+Subject: [PATCH 051/107] target/riscv: rvv-1.0: narrowing integer right shift
+ instructions
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+---
+ target/riscv/helper.h                   | 24 ++++++++++----------
+ target/riscv/insn32.decode              | 12 +++++-----
+ target/riscv/insn_trans/trans_rvv.c.inc | 30 ++++++++++++-------------
+ target/riscv/vector_helper.c            | 24 ++++++++++----------
+ 4 files changed, 45 insertions(+), 45 deletions(-)
+
+diff --git a/target/riscv/helper.h b/target/riscv/helper.h
+index 9ff330e6d8..b582fe8720 100644
+--- a/target/riscv/helper.h
++++ b/target/riscv/helper.h
+@@ -426,18 +426,18 @@ DEF_HELPER_6(vsra_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+ DEF_HELPER_6(vsra_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+ DEF_HELPER_6(vsra_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+ 
+-DEF_HELPER_6(vnsrl_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+-DEF_HELPER_6(vnsrl_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+-DEF_HELPER_6(vnsrl_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+-DEF_HELPER_6(vnsra_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+-DEF_HELPER_6(vnsra_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+-DEF_HELPER_6(vnsra_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+-DEF_HELPER_6(vnsrl_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vnsrl_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vnsrl_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vnsra_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vnsra_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vnsra_vx_w, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vnsrl_wv_b, void, ptr, ptr, ptr, ptr, env, i32)
++DEF_HELPER_6(vnsrl_wv_h, void, ptr, ptr, ptr, ptr, env, i32)
++DEF_HELPER_6(vnsrl_wv_w, void, ptr, ptr, ptr, ptr, env, i32)
++DEF_HELPER_6(vnsra_wv_b, void, ptr, ptr, ptr, ptr, env, i32)
++DEF_HELPER_6(vnsra_wv_h, void, ptr, ptr, ptr, ptr, env, i32)
++DEF_HELPER_6(vnsra_wv_w, void, ptr, ptr, ptr, ptr, env, i32)
++DEF_HELPER_6(vnsrl_wx_b, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vnsrl_wx_h, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vnsrl_wx_w, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vnsra_wx_b, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vnsra_wx_h, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vnsra_wx_w, void, ptr, ptr, tl, ptr, env, i32)
+ 
+ DEF_HELPER_6(vmseq_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+ DEF_HELPER_6(vmseq_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
+index 462ec504b3..d3d70b47c1 100644
+--- a/target/riscv/insn32.decode
++++ b/target/riscv/insn32.decode
+@@ -381,12 +381,12 @@ vsrl_vi         101000 . ..... ..... 011 ..... 1010111 @r_vm
+ vsra_vv         101001 . ..... ..... 000 ..... 1010111 @r_vm
+ vsra_vx         101001 . ..... ..... 100 ..... 1010111 @r_vm
+ vsra_vi         101001 . ..... ..... 011 ..... 1010111 @r_vm
+-vnsrl_vv        101100 . ..... ..... 000 ..... 1010111 @r_vm
+-vnsrl_vx        101100 . ..... ..... 100 ..... 1010111 @r_vm
+-vnsrl_vi        101100 . ..... ..... 011 ..... 1010111 @r_vm
+-vnsra_vv        101101 . ..... ..... 000 ..... 1010111 @r_vm
+-vnsra_vx        101101 . ..... ..... 100 ..... 1010111 @r_vm
+-vnsra_vi        101101 . ..... ..... 011 ..... 1010111 @r_vm
++vnsrl_wv        101100 . ..... ..... 000 ..... 1010111 @r_vm
++vnsrl_wx        101100 . ..... ..... 100 ..... 1010111 @r_vm
++vnsrl_wi        101100 . ..... ..... 011 ..... 1010111 @r_vm
++vnsra_wv        101101 . ..... ..... 000 ..... 1010111 @r_vm
++vnsra_wx        101101 . ..... ..... 100 ..... 1010111 @r_vm
++vnsra_wi        101101 . ..... ..... 011 ..... 1010111 @r_vm
+ vmseq_vv        011000 . ..... ..... 000 ..... 1010111 @r_vm
+ vmseq_vx        011000 . ..... ..... 100 ..... 1010111 @r_vm
+ vmseq_vi        011000 . ..... ..... 011 ..... 1010111 @r_vm
+diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
+index 6ffbf5ea08..ebbb415c17 100644
+--- a/target/riscv/insn_trans/trans_rvv.c.inc
++++ b/target/riscv/insn_trans/trans_rvv.c.inc
+@@ -1920,7 +1920,7 @@ GEN_OPIVI_GVEC_TRANS(vsrl_vi, IMM_TRUNC_SEW, vsrl_vx, shri)
+ GEN_OPIVI_GVEC_TRANS(vsra_vi, IMM_TRUNC_SEW, vsra_vx, sari)
+ 
+ /* Vector Narrowing Integer Right Shift Instructions */
+-static bool opivv_narrow_check(DisasContext *s, arg_rmrr *a)
++static bool opiwv_narrow_check(DisasContext *s, arg_rmrr *a)
+ {
+     return require_rvv(s) &&
+            vext_check_isa_ill(s) &&
+@@ -1928,10 +1928,10 @@ static bool opivv_narrow_check(DisasContext *s, arg_rmrr *a)
+ }
+ 
+ /* OPIVV with NARROW */
+-#define GEN_OPIVV_NARROW_TRANS(NAME)                               \
++#define GEN_OPIWV_NARROW_TRANS(NAME)                               \
+ static bool trans_##NAME(DisasContext *s, arg_rmrr *a)             \
+ {                                                                  \
+-    if (opivv_narrow_check(s, a)) {                                \
++    if (opiwv_narrow_check(s, a)) {                                \
+         uint32_t data = 0;                                         \
+         static gen_helper_gvec_4_ptr * const fns[3] = {            \
+             gen_helper_##NAME##_b,                                 \
+@@ -1953,10 +1953,10 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a)             \
+     }                                                              \
+     return false;                                                  \
+ }
+-GEN_OPIVV_NARROW_TRANS(vnsra_vv)
+-GEN_OPIVV_NARROW_TRANS(vnsrl_vv)
++GEN_OPIWV_NARROW_TRANS(vnsra_wv)
++GEN_OPIWV_NARROW_TRANS(vnsrl_wv)
+ 
+-static bool opivx_narrow_check(DisasContext *s, arg_rmrr *a)
++static bool opiwx_narrow_check(DisasContext *s, arg_rmrr *a)
+ {
+     return require_rvv(s) &&
+            vext_check_isa_ill(s) &&
+@@ -1964,10 +1964,10 @@ static bool opivx_narrow_check(DisasContext *s, arg_rmrr *a)
+ }
+ 
+ /* OPIVX with NARROW */
+-#define GEN_OPIVX_NARROW_TRANS(NAME)                                     \
++#define GEN_OPIWX_NARROW_TRANS(NAME)                                     \
+ static bool trans_##NAME(DisasContext *s, arg_rmrr *a)                   \
+ {                                                                        \
+-    if (opivx_narrow_check(s, a)) {                                      \
++    if (opiwx_narrow_check(s, a)) {                                      \
+         static gen_helper_opivx * const fns[3] = {                       \
+             gen_helper_##NAME##_b,                                       \
+             gen_helper_##NAME##_h,                                       \
+@@ -1978,14 +1978,14 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a)                   \
+     return false;                                                        \
+ }
+ 
+-GEN_OPIVX_NARROW_TRANS(vnsra_vx)
+-GEN_OPIVX_NARROW_TRANS(vnsrl_vx)
++GEN_OPIWX_NARROW_TRANS(vnsra_wx)
++GEN_OPIWX_NARROW_TRANS(vnsrl_wx)
+ 
+-/* OPIVI with NARROW */
+-#define GEN_OPIVI_NARROW_TRANS(NAME, IMM_MODE, OPIVX)                    \
++/* OPIWI with NARROW */
++#define GEN_OPIWI_NARROW_TRANS(NAME, IMM_MODE, OPIVX)                    \
+ static bool trans_##NAME(DisasContext *s, arg_rmrr *a)                   \
+ {                                                                        \
+-    if (opivx_narrow_check(s, a)) {                                      \
++    if (opiwx_narrow_check(s, a)) {                                      \
+         static gen_helper_opivx * const fns[3] = {                       \
+             gen_helper_##OPIVX##_b,                                      \
+             gen_helper_##OPIVX##_h,                                      \
+@@ -1997,8 +1997,8 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a)                   \
+     return false;                                                        \
+ }
+ 
+-GEN_OPIVI_NARROW_TRANS(vnsra_vi, IMM_ZX, vnsra_vx)
+-GEN_OPIVI_NARROW_TRANS(vnsrl_vi, IMM_ZX, vnsrl_vx)
++GEN_OPIWI_NARROW_TRANS(vnsra_wi, IMM_ZX, vnsra_wx)
++GEN_OPIWI_NARROW_TRANS(vnsrl_wi, IMM_ZX, vnsrl_wx)
+ 
+ /* Vector Integer Comparison Instructions */
+ /*
+diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
+index f351743114..19542c9f5a 100644
+--- a/target/riscv/vector_helper.c
++++ b/target/riscv/vector_helper.c
+@@ -1373,18 +1373,18 @@ GEN_VEXT_SHIFT_VX(vsra_vx_w, int32_t, int32_t, H4, H4, DO_SRL, 0x1f)
+ GEN_VEXT_SHIFT_VX(vsra_vx_d, int64_t, int64_t, H8, H8, DO_SRL, 0x3f)
+ 
+ /* Vector Narrowing Integer Right Shift Instructions */
+-GEN_VEXT_SHIFT_VV(vnsrl_vv_b, uint8_t,  uint16_t, H1, H2, DO_SRL, 0xf)
+-GEN_VEXT_SHIFT_VV(vnsrl_vv_h, uint16_t, uint32_t, H2, H4, DO_SRL, 0x1f)
+-GEN_VEXT_SHIFT_VV(vnsrl_vv_w, uint32_t, uint64_t, H4, H8, DO_SRL, 0x3f)
+-GEN_VEXT_SHIFT_VV(vnsra_vv_b, uint8_t,  int16_t, H1, H2, DO_SRL, 0xf)
+-GEN_VEXT_SHIFT_VV(vnsra_vv_h, uint16_t, int32_t, H2, H4, DO_SRL, 0x1f)
+-GEN_VEXT_SHIFT_VV(vnsra_vv_w, uint32_t, int64_t, H4, H8, DO_SRL, 0x3f)
+-GEN_VEXT_SHIFT_VX(vnsrl_vx_b, uint8_t, uint16_t, H1, H2, DO_SRL, 0xf)
+-GEN_VEXT_SHIFT_VX(vnsrl_vx_h, uint16_t, uint32_t, H2, H4, DO_SRL, 0x1f)
+-GEN_VEXT_SHIFT_VX(vnsrl_vx_w, uint32_t, uint64_t, H4, H8, DO_SRL, 0x3f)
+-GEN_VEXT_SHIFT_VX(vnsra_vx_b, int8_t, int16_t, H1, H2, DO_SRL, 0xf)
+-GEN_VEXT_SHIFT_VX(vnsra_vx_h, int16_t, int32_t, H2, H4, DO_SRL, 0x1f)
+-GEN_VEXT_SHIFT_VX(vnsra_vx_w, int32_t, int64_t, H4, H8, DO_SRL, 0x3f)
++GEN_VEXT_SHIFT_VV(vnsrl_wv_b, uint8_t,  uint16_t, H1, H2, DO_SRL, 0xf)
++GEN_VEXT_SHIFT_VV(vnsrl_wv_h, uint16_t, uint32_t, H2, H4, DO_SRL, 0x1f)
++GEN_VEXT_SHIFT_VV(vnsrl_wv_w, uint32_t, uint64_t, H4, H8, DO_SRL, 0x3f)
++GEN_VEXT_SHIFT_VV(vnsra_wv_b, uint8_t,  int16_t, H1, H2, DO_SRL, 0xf)
++GEN_VEXT_SHIFT_VV(vnsra_wv_h, uint16_t, int32_t, H2, H4, DO_SRL, 0x1f)
++GEN_VEXT_SHIFT_VV(vnsra_wv_w, uint32_t, int64_t, H4, H8, DO_SRL, 0x3f)
++GEN_VEXT_SHIFT_VX(vnsrl_wx_b, uint8_t, uint16_t, H1, H2, DO_SRL, 0xf)
++GEN_VEXT_SHIFT_VX(vnsrl_wx_h, uint16_t, uint32_t, H2, H4, DO_SRL, 0x1f)
++GEN_VEXT_SHIFT_VX(vnsrl_wx_w, uint32_t, uint64_t, H4, H8, DO_SRL, 0x3f)
++GEN_VEXT_SHIFT_VX(vnsra_wx_b, int8_t, int16_t, H1, H2, DO_SRL, 0xf)
++GEN_VEXT_SHIFT_VX(vnsra_wx_h, int16_t, int32_t, H2, H4, DO_SRL, 0x1f)
++GEN_VEXT_SHIFT_VX(vnsra_wx_w, int32_t, int64_t, H4, H8, DO_SRL, 0x3f)
+ 
+ /* Vector Integer Comparison Instructions */
+ #define DO_MSEQ(N, M) (N == M)
+-- 
+2.33.1
+
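Editor's note: the vv→wv renaming above reflects that the source operand is 2*SEW wide while the result is SEW wide, with the shift amount taken modulo 2*SEW (the 0xf/0x1f/0x3f masks). A toy model of a single vnsrl.wv element for SEW=8, with illustrative names:

    #include <stdint.h>
    #include <stdio.h>

    static uint8_t vnsrl_w_b(uint16_t wide, uint8_t shift)
    {
        return (uint8_t)(wide >> (shift & 0xf));   /* shift the wide source, then narrow */
    }

    int main(void)
    {
        printf("%#x\n", vnsrl_w_b(0xabcd, 8));     /* prints 0xab */
        return 0;
    }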

+ 32 - 0
recipes-devtools/qemu/qemu/0052-target-riscv-rvv-1.0-widening-integer-multiply-add-i.patch

@@ -0,0 +1,32 @@
+From 1861022b951f91710d6021c911d94429945ff814 Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Thu, 2 Jul 2020 12:19:00 +0800
+Subject: [PATCH 052/107] target/riscv: rvv-1.0: widening integer multiply-add
+ instructions
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+---
+ target/riscv/insn32.decode | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
+index d3d70b47c1..0217ec19f8 100644
+--- a/target/riscv/insn32.decode
++++ b/target/riscv/insn32.decode
+@@ -449,9 +449,9 @@ vwmaccu_vv      111100 . ..... ..... 010 ..... 1010111 @r_vm
+ vwmaccu_vx      111100 . ..... ..... 110 ..... 1010111 @r_vm
+ vwmacc_vv       111101 . ..... ..... 010 ..... 1010111 @r_vm
+ vwmacc_vx       111101 . ..... ..... 110 ..... 1010111 @r_vm
+-vwmaccsu_vv     111110 . ..... ..... 010 ..... 1010111 @r_vm
+-vwmaccsu_vx     111110 . ..... ..... 110 ..... 1010111 @r_vm
+-vwmaccus_vx     111111 . ..... ..... 110 ..... 1010111 @r_vm
++vwmaccsu_vv     111111 . ..... ..... 010 ..... 1010111 @r_vm
++vwmaccsu_vx     111111 . ..... ..... 110 ..... 1010111 @r_vm
++vwmaccus_vx     111110 . ..... ..... 110 ..... 1010111 @r_vm
+ vmv_v_v         010111 1 00000 ..... 000 ..... 1010111 @r2
+ vmv_v_x         010111 1 00000 ..... 100 ..... 1010111 @r2
+ vmv_v_i         010111 1 00000 ..... 011 ..... 1010111 @r2
+-- 
+2.33.1
+

+ 30 - 0
recipes-devtools/qemu/qemu/0053-target-riscv-rvv-1.0-single-width-saturating-add-and.patch

@@ -0,0 +1,30 @@
+From edd72346d33d93a26117ba9097ffa3c120e62411 Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Thu, 2 Jul 2020 13:34:56 +0800
+Subject: [PATCH 053/107] target/riscv: rvv-1.0: single-width saturating add
+ and subtract instructions
+
+Sign-extend vsaddu.vi immediate value.
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+---
+ target/riscv/insn_trans/trans_rvv.c.inc | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
+index ebbb415c17..715b09eda9 100644
+--- a/target/riscv/insn_trans/trans_rvv.c.inc
++++ b/target/riscv/insn_trans/trans_rvv.c.inc
+@@ -2232,7 +2232,7 @@ GEN_OPIVX_TRANS(vsaddu_vx,  opivx_check)
+ GEN_OPIVX_TRANS(vsadd_vx,  opivx_check)
+ GEN_OPIVX_TRANS(vssubu_vx,  opivx_check)
+ GEN_OPIVX_TRANS(vssub_vx,  opivx_check)
+-GEN_OPIVI_TRANS(vsaddu_vi, IMM_ZX, vsaddu_vx, opivx_check)
++GEN_OPIVI_TRANS(vsaddu_vi, IMM_SX, vsaddu_vx, opivx_check)
+ GEN_OPIVI_TRANS(vsadd_vi, IMM_SX, vsadd_vx, opivx_check)
+ 
+ /* Vector Single-Width Averaging Add and Subtract */
+-- 
+2.33.1
+
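Editor's note: the IMM_ZX→IMM_SX change above means the 5-bit immediate of vsaddu.vi is sign-extended before the unsigned saturating add, so imm = -1 contributes an all-ones SEW-bit value rather than 31. A toy SEW=8 sketch (illustrative names only, not the QEMU helpers):

    #include <stdint.h>
    #include <stdio.h>

    static uint8_t vsaddu_imm8(uint8_t s2, int32_t simm5)
    {
        uint8_t s1 = (uint8_t)(int8_t)simm5;                 /* sign-extend, then reinterpret */
        uint16_t sum = (uint16_t)s2 + s1;
        return sum > UINT8_MAX ? UINT8_MAX : (uint8_t)sum;   /* unsigned saturation */
    }

    int main(void)
    {
        /* imm = -1 adds 0xff (and saturates to 255), not 31 as zero-extension would give */
        printf("%u\n", (unsigned)vsaddu_imm8(1, -1));
        return 0;
    }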

+ 76 - 0
recipes-devtools/qemu/qemu/0054-target-riscv-rvv-1.0-integer-comparison-instructions.patch

@@ -0,0 +1,76 @@
+From 2b28bc696cde7658f9e987f3d001e2ac51356223 Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Tue, 4 Aug 2020 11:06:25 +0800
+Subject: [PATCH 054/107] target/riscv: rvv-1.0: integer comparison
+ instructions
+
+* Sign-extend vmsleu.vi and vmsgtu.vi immediate values.
+* Remove "set tail elements to zeros" as tail elements can be left unchanged
+  under either the undisturbed or agnostic VTA setting.
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+---
+ target/riscv/insn_trans/trans_rvv.c.inc | 4 ++--
+ target/riscv/vector_helper.c            | 8 --------
+ 2 files changed, 2 insertions(+), 10 deletions(-)
+
+diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
+index 715b09eda9..5f02399ad3 100644
+--- a/target/riscv/insn_trans/trans_rvv.c.inc
++++ b/target/riscv/insn_trans/trans_rvv.c.inc
+@@ -2038,9 +2038,9 @@ GEN_OPIVX_TRANS(vmsgt_vx, opivx_cmp_check)
+ 
+ GEN_OPIVI_TRANS(vmseq_vi, IMM_SX, vmseq_vx, opivx_cmp_check)
+ GEN_OPIVI_TRANS(vmsne_vi, IMM_SX, vmsne_vx, opivx_cmp_check)
+-GEN_OPIVI_TRANS(vmsleu_vi, IMM_ZX, vmsleu_vx, opivx_cmp_check)
++GEN_OPIVI_TRANS(vmsleu_vi, IMM_SX, vmsleu_vx, opivx_cmp_check)
+ GEN_OPIVI_TRANS(vmsle_vi, IMM_SX, vmsle_vx, opivx_cmp_check)
+-GEN_OPIVI_TRANS(vmsgtu_vi, IMM_ZX, vmsgtu_vx, opivx_cmp_check)
++GEN_OPIVI_TRANS(vmsgtu_vi, IMM_SX, vmsgtu_vx, opivx_cmp_check)
+ GEN_OPIVI_TRANS(vmsgt_vi, IMM_SX, vmsgt_vx, opivx_cmp_check)
+ 
+ /* Vector Integer Min/Max Instructions */
+diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
+index 19542c9f5a..f9c2f9cd89 100644
+--- a/target/riscv/vector_helper.c
++++ b/target/riscv/vector_helper.c
+@@ -1399,7 +1399,6 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2,   \
+ {                                                             \
+     uint32_t vm = vext_vm(desc);                              \
+     uint32_t vl = env->vl;                                    \
+-    uint32_t vlmax = vext_maxsz(desc) / sizeof(ETYPE);        \
+     uint32_t i;                                               \
+                                                               \
+     for (i = 0; i < vl; i++) {                                \
+@@ -1410,9 +1409,6 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2,   \
+         }                                                     \
+         vext_set_elem_mask(vd, i, DO_OP(s2, s1));             \
+     }                                                         \
+-    for (; i < vlmax; i++) {                                  \
+-        vext_set_elem_mask(vd, i, 0);                         \
+-    }                                                         \
+ }
+ 
+ GEN_VEXT_CMP_VV(vmseq_vv_b, uint8_t,  H1, DO_MSEQ)
+@@ -1451,7 +1447,6 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2,   \
+ {                                                                   \
+     uint32_t vm = vext_vm(desc);                                    \
+     uint32_t vl = env->vl;                                          \
+-    uint32_t vlmax = vext_maxsz(desc) / sizeof(ETYPE);              \
+     uint32_t i;                                                     \
+                                                                     \
+     for (i = 0; i < vl; i++) {                                      \
+@@ -1462,9 +1457,6 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2,   \
+         vext_set_elem_mask(vd, i,                                   \
+                 DO_OP(s2, (ETYPE)(target_long)s1));                 \
+     }                                                               \
+-    for (; i < vlmax; i++) {                                        \
+-        vext_set_elem_mask(vd, i, 0);                               \
+-    }                                                               \
+ }
+ 
+ GEN_VEXT_CMP_VX(vmseq_vx_b, uint8_t,  H1, DO_MSEQ)
+-- 
+2.33.1
+

+ 55 - 0
recipes-devtools/qemu/qemu/0055-target-riscv-rvv-1.0-floating-point-compare-instruct.patch

@@ -0,0 +1,55 @@
+From 4fa4dce6c817081c33c7b89bae8e0866d0a4a920 Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Thu, 30 Jul 2020 21:29:29 +0800
+Subject: [PATCH 055/107] target/riscv: rvv-1.0: floating-point compare
+ instructions
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+---
+ target/riscv/vector_helper.c | 8 --------
+ 1 file changed, 8 deletions(-)
+
+diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
+index f9c2f9cd89..ef3875ee7d 100644
+--- a/target/riscv/vector_helper.c
++++ b/target/riscv/vector_helper.c
+@@ -3919,7 +3919,6 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2,   \
+ {                                                             \
+     uint32_t vm = vext_vm(desc);                              \
+     uint32_t vl = env->vl;                                    \
+-    uint32_t vlmax = vext_maxsz(desc) / sizeof(ETYPE);        \
+     uint32_t i;                                               \
+                                                               \
+     for (i = 0; i < vl; i++) {                                \
+@@ -3931,9 +3930,6 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2,   \
+         vext_set_elem_mask(vd, i,                             \
+                            DO_OP(s2, s1, &env->fp_status));   \
+     }                                                         \
+-    for (; i < vlmax; i++) {                                  \
+-        vext_set_elem_mask(vd, i, 0);                         \
+-    }                                                         \
+ }
+ 
+ GEN_VEXT_CMP_VV_ENV(vmfeq_vv_h, uint16_t, H2, float16_eq_quiet)
+@@ -3946,7 +3942,6 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2,       \
+ {                                                                   \
+     uint32_t vm = vext_vm(desc);                                    \
+     uint32_t vl = env->vl;                                          \
+-    uint32_t vlmax = vext_max_elems(desc, ctzl(sizeof(ETYPE)));     \
+     uint32_t i;                                                     \
+                                                                     \
+     for (i = 0; i < vl; i++) {                                      \
+@@ -3957,9 +3952,6 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2,       \
+         vext_set_elem_mask(vd, i,                                   \
+                            DO_OP(s2, (ETYPE)s1, &env->fp_status));  \
+     }                                                               \
+-    for (; i < vlmax; i++) {                                        \
+-        vext_set_elem_mask(vd, i, 0);                               \
+-    }                                                               \
+ }
+ 
+ GEN_VEXT_CMP_VF(vmfeq_vf_h, uint16_t, H2, float16_eq_quiet)
+-- 
+2.33.1
+

+ 52 - 0
recipes-devtools/qemu/qemu/0056-target-riscv-rvv-1.0-mask-register-logical-instructi.patch

@@ -0,0 +1,52 @@
+From 33266e54a9e325764d97200944d68f04a2bdcd07 Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Thu, 30 Jul 2020 21:31:27 +0800
+Subject: [PATCH 056/107] target/riscv: rvv-1.0: mask-register logical
+ instructions
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+---
+ target/riscv/insn_trans/trans_rvv.c.inc | 3 ++-
+ target/riscv/vector_helper.c            | 4 ----
+ 2 files changed, 2 insertions(+), 5 deletions(-)
+
+diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
+index 5f02399ad3..73537bc3ad 100644
+--- a/target/riscv/insn_trans/trans_rvv.c.inc
++++ b/target/riscv/insn_trans/trans_rvv.c.inc
+@@ -2881,7 +2881,8 @@ GEN_OPFVV_WIDEN_TRANS(vfwredsum_vs, reduction_check)
+ #define GEN_MM_TRANS(NAME)                                         \
+ static bool trans_##NAME(DisasContext *s, arg_r *a)                \
+ {                                                                  \
+-    if (vext_check_isa_ill(s)) {                                   \
++    if (require_rvv(s) &&                                          \
++        vext_check_isa_ill(s)) {                                   \
+         uint32_t data = 0;                                         \
+         gen_helper_gvec_4_ptr *fn = gen_helper_##NAME;             \
+         TCGLabel *over = gen_new_label();                          \
+diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
+index ef3875ee7d..6af15eb8f2 100644
+--- a/target/riscv/vector_helper.c
++++ b/target/riscv/vector_helper.c
+@@ -4440,7 +4440,6 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1,          \
+                   void *vs2, CPURISCVState *env,          \
+                   uint32_t desc)                          \
+ {                                                         \
+-    uint32_t vlmax = env_archcpu(env)->cfg.vlen;          \
+     uint32_t vl = env->vl;                                \
+     uint32_t i;                                           \
+     int a, b;                                             \
+@@ -4450,9 +4449,6 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1,          \
+         b = vext_elem_mask(vs2, i);                       \
+         vext_set_elem_mask(vd, i, OP(b, a));              \
+     }                                                     \
+-    for (; i < vlmax; i++) {                              \
+-        vext_set_elem_mask(vd, i, 0);                     \
+-    }                                                     \
+ }
+ 
+ #define DO_NAND(N, M)  (!(N & M))
+-- 
+2.33.1
+

+ 51 - 0
recipes-devtools/qemu/qemu/0057-target-riscv-rvv-1.0-slide-instructions.patch

@@ -0,0 +1,51 @@
+From 1e7c859fbc49a3fa4e2b9057b576abde7611ef0a Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Mon, 17 Aug 2020 14:45:17 +0800
+Subject: [PATCH 057/107] target/riscv: rvv-1.0: slide instructions
+
+* Remove clear function from helper functions as the tail elements
+  are unchanged in RVV 1.0.
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+---
+ target/riscv/vector_helper.c | 19 ++++++++++++-------
+ 1 file changed, 12 insertions(+), 7 deletions(-)
+
+diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
+index 6af15eb8f2..745fb01aa1 100644
+--- a/target/riscv/vector_helper.c
++++ b/target/riscv/vector_helper.c
+@@ -4639,17 +4639,22 @@ GEN_VEXT_VSLIDEUP_VX(vslideup_vx_d, uint64_t, H8)
+ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2,         \
+                   CPURISCVState *env, uint32_t desc)                      \
+ {                                                                         \
+-    uint32_t vlmax = env_archcpu(env)->cfg.vlen;                          \
++    uint32_t vlmax = vext_max_elems(desc, ctzl(sizeof(ETYPE)));           \
+     uint32_t vm = vext_vm(desc);                                          \
+     uint32_t vl = env->vl;                                                \
+-    target_ulong offset = s1, i;                                          \
++    target_ulong i_max, i;                                                \
+                                                                           \
+-    for (i = 0; i < vl; ++i) {                                            \
+-        target_ulong j = i + offset;                                      \
+-        if (!vm && !vext_elem_mask(v0, i)) {                              \
+-            continue;                                                     \
++    i_max = MIN(s1 < vlmax ? vlmax - s1 : 0, vl);                         \
++    for (i = 0; i < i_max; ++i) {                                         \
++        if (vm || vext_elem_mask(v0, i)) {                                \
++            *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i + s1));          \
++        }                                                                 \
++    }                                                                     \
++                                                                          \
++    for (i = i_max; i < vl; ++i) {                                        \
++        if (vm || vext_elem_mask(v0, i)) {                                \
++            *((ETYPE *)vd + H(i)) = 0;                                    \
+         }                                                                 \
+-        *((ETYPE *)vd + H(i)) = j >= vlmax ? 0 : *((ETYPE *)vs2 + H(j));  \
+     }                                                                     \
+ }
+ 
+-- 
+2.33.1
+
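Editor's note: the reworked vslidedown body above precomputes i_max = MIN(vlmax - s1, vl) (0 when s1 >= vlmax): below i_max, active elements copy vs2[i + s1]; from i_max up to vl they are written as zero, so vs2 is never indexed past vlmax. A minimal sketch of the bound, assuming vlmax = 16 and vl = 16:

    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
        unsigned vlmax = 16, vl = 16, s1 = 12;
        unsigned i_max = MIN(s1 < vlmax ? vlmax - s1 : 0, vl);
        /* elements 0..3 copy vs2[i + 12]; elements 4..15 are zeroed (if active) */
        printf("i_max = %u\n", i_max);
        return 0;
    }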

+ 238 - 0
recipes-devtools/qemu/qemu/0058-target-riscv-rvv-1.0-floating-point-slide-instructio.patch

@@ -0,0 +1,238 @@
+From 03f8694b21e0ea6ae46eedfe9ecaa6bc4b7863e7 Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Mon, 3 Aug 2020 19:59:31 +0800
+Subject: [PATCH 058/107] target/riscv: rvv-1.0: floating-point slide
+ instructions
+
+Add the following instructions:
+
+* vfslide1up.vf
+* vfslide1down.vf
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+---
+ target/riscv/helper.h                   |   7 ++
+ target/riscv/insn32.decode              |   2 +
+ target/riscv/insn_trans/trans_rvv.c.inc |  16 +++
+ target/riscv/vector_helper.c            | 141 ++++++++++++++++--------
+ 4 files changed, 121 insertions(+), 45 deletions(-)
+
+diff --git a/target/riscv/helper.h b/target/riscv/helper.h
+index b582fe8720..444dbca071 100644
+--- a/target/riscv/helper.h
++++ b/target/riscv/helper.h
+@@ -1146,6 +1146,13 @@ DEF_HELPER_6(vslide1down_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+ DEF_HELPER_6(vslide1down_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+ DEF_HELPER_6(vslide1down_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+ 
++DEF_HELPER_6(vfslide1up_vf_h, void, ptr, ptr, i64, ptr, env, i32)
++DEF_HELPER_6(vfslide1up_vf_w, void, ptr, ptr, i64, ptr, env, i32)
++DEF_HELPER_6(vfslide1up_vf_d, void, ptr, ptr, i64, ptr, env, i32)
++DEF_HELPER_6(vfslide1down_vf_h, void, ptr, ptr, i64, ptr, env, i32)
++DEF_HELPER_6(vfslide1down_vf_w, void, ptr, ptr, i64, ptr, env, i32)
++DEF_HELPER_6(vfslide1down_vf_d, void, ptr, ptr, i64, ptr, env, i32)
++
+ DEF_HELPER_6(vrgather_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+ DEF_HELPER_6(vrgather_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+ DEF_HELPER_6(vrgather_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
+index 0217ec19f8..169b73fdc4 100644
+--- a/target/riscv/insn32.decode
++++ b/target/riscv/insn32.decode
+@@ -552,6 +552,8 @@ vfsgnjn_vv      001001 . ..... ..... 001 ..... 1010111 @r_vm
+ vfsgnjn_vf      001001 . ..... ..... 101 ..... 1010111 @r_vm
+ vfsgnjx_vv      001010 . ..... ..... 001 ..... 1010111 @r_vm
+ vfsgnjx_vf      001010 . ..... ..... 101 ..... 1010111 @r_vm
++vfslide1up_vf   001110 . ..... ..... 101 ..... 1010111 @r_vm
++vfslide1down_vf 001111 . ..... ..... 101 ..... 1010111 @r_vm
+ vmfeq_vv        011000 . ..... ..... 001 ..... 1010111 @r_vm
+ vmfeq_vf        011000 . ..... ..... 101 ..... 1010111 @r_vm
+ vmfne_vv        011100 . ..... ..... 001 ..... 1010111 @r_vm
+diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
+index 73537bc3ad..c1aaf01bdc 100644
+--- a/target/riscv/insn_trans/trans_rvv.c.inc
++++ b/target/riscv/insn_trans/trans_rvv.c.inc
+@@ -3357,6 +3357,22 @@ GEN_OPIVX_TRANS(vslidedown_vx, slidedown_check)
+ GEN_OPIVX_TRANS(vslide1down_vx, slidedown_check)
+ GEN_OPIVI_TRANS(vslidedown_vi, IMM_ZX, vslidedown_vx, slidedown_check)
+ 
++/* Vector Floating-Point Slide Instructions */
++static bool fslideup_check(DisasContext *s, arg_rmrr *a)
++{
++    return slideup_check(s, a) &&
++           require_rvf(s);
++}
++
++static bool fslidedown_check(DisasContext *s, arg_rmrr *a)
++{
++    return slidedown_check(s, a) &&
++           require_rvf(s);
++}
++
++GEN_OPFVF_TRANS(vfslide1up_vf, fslideup_check)
++GEN_OPFVF_TRANS(vfslide1down_vf, fslidedown_check)
++
+ /* Vector Register Gather Instruction */
+ static bool vrgather_vv_check(DisasContext *s, arg_rmrr *a)
+ {
+diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
+index 745fb01aa1..3f9556f4d4 100644
+--- a/target/riscv/vector_helper.c
++++ b/target/riscv/vector_helper.c
+@@ -4664,57 +4664,108 @@ GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_h, uint16_t, H2)
+ GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_w, uint32_t, H4)
+ GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_d, uint64_t, H8)
+ 
+-#define GEN_VEXT_VSLIDE1UP_VX(NAME, ETYPE, H)                             \
+-void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2,         \
+-                  CPURISCVState *env, uint32_t desc)                      \
+-{                                                                         \
+-    uint32_t vm = vext_vm(desc);                                          \
+-    uint32_t vl = env->vl;                                                \
+-    uint32_t i;                                                           \
+-                                                                          \
+-    for (i = 0; i < vl; i++) {                                            \
+-        if (!vm && !vext_elem_mask(v0, i)) {                              \
+-            continue;                                                     \
+-        }                                                                 \
+-        if (i == 0) {                                                     \
+-            *((ETYPE *)vd + H(i)) = s1;                                   \
+-        } else {                                                          \
+-            *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i - 1));           \
+-        }                                                                 \
+-    }                                                                     \
++#define GEN_VEXT_VSLIE1UP(ESZ, H)                                           \
++static void vslide1up_##ESZ(void *vd, void *v0, target_ulong s1, void *vs2, \
++                     CPURISCVState *env, uint32_t desc)                     \
++{                                                                           \
++    typedef uint##ESZ##_t ETYPE;                                            \
++    uint32_t vm = vext_vm(desc);                                            \
++    uint32_t vl = env->vl;                                                  \
++    uint32_t i;                                                             \
++                                                                            \
++    for (i = 0; i < vl; i++) {                                              \
++        if (!vm && !vext_elem_mask(v0, i)) {                                \
++            continue;                                                       \
++        }                                                                   \
++        if (i == 0) {                                                       \
++            *((ETYPE *)vd + H(i)) = s1;                                     \
++        } else {                                                            \
++            *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i - 1));             \
++        }                                                                   \
++    }                                                                       \
++}
++
++GEN_VEXT_VSLIE1UP(8,  H1)
++GEN_VEXT_VSLIE1UP(16, H2)
++GEN_VEXT_VSLIE1UP(32, H4)
++GEN_VEXT_VSLIE1UP(64, H8)
++
++#define GEN_VEXT_VSLIDE1UP_VX(NAME, ESZ)                          \
++void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
++                  CPURISCVState *env, uint32_t desc)              \
++{                                                                 \
++    vslide1up_##ESZ(vd, v0, s1, vs2, env, desc);                  \
+ }
+ 
+ /* vslide1up.vx vd, vs2, rs1, vm # vd[0]=x[rs1], vd[i+1] = vs2[i] */
+-GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_b, uint8_t,  H1)
+-GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_h, uint16_t, H2)
+-GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_w, uint32_t, H4)
+-GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_d, uint64_t, H8)
+-
+-#define GEN_VEXT_VSLIDE1DOWN_VX(NAME, ETYPE, H)                           \
+-void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2,         \
+-                  CPURISCVState *env, uint32_t desc)                      \
+-{                                                                         \
+-    uint32_t vm = vext_vm(desc);                                          \
+-    uint32_t vl = env->vl;                                                \
+-    uint32_t i;                                                           \
+-                                                                          \
+-    for (i = 0; i < vl; i++) {                                            \
+-        if (!vm && !vext_elem_mask(v0, i)) {                              \
+-            continue;                                                     \
+-        }                                                                 \
+-        if (i == vl - 1) {                                                \
+-            *((ETYPE *)vd + H(i)) = s1;                                   \
+-        } else {                                                          \
+-            *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i + 1));           \
+-        }                                                                 \
+-    }                                                                     \
++GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_b, 8)
++GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_h, 16)
++GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_w, 32)
++GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_d, 64)
++
++#define GEN_VEXT_VSLIDE1DOWN(ESZ, H)                                          \
++static void vslide1down_##ESZ(void *vd, void *v0, target_ulong s1, void *vs2, \
++                       CPURISCVState *env, uint32_t desc)                     \
++{                                                                             \
++    typedef uint##ESZ##_t ETYPE;                                              \
++    uint32_t vm = vext_vm(desc);                                              \
++    uint32_t vl = env->vl;                                                    \
++    uint32_t i;                                                               \
++                                                                              \
++    for (i = 0; i < vl; i++) {                                                \
++        if (!vm && !vext_elem_mask(v0, i)) {                                  \
++            continue;                                                         \
++        }                                                                     \
++        if (i == vl - 1) {                                                    \
++            *((ETYPE *)vd + H(i)) = s1;                                       \
++        } else {                                                              \
++            *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i + 1));               \
++        }                                                                     \
++    }                                                                         \
++}
++
++GEN_VEXT_VSLIDE1DOWN(8,  H1)
++GEN_VEXT_VSLIDE1DOWN(16, H2)
++GEN_VEXT_VSLIDE1DOWN(32, H4)
++GEN_VEXT_VSLIDE1DOWN(64, H8)
++
++#define GEN_VEXT_VSLIDE1DOWN_VX(NAME, ESZ)                        \
++void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
++                  CPURISCVState *env, uint32_t desc)              \
++{                                                                 \
++    vslide1down_##ESZ(vd, v0, s1, vs2, env, desc);                \
+ }
+ 
+ /* vslide1down.vx vd, vs2, rs1, vm # vd[i] = vs2[i+1], vd[vl-1]=x[rs1] */
+-GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_b, uint8_t,  H1)
+-GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_h, uint16_t, H2)
+-GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_w, uint32_t, H4)
+-GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_d, uint64_t, H8)
++GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_b, 8)
++GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_h, 16)
++GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_w, 32)
++GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_d, 64)
++
++/* Vector Floating-Point Slide Instructions */
++#define GEN_VEXT_VFSLIDE1UP_VF(NAME, ESZ)                     \
++void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \
++                  CPURISCVState *env, uint32_t desc)          \
++{                                                             \
++    vslide1up_##ESZ(vd, v0, s1, vs2, env, desc);              \
++}
++
++/* vfslide1up.vf vd, vs2, rs1, vm # vd[0]=f[rs1], vd[i+1] = vs2[i] */
++GEN_VEXT_VFSLIDE1UP_VF(vfslide1up_vf_h, 16)
++GEN_VEXT_VFSLIDE1UP_VF(vfslide1up_vf_w, 32)
++GEN_VEXT_VFSLIDE1UP_VF(vfslide1up_vf_d, 64)
++
++#define GEN_VEXT_VFSLIDE1DOWN_VF(NAME, ESZ)                   \
++void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \
++                  CPURISCVState *env, uint32_t desc)          \
++{                                                             \
++    vslide1down_##ESZ(vd, v0, s1, vs2, env, desc);            \
++}
++
++/* vfslide1down.vf vd, vs2, rs1, vm # vd[i] = vs2[i+1], vd[vl-1]=f[rs1] */
++GEN_VEXT_VFSLIDE1DOWN_VF(vfslide1down_vf_h, 16)
++GEN_VEXT_VFSLIDE1DOWN_VF(vfslide1down_vf_w, 32)
++GEN_VEXT_VFSLIDE1DOWN_VF(vfslide1down_vf_d, 64)
+ 
+ /* Vector Register Gather Instruction */
+ #define GEN_VEXT_VRGATHER_VV(NAME, TS1, TS2, HS1, HS2)                    \
+-- 
+2.33.1
+
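Editor's note: the refactoring above generates one slide1up/slide1down body per element size so that the integer (vslide1up.vx) and new floating-point (vfslide1up.vf) wrappers can share it. A greatly simplified 32-bit, unmasked model of the shared body; this is a sketch, not QEMU's macro machinery.

    #include <stdint.h>
    #include <stdio.h>

    static void slide1up_32(uint32_t *vd, const uint32_t *vs2, uint64_t s1, unsigned vl)
    {
        for (unsigned i = 0; i < vl; i++) {
            vd[i] = (i == 0) ? (uint32_t)s1 : vs2[i - 1];  /* vd[0]=s1, vd[i+1]=vs2[i] */
        }
    }

    int main(void)
    {
        uint32_t vs2[4] = {10, 20, 30, 40}, vd[4];
        slide1up_32(vd, vs2, 99, 4);                       /* vd = {99, 10, 20, 30} */
        printf("%u %u %u %u\n", vd[0], vd[1], vd[2], vd[3]);
        return 0;
    }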

+ 177 - 0
recipes-devtools/qemu/qemu/0059-target-riscv-rvv-1.0-narrowing-fixed-point-clip-inst.patch

@@ -0,0 +1,177 @@
+From 41602bba0f77b79025625f539bd61bbc99d8e654 Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Thu, 2 Jul 2020 16:40:44 +0800
+Subject: [PATCH 059/107] target/riscv: rvv-1.0: narrowing fixed-point clip
+ instructions
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+---
+ target/riscv/helper.h                   | 24 ++++++------
+ target/riscv/insn32.decode              | 12 +++---
+ target/riscv/insn_trans/trans_rvv.c.inc | 12 +++---
+ target/riscv/vector_helper.c            | 52 ++++++++++++-------------
+ 4 files changed, 50 insertions(+), 50 deletions(-)
+
+diff --git a/target/riscv/helper.h b/target/riscv/helper.h
+index 444dbca071..7ba8c77c6f 100644
+--- a/target/riscv/helper.h
++++ b/target/riscv/helper.h
+@@ -800,18 +800,18 @@ DEF_HELPER_6(vssra_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+ DEF_HELPER_6(vssra_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+ DEF_HELPER_6(vssra_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+ 
+-DEF_HELPER_6(vnclip_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+-DEF_HELPER_6(vnclip_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+-DEF_HELPER_6(vnclip_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+-DEF_HELPER_6(vnclipu_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+-DEF_HELPER_6(vnclipu_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+-DEF_HELPER_6(vnclipu_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+-DEF_HELPER_6(vnclipu_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vnclipu_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vnclipu_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vnclip_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vnclip_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vnclip_vx_w, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vnclip_wv_b, void, ptr, ptr, ptr, ptr, env, i32)
++DEF_HELPER_6(vnclip_wv_h, void, ptr, ptr, ptr, ptr, env, i32)
++DEF_HELPER_6(vnclip_wv_w, void, ptr, ptr, ptr, ptr, env, i32)
++DEF_HELPER_6(vnclipu_wv_b, void, ptr, ptr, ptr, ptr, env, i32)
++DEF_HELPER_6(vnclipu_wv_h, void, ptr, ptr, ptr, ptr, env, i32)
++DEF_HELPER_6(vnclipu_wv_w, void, ptr, ptr, ptr, ptr, env, i32)
++DEF_HELPER_6(vnclipu_wx_b, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vnclipu_wx_h, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vnclipu_wx_w, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vnclip_wx_b, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vnclip_wx_h, void, ptr, ptr, tl, ptr, env, i32)
++DEF_HELPER_6(vnclip_wx_w, void, ptr, ptr, tl, ptr, env, i32)
+ 
+ DEF_HELPER_6(vfadd_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+ DEF_HELPER_6(vfadd_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
+index 169b73fdc4..302b687a2d 100644
+--- a/target/riscv/insn32.decode
++++ b/target/riscv/insn32.decode
+@@ -491,12 +491,12 @@ vssrl_vi        101010 . ..... ..... 011 ..... 1010111 @r_vm
+ vssra_vv        101011 . ..... ..... 000 ..... 1010111 @r_vm
+ vssra_vx        101011 . ..... ..... 100 ..... 1010111 @r_vm
+ vssra_vi        101011 . ..... ..... 011 ..... 1010111 @r_vm
+-vnclipu_vv      101110 . ..... ..... 000 ..... 1010111 @r_vm
+-vnclipu_vx      101110 . ..... ..... 100 ..... 1010111 @r_vm
+-vnclipu_vi      101110 . ..... ..... 011 ..... 1010111 @r_vm
+-vnclip_vv       101111 . ..... ..... 000 ..... 1010111 @r_vm
+-vnclip_vx       101111 . ..... ..... 100 ..... 1010111 @r_vm
+-vnclip_vi       101111 . ..... ..... 011 ..... 1010111 @r_vm
++vnclipu_wv      101110 . ..... ..... 000 ..... 1010111 @r_vm
++vnclipu_wx      101110 . ..... ..... 100 ..... 1010111 @r_vm
++vnclipu_wi      101110 . ..... ..... 011 ..... 1010111 @r_vm
++vnclip_wv       101111 . ..... ..... 000 ..... 1010111 @r_vm
++vnclip_wx       101111 . ..... ..... 100 ..... 1010111 @r_vm
++vnclip_wi       101111 . ..... ..... 011 ..... 1010111 @r_vm
+ vfadd_vv        000000 . ..... ..... 001 ..... 1010111 @r_vm
+ vfadd_vf        000000 . ..... ..... 101 ..... 1010111 @r_vm
+ vfsub_vv        000010 . ..... ..... 001 ..... 1010111 @r_vm
+diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
+index c1aaf01bdc..aac29a31a7 100644
+--- a/target/riscv/insn_trans/trans_rvv.c.inc
++++ b/target/riscv/insn_trans/trans_rvv.c.inc
+@@ -2267,12 +2267,12 @@ GEN_OPIVI_TRANS(vssrl_vi, IMM_ZX, vssrl_vx, opivx_check)
+ GEN_OPIVI_TRANS(vssra_vi, IMM_SX, vssra_vx, opivx_check)
+ 
+ /* Vector Narrowing Fixed-Point Clip Instructions */
+-GEN_OPIVV_NARROW_TRANS(vnclipu_vv)
+-GEN_OPIVV_NARROW_TRANS(vnclip_vv)
+-GEN_OPIVX_NARROW_TRANS(vnclipu_vx)
+-GEN_OPIVX_NARROW_TRANS(vnclip_vx)
+-GEN_OPIVI_NARROW_TRANS(vnclipu_vi, IMM_ZX, vnclipu_vx)
+-GEN_OPIVI_NARROW_TRANS(vnclip_vi, IMM_ZX, vnclip_vx)
++GEN_OPIWV_NARROW_TRANS(vnclipu_wv)
++GEN_OPIWV_NARROW_TRANS(vnclip_wv)
++GEN_OPIWX_NARROW_TRANS(vnclipu_wx)
++GEN_OPIWX_NARROW_TRANS(vnclip_wx)
++GEN_OPIWI_NARROW_TRANS(vnclipu_wi, IMM_ZX, vnclipu_wx)
++GEN_OPIWI_NARROW_TRANS(vnclip_wi, IMM_ZX, vnclip_wx)
+ 
+ /*
+  *** Vector Float Point Arithmetic Instructions
+diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
+index 3f9556f4d4..dd18ca26f8 100644
+--- a/target/riscv/vector_helper.c
++++ b/target/riscv/vector_helper.c
+@@ -3084,19 +3084,19 @@ vnclip32(CPURISCVState *env, int vxrm, int64_t a, int32_t b)
+     }
+ }
+ 
+-RVVCALL(OPIVV2_RM, vnclip_vv_b, NOP_SSS_B, H1, H2, H1, vnclip8)
+-RVVCALL(OPIVV2_RM, vnclip_vv_h, NOP_SSS_H, H2, H4, H2, vnclip16)
+-RVVCALL(OPIVV2_RM, vnclip_vv_w, NOP_SSS_W, H4, H8, H4, vnclip32)
+-GEN_VEXT_VV_RM(vnclip_vv_b, 1, 1)
+-GEN_VEXT_VV_RM(vnclip_vv_h, 2, 2)
+-GEN_VEXT_VV_RM(vnclip_vv_w, 4, 4)
+-
+-RVVCALL(OPIVX2_RM, vnclip_vx_b, NOP_SSS_B, H1, H2, vnclip8)
+-RVVCALL(OPIVX2_RM, vnclip_vx_h, NOP_SSS_H, H2, H4, vnclip16)
+-RVVCALL(OPIVX2_RM, vnclip_vx_w, NOP_SSS_W, H4, H8, vnclip32)
+-GEN_VEXT_VX_RM(vnclip_vx_b, 1, 1)
+-GEN_VEXT_VX_RM(vnclip_vx_h, 2, 2)
+-GEN_VEXT_VX_RM(vnclip_vx_w, 4, 4)
++RVVCALL(OPIVV2_RM, vnclip_wv_b, NOP_SSS_B, H1, H2, H1, vnclip8)
++RVVCALL(OPIVV2_RM, vnclip_wv_h, NOP_SSS_H, H2, H4, H2, vnclip16)
++RVVCALL(OPIVV2_RM, vnclip_wv_w, NOP_SSS_W, H4, H8, H4, vnclip32)
++GEN_VEXT_VV_RM(vnclip_wv_b, 1, 1)
++GEN_VEXT_VV_RM(vnclip_wv_h, 2, 2)
++GEN_VEXT_VV_RM(vnclip_wv_w, 4, 4)
++
++RVVCALL(OPIVX2_RM, vnclip_wx_b, NOP_SSS_B, H1, H2, vnclip8)
++RVVCALL(OPIVX2_RM, vnclip_wx_h, NOP_SSS_H, H2, H4, vnclip16)
++RVVCALL(OPIVX2_RM, vnclip_wx_w, NOP_SSS_W, H4, H8, vnclip32)
++GEN_VEXT_VX_RM(vnclip_wx_b, 1, 1)
++GEN_VEXT_VX_RM(vnclip_wx_h, 2, 2)
++GEN_VEXT_VX_RM(vnclip_wx_w, 4, 4)
+ 
+ static inline uint8_t
+ vnclipu8(CPURISCVState *env, int vxrm, uint16_t a, uint8_t b)
+@@ -3134,7 +3134,7 @@ static inline uint32_t
+ vnclipu32(CPURISCVState *env, int vxrm, uint64_t a, uint32_t b)
+ {
+     uint8_t round, shift = b & 0x3f;
+-    int64_t res;
++    uint64_t res;
+ 
+     round = get_round(vxrm, a, shift);
+     res   = (a >> shift)  + round;
+@@ -3146,19 +3146,19 @@ vnclipu32(CPURISCVState *env, int vxrm, uint64_t a, uint32_t b)
+     }
+ }
+ 
+-RVVCALL(OPIVV2_RM, vnclipu_vv_b, NOP_UUU_B, H1, H2, H1, vnclipu8)
+-RVVCALL(OPIVV2_RM, vnclipu_vv_h, NOP_UUU_H, H2, H4, H2, vnclipu16)
+-RVVCALL(OPIVV2_RM, vnclipu_vv_w, NOP_UUU_W, H4, H8, H4, vnclipu32)
+-GEN_VEXT_VV_RM(vnclipu_vv_b, 1, 1)
+-GEN_VEXT_VV_RM(vnclipu_vv_h, 2, 2)
+-GEN_VEXT_VV_RM(vnclipu_vv_w, 4, 4)
++RVVCALL(OPIVV2_RM, vnclipu_wv_b, NOP_UUU_B, H1, H2, H1, vnclipu8)
++RVVCALL(OPIVV2_RM, vnclipu_wv_h, NOP_UUU_H, H2, H4, H2, vnclipu16)
++RVVCALL(OPIVV2_RM, vnclipu_wv_w, NOP_UUU_W, H4, H8, H4, vnclipu32)
++GEN_VEXT_VV_RM(vnclipu_wv_b, 1, 1)
++GEN_VEXT_VV_RM(vnclipu_wv_h, 2, 2)
++GEN_VEXT_VV_RM(vnclipu_wv_w, 4, 4)
+ 
+-RVVCALL(OPIVX2_RM, vnclipu_vx_b, NOP_UUU_B, H1, H2, vnclipu8)
+-RVVCALL(OPIVX2_RM, vnclipu_vx_h, NOP_UUU_H, H2, H4, vnclipu16)
+-RVVCALL(OPIVX2_RM, vnclipu_vx_w, NOP_UUU_W, H4, H8, vnclipu32)
+-GEN_VEXT_VX_RM(vnclipu_vx_b, 1, 1)
+-GEN_VEXT_VX_RM(vnclipu_vx_h, 2, 2)
+-GEN_VEXT_VX_RM(vnclipu_vx_w, 4, 4)
++RVVCALL(OPIVX2_RM, vnclipu_wx_b, NOP_UUU_B, H1, H2, vnclipu8)
++RVVCALL(OPIVX2_RM, vnclipu_wx_h, NOP_UUU_H, H2, H4, vnclipu16)
++RVVCALL(OPIVX2_RM, vnclipu_wx_w, NOP_UUU_W, H4, H8, vnclipu32)
++GEN_VEXT_VX_RM(vnclipu_wx_b, 1, 1)
++GEN_VEXT_VX_RM(vnclipu_wx_h, 2, 2)
++GEN_VEXT_VX_RM(vnclipu_wx_w, 4, 4)
+ 
+ /*
+  *** Vector Float Point Arithmetic Instructions
+-- 
+2.33.1
+
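
Besides renaming the narrowing clip helpers to the RVV 1.0 *_wv/*_wx forms, the hunk above switches vnclipu32's intermediate result from int64_t to uint64_t. A standalone C sketch (not the QEMU helper; rounding omitted) of why the unsigned intermediate matters for the saturation test:

    #include <stdint.h>
    #include <stdio.h>

    /* Narrow a 64-bit unsigned value to 32 bits with saturation, roughly what
     * vnclipu does. With a signed int64_t intermediate, inputs at or above
     * 2^63 compare as negative, "res > UINT32_MAX" is false, and the
     * saturation branch is skipped. */
    static uint32_t clip_u64_to_u32(uint64_t a, unsigned shift)
    {
        uint64_t res = a >> (shift & 0x3f);
        return res > UINT32_MAX ? UINT32_MAX : (uint32_t)res;
    }

    int main(void)
    {
        printf("%u\n", clip_u64_to_u32(0xffffffffffffffffULL, 8)); /* 4294967295 */
        return 0;
    }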

+ 64 - 0
recipes-devtools/qemu/qemu/0060-target-riscv-rvv-1.0-single-width-floating-point-red.patch

@@ -0,0 +1,64 @@
+From 0b0e6f66c2d99d2ab72663a649e8d0fbf98823c9 Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Wed, 5 Aug 2020 18:06:00 +0800
+Subject: [PATCH 060/107] target/riscv: rvv-1.0: single-width floating-point
+ reduction
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+---
+ target/riscv/insn_trans/trans_rvv.c.inc | 12 +++++++++---
+ target/riscv/vector_helper.c            | 12 ++++++------
+ 2 files changed, 15 insertions(+), 9 deletions(-)
+
+diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
+index aac29a31a7..6273bef23a 100644
+--- a/target/riscv/insn_trans/trans_rvv.c.inc
++++ b/target/riscv/insn_trans/trans_rvv.c.inc
+@@ -2866,9 +2866,15 @@ GEN_OPIVV_WIDEN_TRANS(vwredsum_vs, reduction_widen_check)
+ GEN_OPIVV_WIDEN_TRANS(vwredsumu_vs, reduction_widen_check)
+ 
+ /* Vector Single-Width Floating-Point Reduction Instructions */
+-GEN_OPFVV_TRANS(vfredsum_vs, reduction_check)
+-GEN_OPFVV_TRANS(vfredmax_vs, reduction_check)
+-GEN_OPFVV_TRANS(vfredmin_vs, reduction_check)
++static bool freduction_check(DisasContext *s, arg_rmrr *a)
++{
++    return reduction_check(s, a) &&
++           require_rvf(s);
++}
++
++GEN_OPFVV_TRANS(vfredsum_vs, freduction_check)
++GEN_OPFVV_TRANS(vfredmax_vs, freduction_check)
++GEN_OPFVV_TRANS(vfredmin_vs, freduction_check)
+ 
+ /* Vector Widening Floating-Point Reduction Instructions */
+ GEN_OPFVV_WIDEN_TRANS(vfwredsum_vs, reduction_check)
+diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
+index dd18ca26f8..fb01d126d7 100644
+--- a/target/riscv/vector_helper.c
++++ b/target/riscv/vector_helper.c
+@@ -4382,14 +4382,14 @@ GEN_VEXT_FRED(vfredsum_vs_w, uint32_t, uint32_t, H4, H4, float32_add)
+ GEN_VEXT_FRED(vfredsum_vs_d, uint64_t, uint64_t, H8, H8, float64_add)
+ 
+ /* Maximum value */
+-GEN_VEXT_FRED(vfredmax_vs_h, uint16_t, uint16_t, H2, H2, float16_maxnum)
+-GEN_VEXT_FRED(vfredmax_vs_w, uint32_t, uint32_t, H4, H4, float32_maxnum)
+-GEN_VEXT_FRED(vfredmax_vs_d, uint64_t, uint64_t, H8, H8, float64_maxnum)
++GEN_VEXT_FRED(vfredmax_vs_h, uint16_t, uint16_t, H2, H2, float16_maxnum_noprop)
++GEN_VEXT_FRED(vfredmax_vs_w, uint32_t, uint32_t, H4, H4, float32_maxnum_noprop)
++GEN_VEXT_FRED(vfredmax_vs_d, uint64_t, uint64_t, H8, H8, float64_maxnum_noprop)
+ 
+ /* Minimum value */
+-GEN_VEXT_FRED(vfredmin_vs_h, uint16_t, uint16_t, H2, H2, float16_minnum)
+-GEN_VEXT_FRED(vfredmin_vs_w, uint32_t, uint32_t, H4, H4, float32_minnum)
+-GEN_VEXT_FRED(vfredmin_vs_d, uint64_t, uint64_t, H8, H8, float64_minnum)
++GEN_VEXT_FRED(vfredmin_vs_h, uint16_t, uint16_t, H2, H2, float16_minnum_noprop)
++GEN_VEXT_FRED(vfredmin_vs_w, uint32_t, uint32_t, H4, H4, float32_minnum_noprop)
++GEN_VEXT_FRED(vfredmin_vs_d, uint64_t, uint64_t, H8, H8, float64_minnum_noprop)
+ 
+ /* Vector Widening Floating-Point Reduction Instructions */
+ /* Unordered reduce 2*SEW = 2*SEW + sum(promote(SEW)) */
+-- 
+2.33.1
+

+ 35 - 0
recipes-devtools/qemu/qemu/0061-target-riscv-rvv-1.0-widening-floating-point-reducti.patch

@@ -0,0 +1,35 @@
+From 90afc431254b8cf9351117af8fd4cdba06f1509b Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Wed, 5 Aug 2020 18:03:26 +0800
+Subject: [PATCH 061/107] target/riscv: rvv-1.0: widening floating-point
+ reduction instructions
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+---
+ target/riscv/insn_trans/trans_rvv.c.inc | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
+index 6273bef23a..f805fa52e2 100644
+--- a/target/riscv/insn_trans/trans_rvv.c.inc
++++ b/target/riscv/insn_trans/trans_rvv.c.inc
+@@ -2877,7 +2877,14 @@ GEN_OPFVV_TRANS(vfredmax_vs, freduction_check)
+ GEN_OPFVV_TRANS(vfredmin_vs, freduction_check)
+ 
+ /* Vector Widening Floating-Point Reduction Instructions */
+-GEN_OPFVV_WIDEN_TRANS(vfwredsum_vs, reduction_check)
++static bool freduction_widen_check(DisasContext *s, arg_rmrr *a)
++{
++    return reduction_widen_check(s, a) &&
++           require_scale_rvf(s) &&
++           (s->sew != MO_8);
++}
++
++GEN_OPFVV_WIDEN_TRANS(vfwredsum_vs, freduction_widen_check)
+ 
+ /*
+  *** Vector Mask Operations
+-- 
+2.33.1
+

+ 32 - 0
recipes-devtools/qemu/qemu/0062-target-riscv-rvv-1.0-single-width-scaling-shift-inst.patch

@@ -0,0 +1,32 @@
+From 9628ee8d1753f8c1efc9080c8634c455a77ebd7b Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Mon, 3 Aug 2020 22:35:18 +0800
+Subject: [PATCH 062/107] target/riscv: rvv-1.0: single-width scaling shift
+ instructions
+
+Truncate the vssra.vi immediate value to log2(SEW) bits.
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+---
+ target/riscv/insn_trans/trans_rvv.c.inc | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
+index f805fa52e2..2c601435ce 100644
+--- a/target/riscv/insn_trans/trans_rvv.c.inc
++++ b/target/riscv/insn_trans/trans_rvv.c.inc
+@@ -2263,8 +2263,8 @@ GEN_OPIVV_TRANS(vssrl_vv, opivv_check)
+ GEN_OPIVV_TRANS(vssra_vv, opivv_check)
+ GEN_OPIVX_TRANS(vssrl_vx,  opivx_check)
+ GEN_OPIVX_TRANS(vssra_vx,  opivx_check)
+-GEN_OPIVI_TRANS(vssrl_vi, IMM_ZX, vssrl_vx, opivx_check)
+-GEN_OPIVI_TRANS(vssra_vi, IMM_SX, vssra_vx, opivx_check)
++GEN_OPIVI_TRANS(vssrl_vi, IMM_TRUNC_SEW, vssrl_vx, opivx_check)
++GEN_OPIVI_TRANS(vssra_vi, IMM_TRUNC_SEW, vssra_vx, opivx_check)
+ 
+ /* Vector Narrowing Fixed-Point Clip Instructions */
+ GEN_OPIWV_NARROW_TRANS(vnclipu_wv)
+-- 
+2.33.1
+
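
The IMM_TRUNC_SEW mode used above appears to mask the scaling-shift immediate down to log2(SEW) bits, so the shift amount never reaches SEW. A standalone C sketch of that truncation (not the QEMU translator code):

    #include <stdio.h>

    /* Keep only the low log2(SEW) bits of the immediate; for SEW=16 the mask
     * is 4 bits wide, so an immediate of 17 becomes 1. */
    static unsigned trunc_imm_to_sew(unsigned imm, unsigned sew_bits)
    {
        unsigned log2_sew = 0;
        while ((1u << log2_sew) < sew_bits) {
            log2_sew++;
        }
        return imm & ((1u << log2_sew) - 1);
    }

    int main(void)
    {
        printf("%u\n", trunc_imm_to_sew(17, 16)); /* prints 1 */
        return 0;
    }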

+ 305 - 0
recipes-devtools/qemu/qemu/0063-target-riscv-rvv-1.0-remove-widening-saturating-scal.patch

@@ -0,0 +1,305 @@
+From 21fcf80f52cdf536c6196cacaa8048c5d751beaf Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Thu, 2 Jul 2020 16:08:39 +0800
+Subject: [PATCH 063/107] target/riscv: rvv-1.0: remove widening saturating
+ scaled multiply-add
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+---
+ target/riscv/helper.h                   |  22 ---
+ target/riscv/insn32.decode              |   7 -
+ target/riscv/insn_trans/trans_rvv.c.inc |   9 --
+ target/riscv/vector_helper.c            | 205 ------------------------
+ 4 files changed, 243 deletions(-)
+
+diff --git a/target/riscv/helper.h b/target/riscv/helper.h
+index 7ba8c77c6f..69b9e687c0 100644
+--- a/target/riscv/helper.h
++++ b/target/riscv/helper.h
+@@ -761,28 +761,6 @@ DEF_HELPER_6(vsmul_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+ DEF_HELPER_6(vsmul_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+ DEF_HELPER_6(vsmul_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+ 
+-DEF_HELPER_6(vwsmaccu_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+-DEF_HELPER_6(vwsmaccu_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+-DEF_HELPER_6(vwsmaccu_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+-DEF_HELPER_6(vwsmacc_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+-DEF_HELPER_6(vwsmacc_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+-DEF_HELPER_6(vwsmacc_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+-DEF_HELPER_6(vwsmaccsu_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+-DEF_HELPER_6(vwsmaccsu_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+-DEF_HELPER_6(vwsmaccsu_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+-DEF_HELPER_6(vwsmaccu_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vwsmaccu_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vwsmaccu_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vwsmacc_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vwsmacc_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vwsmacc_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vwsmaccsu_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vwsmaccsu_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vwsmaccsu_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vwsmaccus_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vwsmaccus_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+-DEF_HELPER_6(vwsmaccus_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+-
+ DEF_HELPER_6(vssrl_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+ DEF_HELPER_6(vssrl_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+ DEF_HELPER_6(vssrl_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
+index 302b687a2d..11dece1383 100644
+--- a/target/riscv/insn32.decode
++++ b/target/riscv/insn32.decode
+@@ -478,13 +478,6 @@ vasubu_vv       001010 . ..... ..... 010 ..... 1010111 @r_vm
+ vasubu_vx       001010 . ..... ..... 110 ..... 1010111 @r_vm
+ vsmul_vv        100111 . ..... ..... 000 ..... 1010111 @r_vm
+ vsmul_vx        100111 . ..... ..... 100 ..... 1010111 @r_vm
+-vwsmaccu_vv     111100 . ..... ..... 000 ..... 1010111 @r_vm
+-vwsmaccu_vx     111100 . ..... ..... 100 ..... 1010111 @r_vm
+-vwsmacc_vv      111101 . ..... ..... 000 ..... 1010111 @r_vm
+-vwsmacc_vx      111101 . ..... ..... 100 ..... 1010111 @r_vm
+-vwsmaccsu_vv    111110 . ..... ..... 000 ..... 1010111 @r_vm
+-vwsmaccsu_vx    111110 . ..... ..... 100 ..... 1010111 @r_vm
+-vwsmaccus_vx    111111 . ..... ..... 100 ..... 1010111 @r_vm
+ vssrl_vv        101010 . ..... ..... 000 ..... 1010111 @r_vm
+ vssrl_vx        101010 . ..... ..... 100 ..... 1010111 @r_vm
+ vssrl_vi        101010 . ..... ..... 011 ..... 1010111 @r_vm
+diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
+index 2c601435ce..73676cc030 100644
+--- a/target/riscv/insn_trans/trans_rvv.c.inc
++++ b/target/riscv/insn_trans/trans_rvv.c.inc
+@@ -2249,15 +2249,6 @@ GEN_OPIVX_TRANS(vasubu_vx,  opivx_check)
+ GEN_OPIVV_TRANS(vsmul_vv, opivv_check)
+ GEN_OPIVX_TRANS(vsmul_vx,  opivx_check)
+ 
+-/* Vector Widening Saturating Scaled Multiply-Add */
+-GEN_OPIVV_WIDEN_TRANS(vwsmaccu_vv, opivv_widen_check)
+-GEN_OPIVV_WIDEN_TRANS(vwsmacc_vv, opivv_widen_check)
+-GEN_OPIVV_WIDEN_TRANS(vwsmaccsu_vv, opivv_widen_check)
+-GEN_OPIVX_WIDEN_TRANS(vwsmaccu_vx)
+-GEN_OPIVX_WIDEN_TRANS(vwsmacc_vx)
+-GEN_OPIVX_WIDEN_TRANS(vwsmaccsu_vx)
+-GEN_OPIVX_WIDEN_TRANS(vwsmaccus_vx)
+-
+ /* Vector Single-Width Scaling Shift Instructions */
+ GEN_OPIVV_TRANS(vssrl_vv, opivv_check)
+ GEN_OPIVV_TRANS(vssra_vv, opivv_check)
+diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
+index fb01d126d7..2bc586fbe5 100644
+--- a/target/riscv/vector_helper.c
++++ b/target/riscv/vector_helper.c
+@@ -2703,211 +2703,6 @@ GEN_VEXT_VX_RM(vsmul_vx_h, 2, 2)
+ GEN_VEXT_VX_RM(vsmul_vx_w, 4, 4)
+ GEN_VEXT_VX_RM(vsmul_vx_d, 8, 8)
+ 
+-/* Vector Widening Saturating Scaled Multiply-Add */
+-static inline uint16_t
+-vwsmaccu8(CPURISCVState *env, int vxrm, uint8_t a, uint8_t b,
+-          uint16_t c)
+-{
+-    uint8_t round;
+-    uint16_t res = (uint16_t)a * b;
+-
+-    round = get_round(vxrm, res, 4);
+-    res   = (res >> 4) + round;
+-    return saddu16(env, vxrm, c, res);
+-}
+-
+-static inline uint32_t
+-vwsmaccu16(CPURISCVState *env, int vxrm, uint16_t a, uint16_t b,
+-           uint32_t c)
+-{
+-    uint8_t round;
+-    uint32_t res = (uint32_t)a * b;
+-
+-    round = get_round(vxrm, res, 8);
+-    res   = (res >> 8) + round;
+-    return saddu32(env, vxrm, c, res);
+-}
+-
+-static inline uint64_t
+-vwsmaccu32(CPURISCVState *env, int vxrm, uint32_t a, uint32_t b,
+-           uint64_t c)
+-{
+-    uint8_t round;
+-    uint64_t res = (uint64_t)a * b;
+-
+-    round = get_round(vxrm, res, 16);
+-    res   = (res >> 16) + round;
+-    return saddu64(env, vxrm, c, res);
+-}
+-
+-#define OPIVV3_RM(NAME, TD, T1, T2, TX1, TX2, HD, HS1, HS2, OP)    \
+-static inline void                                                 \
+-do_##NAME(void *vd, void *vs1, void *vs2, int i,                   \
+-          CPURISCVState *env, int vxrm)                            \
+-{                                                                  \
+-    TX1 s1 = *((T1 *)vs1 + HS1(i));                                \
+-    TX2 s2 = *((T2 *)vs2 + HS2(i));                                \
+-    TD d = *((TD *)vd + HD(i));                                    \
+-    *((TD *)vd + HD(i)) = OP(env, vxrm, s2, s1, d);                \
+-}
+-
+-RVVCALL(OPIVV3_RM, vwsmaccu_vv_b, WOP_UUU_B, H2, H1, H1, vwsmaccu8)
+-RVVCALL(OPIVV3_RM, vwsmaccu_vv_h, WOP_UUU_H, H4, H2, H2, vwsmaccu16)
+-RVVCALL(OPIVV3_RM, vwsmaccu_vv_w, WOP_UUU_W, H8, H4, H4, vwsmaccu32)
+-GEN_VEXT_VV_RM(vwsmaccu_vv_b, 1, 2)
+-GEN_VEXT_VV_RM(vwsmaccu_vv_h, 2, 4)
+-GEN_VEXT_VV_RM(vwsmaccu_vv_w, 4, 8)
+-
+-#define OPIVX3_RM(NAME, TD, T1, T2, TX1, TX2, HD, HS2, OP)         \
+-static inline void                                                 \
+-do_##NAME(void *vd, target_long s1, void *vs2, int i,              \
+-          CPURISCVState *env, int vxrm)                            \
+-{                                                                  \
+-    TX2 s2 = *((T2 *)vs2 + HS2(i));                                \
+-    TD d = *((TD *)vd + HD(i));                                    \
+-    *((TD *)vd + HD(i)) = OP(env, vxrm, s2, (TX1)(T1)s1, d);       \
+-}
+-
+-RVVCALL(OPIVX3_RM, vwsmaccu_vx_b, WOP_UUU_B, H2, H1, vwsmaccu8)
+-RVVCALL(OPIVX3_RM, vwsmaccu_vx_h, WOP_UUU_H, H4, H2, vwsmaccu16)
+-RVVCALL(OPIVX3_RM, vwsmaccu_vx_w, WOP_UUU_W, H8, H4, vwsmaccu32)
+-GEN_VEXT_VX_RM(vwsmaccu_vx_b, 1, 2)
+-GEN_VEXT_VX_RM(vwsmaccu_vx_h, 2, 4)
+-GEN_VEXT_VX_RM(vwsmaccu_vx_w, 4, 8)
+-
+-static inline int16_t
+-vwsmacc8(CPURISCVState *env, int vxrm, int8_t a, int8_t b, int16_t c)
+-{
+-    uint8_t round;
+-    int16_t res = (int16_t)a * b;
+-
+-    round = get_round(vxrm, res, 4);
+-    res   = (res >> 4) + round;
+-    return sadd16(env, vxrm, c, res);
+-}
+-
+-static inline int32_t
+-vwsmacc16(CPURISCVState *env, int vxrm, int16_t a, int16_t b, int32_t c)
+-{
+-    uint8_t round;
+-    int32_t res = (int32_t)a * b;
+-
+-    round = get_round(vxrm, res, 8);
+-    res   = (res >> 8) + round;
+-    return sadd32(env, vxrm, c, res);
+-
+-}
+-
+-static inline int64_t
+-vwsmacc32(CPURISCVState *env, int vxrm, int32_t a, int32_t b, int64_t c)
+-{
+-    uint8_t round;
+-    int64_t res = (int64_t)a * b;
+-
+-    round = get_round(vxrm, res, 16);
+-    res   = (res >> 16) + round;
+-    return sadd64(env, vxrm, c, res);
+-}
+-
+-RVVCALL(OPIVV3_RM, vwsmacc_vv_b, WOP_SSS_B, H2, H1, H1, vwsmacc8)
+-RVVCALL(OPIVV3_RM, vwsmacc_vv_h, WOP_SSS_H, H4, H2, H2, vwsmacc16)
+-RVVCALL(OPIVV3_RM, vwsmacc_vv_w, WOP_SSS_W, H8, H4, H4, vwsmacc32)
+-GEN_VEXT_VV_RM(vwsmacc_vv_b, 1, 2)
+-GEN_VEXT_VV_RM(vwsmacc_vv_h, 2, 4)
+-GEN_VEXT_VV_RM(vwsmacc_vv_w, 4, 8)
+-RVVCALL(OPIVX3_RM, vwsmacc_vx_b, WOP_SSS_B, H2, H1, vwsmacc8)
+-RVVCALL(OPIVX3_RM, vwsmacc_vx_h, WOP_SSS_H, H4, H2, vwsmacc16)
+-RVVCALL(OPIVX3_RM, vwsmacc_vx_w, WOP_SSS_W, H8, H4, vwsmacc32)
+-GEN_VEXT_VX_RM(vwsmacc_vx_b, 1, 2)
+-GEN_VEXT_VX_RM(vwsmacc_vx_h, 2, 4)
+-GEN_VEXT_VX_RM(vwsmacc_vx_w, 4, 8)
+-
+-static inline int16_t
+-vwsmaccsu8(CPURISCVState *env, int vxrm, uint8_t a, int8_t b, int16_t c)
+-{
+-    uint8_t round;
+-    int16_t res = a * (int16_t)b;
+-
+-    round = get_round(vxrm, res, 4);
+-    res   = (res >> 4) + round;
+-    return ssub16(env, vxrm, c, res);
+-}
+-
+-static inline int32_t
+-vwsmaccsu16(CPURISCVState *env, int vxrm, uint16_t a, int16_t b, uint32_t c)
+-{
+-    uint8_t round;
+-    int32_t res = a * (int32_t)b;
+-
+-    round = get_round(vxrm, res, 8);
+-    res   = (res >> 8) + round;
+-    return ssub32(env, vxrm, c, res);
+-}
+-
+-static inline int64_t
+-vwsmaccsu32(CPURISCVState *env, int vxrm, uint32_t a, int32_t b, int64_t c)
+-{
+-    uint8_t round;
+-    int64_t res = a * (int64_t)b;
+-
+-    round = get_round(vxrm, res, 16);
+-    res   = (res >> 16) + round;
+-    return ssub64(env, vxrm, c, res);
+-}
+-
+-RVVCALL(OPIVV3_RM, vwsmaccsu_vv_b, WOP_SSU_B, H2, H1, H1, vwsmaccsu8)
+-RVVCALL(OPIVV3_RM, vwsmaccsu_vv_h, WOP_SSU_H, H4, H2, H2, vwsmaccsu16)
+-RVVCALL(OPIVV3_RM, vwsmaccsu_vv_w, WOP_SSU_W, H8, H4, H4, vwsmaccsu32)
+-GEN_VEXT_VV_RM(vwsmaccsu_vv_b, 1, 2)
+-GEN_VEXT_VV_RM(vwsmaccsu_vv_h, 2, 4)
+-GEN_VEXT_VV_RM(vwsmaccsu_vv_w, 4, 8)
+-RVVCALL(OPIVX3_RM, vwsmaccsu_vx_b, WOP_SSU_B, H2, H1, vwsmaccsu8)
+-RVVCALL(OPIVX3_RM, vwsmaccsu_vx_h, WOP_SSU_H, H4, H2, vwsmaccsu16)
+-RVVCALL(OPIVX3_RM, vwsmaccsu_vx_w, WOP_SSU_W, H8, H4, vwsmaccsu32)
+-GEN_VEXT_VX_RM(vwsmaccsu_vx_b, 1, 2)
+-GEN_VEXT_VX_RM(vwsmaccsu_vx_h, 2, 4)
+-GEN_VEXT_VX_RM(vwsmaccsu_vx_w, 4, 8)
+-
+-static inline int16_t
+-vwsmaccus8(CPURISCVState *env, int vxrm, int8_t a, uint8_t b, int16_t c)
+-{
+-    uint8_t round;
+-    int16_t res = (int16_t)a * b;
+-
+-    round = get_round(vxrm, res, 4);
+-    res   = (res >> 4) + round;
+-    return ssub16(env, vxrm, c, res);
+-}
+-
+-static inline int32_t
+-vwsmaccus16(CPURISCVState *env, int vxrm, int16_t a, uint16_t b, int32_t c)
+-{
+-    uint8_t round;
+-    int32_t res = (int32_t)a * b;
+-
+-    round = get_round(vxrm, res, 8);
+-    res   = (res >> 8) + round;
+-    return ssub32(env, vxrm, c, res);
+-}
+-
+-static inline int64_t
+-vwsmaccus32(CPURISCVState *env, int vxrm, int32_t a, uint32_t b, int64_t c)
+-{
+-    uint8_t round;
+-    int64_t res = (int64_t)a * b;
+-
+-    round = get_round(vxrm, res, 16);
+-    res   = (res >> 16) + round;
+-    return ssub64(env, vxrm, c, res);
+-}
+-
+-RVVCALL(OPIVX3_RM, vwsmaccus_vx_b, WOP_SUS_B, H2, H1, vwsmaccus8)
+-RVVCALL(OPIVX3_RM, vwsmaccus_vx_h, WOP_SUS_H, H4, H2, vwsmaccus16)
+-RVVCALL(OPIVX3_RM, vwsmaccus_vx_w, WOP_SUS_W, H8, H4, vwsmaccus32)
+-GEN_VEXT_VX_RM(vwsmaccus_vx_b, 1, 2)
+-GEN_VEXT_VX_RM(vwsmaccus_vx_h, 2, 4)
+-GEN_VEXT_VX_RM(vwsmaccus_vx_w, 4, 8)
+-
+ /* Vector Single-Width Scaling Shift Instructions */
+ static inline uint8_t
+ vssrl8(CPURISCVState *env, int vxrm, uint8_t a, uint8_t b)
+-- 
+2.33.1
+

+ 85 - 0
recipes-devtools/qemu/qemu/0064-target-riscv-rvv-1.0-remove-vmford.vv-and-vmford.vf.patch

@@ -0,0 +1,85 @@
+From aa96a46b843a48309f61d551254657727d77c652 Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Thu, 2 Jul 2020 16:11:28 +0800
+Subject: [PATCH 064/107] target/riscv: rvv-1.0: remove vmford.vv and vmford.vf
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+---
+ target/riscv/helper.h                   | 6 ------
+ target/riscv/insn32.decode              | 2 --
+ target/riscv/insn_trans/trans_rvv.c.inc | 2 --
+ target/riscv/vector_helper.c            | 7 -------
+ 4 files changed, 17 deletions(-)
+
+diff --git a/target/riscv/helper.h b/target/riscv/helper.h
+index 69b9e687c0..1ad2df98ff 100644
+--- a/target/riscv/helper.h
++++ b/target/riscv/helper.h
+@@ -977,12 +977,6 @@ DEF_HELPER_6(vmfgt_vf_d, void, ptr, ptr, i64, ptr, env, i32)
+ DEF_HELPER_6(vmfge_vf_h, void, ptr, ptr, i64, ptr, env, i32)
+ DEF_HELPER_6(vmfge_vf_w, void, ptr, ptr, i64, ptr, env, i32)
+ DEF_HELPER_6(vmfge_vf_d, void, ptr, ptr, i64, ptr, env, i32)
+-DEF_HELPER_6(vmford_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+-DEF_HELPER_6(vmford_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+-DEF_HELPER_6(vmford_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+-DEF_HELPER_6(vmford_vf_h, void, ptr, ptr, i64, ptr, env, i32)
+-DEF_HELPER_6(vmford_vf_w, void, ptr, ptr, i64, ptr, env, i32)
+-DEF_HELPER_6(vmford_vf_d, void, ptr, ptr, i64, ptr, env, i32)
+ 
+ DEF_HELPER_5(vfclass_v_h, void, ptr, ptr, ptr, env, i32)
+ DEF_HELPER_5(vfclass_v_w, void, ptr, ptr, ptr, env, i32)
+diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
+index 11dece1383..d6b9adf9aa 100644
+--- a/target/riscv/insn32.decode
++++ b/target/riscv/insn32.decode
+@@ -557,8 +557,6 @@ vmfle_vv        011001 . ..... ..... 001 ..... 1010111 @r_vm
+ vmfle_vf        011001 . ..... ..... 101 ..... 1010111 @r_vm
+ vmfgt_vf        011101 . ..... ..... 101 ..... 1010111 @r_vm
+ vmfge_vf        011111 . ..... ..... 101 ..... 1010111 @r_vm
+-vmford_vv       011010 . ..... ..... 001 ..... 1010111 @r_vm
+-vmford_vf       011010 . ..... ..... 101 ..... 1010111 @r_vm
+ vfclass_v       010011 . ..... 10000 001 ..... 1010111 @r2_vm
+ vfmerge_vfm     010111 0 ..... ..... 101 ..... 1010111 @r_vm_0
+ vfmv_v_f        010111 1 00000 ..... 101 ..... 1010111 @r2
+diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
+index 73676cc030..215dd55bc9 100644
+--- a/target/riscv/insn_trans/trans_rvv.c.inc
++++ b/target/riscv/insn_trans/trans_rvv.c.inc
+@@ -2655,7 +2655,6 @@ GEN_OPFVV_TRANS(vmfeq_vv, opfvv_cmp_check)
+ GEN_OPFVV_TRANS(vmfne_vv, opfvv_cmp_check)
+ GEN_OPFVV_TRANS(vmflt_vv, opfvv_cmp_check)
+ GEN_OPFVV_TRANS(vmfle_vv, opfvv_cmp_check)
+-GEN_OPFVV_TRANS(vmford_vv, opfvv_cmp_check)
+ 
+ static bool opfvf_cmp_check(DisasContext *s, arg_rmrr *a)
+ {
+@@ -2671,7 +2670,6 @@ GEN_OPFVF_TRANS(vmflt_vf, opfvf_cmp_check)
+ GEN_OPFVF_TRANS(vmfle_vf, opfvf_cmp_check)
+ GEN_OPFVF_TRANS(vmfgt_vf, opfvf_cmp_check)
+ GEN_OPFVF_TRANS(vmfge_vf, opfvf_cmp_check)
+-GEN_OPFVF_TRANS(vmford_vf, opfvf_cmp_check)
+ 
+ /* Vector Floating-Point Classify Instruction */
+ GEN_OPFV_TRANS(vfclass_v, opfv_check)
+diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
+index 2bc586fbe5..b82fe7841a 100644
+--- a/target/riscv/vector_helper.c
++++ b/target/riscv/vector_helper.c
+@@ -3839,13 +3839,6 @@ GEN_VEXT_CMP_VF(vmfge_vf_h, uint16_t, H2, vmfge16)
+ GEN_VEXT_CMP_VF(vmfge_vf_w, uint32_t, H4, vmfge32)
+ GEN_VEXT_CMP_VF(vmfge_vf_d, uint64_t, H8, vmfge64)
+ 
+-GEN_VEXT_CMP_VV_ENV(vmford_vv_h, uint16_t, H2, !float16_unordered_quiet)
+-GEN_VEXT_CMP_VV_ENV(vmford_vv_w, uint32_t, H4, !float32_unordered_quiet)
+-GEN_VEXT_CMP_VV_ENV(vmford_vv_d, uint64_t, H8, !float64_unordered_quiet)
+-GEN_VEXT_CMP_VF(vmford_vf_h, uint16_t, H2, !float16_unordered_quiet)
+-GEN_VEXT_CMP_VF(vmford_vf_w, uint32_t, H4, !float32_unordered_quiet)
+-GEN_VEXT_CMP_VF(vmford_vf_d, uint64_t, H8, !float64_unordered_quiet)
+-
+ /* Vector Floating-Point Classify Instruction */
+ #define OPIVV1(NAME, TD, T2, TX2, HD, HS2, OP)         \
+ static void do_##NAME(void *vd, void *vs2, int i)      \
+-- 
+2.33.1
+

+ 69 - 0
recipes-devtools/qemu/qemu/0065-target-riscv-rvv-1.0-remove-integer-extract-instruct.patch

@@ -0,0 +1,69 @@
+From 04eef9250669b0efaa4fa93d975562d18616c9d8 Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Thu, 2 Jul 2020 16:16:35 +0800
+Subject: [PATCH 065/107] target/riscv: rvv-1.0: remove integer extract
+ instruction
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+---
+ target/riscv/insn32.decode              |  1 -
+ target/riscv/insn_trans/trans_rvv.c.inc | 23 -----------------------
+ 2 files changed, 24 deletions(-)
+
+diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
+index d6b9adf9aa..8794397c74 100644
+--- a/target/riscv/insn32.decode
++++ b/target/riscv/insn32.decode
+@@ -607,7 +607,6 @@ viota_m         010100 . ..... 10000 010 ..... 1010111 @r2_vm
+ vid_v           010100 . 00000 10001 010 ..... 1010111 @r1_vm
+ vmv_x_s         010000 1 ..... 00000 010 ..... 1010111 @r2rd
+ vmv_s_x         010000 1 00000 ..... 110 ..... 1010111 @r2
+-vext_x_v        001100 1 ..... ..... 010 ..... 1010111 @r
+ vfmv_f_s        010000 1 ..... 00000 001 ..... 1010111 @r2rd
+ vfmv_s_f        010000 1 00000 ..... 101 ..... 1010111 @r2
+ vslideup_vx     001110 . ..... ..... 100 ..... 1010111 @r_vm
+diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
+index 215dd55bc9..8cc79f1228 100644
+--- a/target/riscv/insn_trans/trans_rvv.c.inc
++++ b/target/riscv/insn_trans/trans_rvv.c.inc
+@@ -3071,8 +3071,6 @@ static bool trans_vid_v(DisasContext *s, arg_vid_v *a)
+  *** Vector Permutation Instructions
+  */
+ 
+-/* Integer Extract Instruction */
+-
+ static void load_element(TCGv_i64 dest, TCGv_ptr base,
+                          int ofs, int sew, bool sign)
+ {
+@@ -3174,27 +3172,6 @@ static void vec_element_loadi(DisasContext *s, TCGv_i64 dest,
+     load_element(dest, cpu_env, endian_ofs(s, vreg, idx), s->sew, sign);
+ }
+ 
+-static bool trans_vext_x_v(DisasContext *s, arg_r *a)
+-{
+-    TCGv_i64 tmp = tcg_temp_new_i64();
+-    TCGv dest = tcg_temp_new();
+-
+-    if (a->rs1 == 0) {
+-        /* Special case vmv.x.s rd, vs2. */
+-        vec_element_loadi(s, tmp, a->rs2, 0, false);
+-    } else {
+-        /* This instruction ignores LMUL and vector register groups */
+-        int vlmax = s->vlen >> (3 + s->sew);
+-        vec_element_loadx(s, tmp, a->rs2, cpu_gpr[a->rs1], vlmax);
+-    }
+-    tcg_gen_trunc_i64_tl(dest, tmp);
+-    gen_set_gpr(a->rd, dest);
+-
+-    tcg_temp_free(dest);
+-    tcg_temp_free_i64(tmp);
+-    return true;
+-}
+-
+ /* Integer Scalar Move Instruction */
+ 
+ static void store_element(TCGv_i64 val, TCGv_ptr base,
+-- 
+2.33.1
+

+ 60 - 0
recipes-devtools/qemu/qemu/0066-target-riscv-rvv-1.0-floating-point-min-max-instruct.patch

@@ -0,0 +1,60 @@
+From 442867b1fb418d240ad564f93ae9c020a82f2541 Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Mon, 6 Jul 2020 20:03:09 +0800
+Subject: [PATCH 066/107] target/riscv: rvv-1.0: floating-point min/max
+ instructions
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+---
+ target/riscv/vector_helper.c | 24 ++++++++++++------------
+ 1 file changed, 12 insertions(+), 12 deletions(-)
+
+diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
+index b82fe7841a..45ad6eaa96 100644
+--- a/target/riscv/vector_helper.c
++++ b/target/riscv/vector_helper.c
+@@ -3596,28 +3596,28 @@ GEN_VEXT_V_ENV(vfsqrt_v_w, 4, 4)
+ GEN_VEXT_V_ENV(vfsqrt_v_d, 8, 8)
+ 
+ /* Vector Floating-Point MIN/MAX Instructions */
+-RVVCALL(OPFVV2, vfmin_vv_h, OP_UUU_H, H2, H2, H2, float16_minnum)
+-RVVCALL(OPFVV2, vfmin_vv_w, OP_UUU_W, H4, H4, H4, float32_minnum)
+-RVVCALL(OPFVV2, vfmin_vv_d, OP_UUU_D, H8, H8, H8, float64_minnum)
++RVVCALL(OPFVV2, vfmin_vv_h, OP_UUU_H, H2, H2, H2, float16_minnum_noprop)
++RVVCALL(OPFVV2, vfmin_vv_w, OP_UUU_W, H4, H4, H4, float32_minnum_noprop)
++RVVCALL(OPFVV2, vfmin_vv_d, OP_UUU_D, H8, H8, H8, float64_minnum_noprop)
+ GEN_VEXT_VV_ENV(vfmin_vv_h, 2, 2)
+ GEN_VEXT_VV_ENV(vfmin_vv_w, 4, 4)
+ GEN_VEXT_VV_ENV(vfmin_vv_d, 8, 8)
+-RVVCALL(OPFVF2, vfmin_vf_h, OP_UUU_H, H2, H2, float16_minnum)
+-RVVCALL(OPFVF2, vfmin_vf_w, OP_UUU_W, H4, H4, float32_minnum)
+-RVVCALL(OPFVF2, vfmin_vf_d, OP_UUU_D, H8, H8, float64_minnum)
++RVVCALL(OPFVF2, vfmin_vf_h, OP_UUU_H, H2, H2, float16_minnum_noprop)
++RVVCALL(OPFVF2, vfmin_vf_w, OP_UUU_W, H4, H4, float32_minnum_noprop)
++RVVCALL(OPFVF2, vfmin_vf_d, OP_UUU_D, H8, H8, float64_minnum_noprop)
+ GEN_VEXT_VF(vfmin_vf_h, 2, 2)
+ GEN_VEXT_VF(vfmin_vf_w, 4, 4)
+ GEN_VEXT_VF(vfmin_vf_d, 8, 8)
+ 
+-RVVCALL(OPFVV2, vfmax_vv_h, OP_UUU_H, H2, H2, H2, float16_maxnum)
+-RVVCALL(OPFVV2, vfmax_vv_w, OP_UUU_W, H4, H4, H4, float32_maxnum)
+-RVVCALL(OPFVV2, vfmax_vv_d, OP_UUU_D, H8, H8, H8, float64_maxnum)
++RVVCALL(OPFVV2, vfmax_vv_h, OP_UUU_H, H2, H2, H2, float16_maxnum_noprop)
++RVVCALL(OPFVV2, vfmax_vv_w, OP_UUU_W, H4, H4, H4, float32_maxnum_noprop)
++RVVCALL(OPFVV2, vfmax_vv_d, OP_UUU_D, H8, H8, H8, float64_maxnum_noprop)
+ GEN_VEXT_VV_ENV(vfmax_vv_h, 2, 2)
+ GEN_VEXT_VV_ENV(vfmax_vv_w, 4, 4)
+ GEN_VEXT_VV_ENV(vfmax_vv_d, 8, 8)
+-RVVCALL(OPFVF2, vfmax_vf_h, OP_UUU_H, H2, H2, float16_maxnum)
+-RVVCALL(OPFVF2, vfmax_vf_w, OP_UUU_W, H4, H4, float32_maxnum)
+-RVVCALL(OPFVF2, vfmax_vf_d, OP_UUU_D, H8, H8, float64_maxnum)
++RVVCALL(OPFVF2, vfmax_vf_h, OP_UUU_H, H2, H2, float16_maxnum_noprop)
++RVVCALL(OPFVF2, vfmax_vf_w, OP_UUU_W, H4, H4, float32_maxnum_noprop)
++RVVCALL(OPFVF2, vfmax_vf_d, OP_UUU_D, H8, H8, float64_maxnum_noprop)
+ GEN_VEXT_VF(vfmax_vf_h, 2, 2)
+ GEN_VEXT_VF(vfmax_vf_w, 4, 4)
+ GEN_VEXT_VF(vfmax_vf_d, 8, 8)
+-- 
+2.33.1
+
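
The switch to the *_noprop min/max variants ties in with the alternative sNaN-propagation softfloat APIs added earlier in this series; RVV 1.0 specifies IEEE 754-2019 minimumNumber/maximumNumber behaviour for vfmin/vfmax. A standalone C sketch of that behaviour (not the QEMU softfloat code; signalling-NaN flags and signed-zero handling omitted), assuming the *_noprop helpers return a canonical NaN rather than a propagated payload when both operands are NaN:

    #include <math.h>
    #include <stdio.h>

    /* minimumNumber: a single NaN operand loses; two NaNs give a canonical NaN. */
    static float minimum_number(float a, float b)
    {
        if (isnan(a)) {
            return isnan(b) ? NAN : b;
        }
        if (isnan(b)) {
            return a;
        }
        return a < b ? a : b;
    }

    int main(void)
    {
        printf("%g\n", minimum_number(NAN, 2.0f)); /* prints 2 */
        return 0;
    }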

+ 156 - 0
recipes-devtools/qemu/qemu/0067-target-riscv-introduce-floating-point-rounding-mode-.patch

@@ -0,0 +1,156 @@
+From 70d3125d83a632a33ea81cfe889608cccc31416f Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Wed, 5 Aug 2020 11:08:59 +0800
+Subject: [PATCH 067/107] target/riscv: introduce floating-point rounding mode
+ enum
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+---
+ target/riscv/fpu_helper.c               | 12 ++++++------
+ target/riscv/insn_trans/trans_rvv.c.inc | 18 +++++++++---------
+ target/riscv/internals.h                |  9 +++++++++
+ 3 files changed, 24 insertions(+), 15 deletions(-)
+
+diff --git a/target/riscv/fpu_helper.c b/target/riscv/fpu_helper.c
+index 09d37c1384..96d7762243 100644
+--- a/target/riscv/fpu_helper.c
++++ b/target/riscv/fpu_helper.c
+@@ -55,23 +55,23 @@ void helper_set_rounding_mode(CPURISCVState *env, uint32_t rm)
+ {
+     int softrm;
+ 
+-    if (rm == 7) {
++    if (rm == RISCV_FRM_DYN) {
+         rm = env->frm;
+     }
+     switch (rm) {
+-    case 0:
++    case RISCV_FRM_RNE:
+         softrm = float_round_nearest_even;
+         break;
+-    case 1:
++    case RISCV_FRM_RTZ:
+         softrm = float_round_to_zero;
+         break;
+-    case 2:
++    case RISCV_FRM_RDN:
+         softrm = float_round_down;
+         break;
+-    case 3:
++    case RISCV_FRM_RUP:
+         softrm = float_round_up;
+         break;
+-    case 4:
++    case RISCV_FRM_RMM:
+         softrm = float_round_ties_away;
+         break;
+     default:
+diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
+index 8cc79f1228..92f66728c6 100644
+--- a/target/riscv/insn_trans/trans_rvv.c.inc
++++ b/target/riscv/insn_trans/trans_rvv.c.inc
+@@ -2321,7 +2321,7 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a)             \
+             gen_helper_##NAME##_d,                                 \
+         };                                                         \
+         TCGLabel *over = gen_new_label();                          \
+-        gen_set_rm(s, 7);                                          \
++        gen_set_rm(s, RISCV_FRM_DYN);                              \
+         tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);          \
+                                                                    \
+         data = FIELD_DP32(data, VDATA, VM, a->vm);                 \
+@@ -2400,7 +2400,7 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a)            \
+             gen_helper_##NAME##_w,                                \
+             gen_helper_##NAME##_d,                                \
+         };                                                        \
+-        gen_set_rm(s, 7);                                         \
++        gen_set_rm(s, RISCV_FRM_DYN);                             \
+         data = FIELD_DP32(data, VDATA, VM, a->vm);                \
+         data = FIELD_DP32(data, VDATA, LMUL, s->lmul);            \
+         return opfvf_trans(a->rd, a->rs1, a->rs2, data,           \
+@@ -2432,7 +2432,7 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a)           \
+             gen_helper_##NAME##_h, gen_helper_##NAME##_w,        \
+         };                                                       \
+         TCGLabel *over = gen_new_label();                        \
+-        gen_set_rm(s, 7);                                        \
++        gen_set_rm(s, RISCV_FRM_DYN);                            \
+         tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);        \
+                                                                  \
+         data = FIELD_DP32(data, VDATA, VM, a->vm);               \
+@@ -2468,7 +2468,7 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a)           \
+         static gen_helper_opfvf *const fns[2] = {                \
+             gen_helper_##NAME##_h, gen_helper_##NAME##_w,        \
+         };                                                       \
+-        gen_set_rm(s, 7);                                        \
++        gen_set_rm(s, RISCV_FRM_DYN);                            \
+         data = FIELD_DP32(data, VDATA, VM, a->vm);               \
+         data = FIELD_DP32(data, VDATA, LMUL, s->lmul);           \
+         return opfvf_trans(a->rd, a->rs1, a->rs2, data,          \
+@@ -2498,7 +2498,7 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a)             \
+             gen_helper_##NAME##_h, gen_helper_##NAME##_w,          \
+         };                                                         \
+         TCGLabel *over = gen_new_label();                          \
+-        gen_set_rm(s, 7);                                          \
++        gen_set_rm(s, RISCV_FRM_DYN);                              \
+         tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);          \
+                                                                    \
+         data = FIELD_DP32(data, VDATA, VM, a->vm);                 \
+@@ -2534,7 +2534,7 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a)           \
+         static gen_helper_opfvf *const fns[2] = {                \
+             gen_helper_##NAME##_h, gen_helper_##NAME##_w,        \
+         };                                                       \
+-        gen_set_rm(s, 7);                                        \
++        gen_set_rm(s, RISCV_FRM_DYN);                            \
+         data = FIELD_DP32(data, VDATA, VM, a->vm);               \
+         data = FIELD_DP32(data, VDATA, LMUL, s->lmul);           \
+         return opfvf_trans(a->rd, a->rs1, a->rs2, data,          \
+@@ -2611,7 +2611,7 @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a)              \
+             gen_helper_##NAME##_d,                                 \
+         };                                                         \
+         TCGLabel *over = gen_new_label();                          \
+-        gen_set_rm(s, 7);                                          \
++        gen_set_rm(s, RISCV_FRM_DYN);                              \
+         tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);          \
+                                                                    \
+         data = FIELD_DP32(data, VDATA, VM, a->vm);                 \
+@@ -2757,7 +2757,7 @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a)              \
+             gen_helper_##NAME##_w,                                 \
+         };                                                         \
+         TCGLabel *over = gen_new_label();                          \
+-        gen_set_rm(s, 7);                                          \
++        gen_set_rm(s, RISCV_FRM_DYN);                              \
+         tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);          \
+                                                                    \
+         data = FIELD_DP32(data, VDATA, VM, a->vm);                 \
+@@ -2804,7 +2804,7 @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a)              \
+             gen_helper_##NAME##_w,                                 \
+         };                                                         \
+         TCGLabel *over = gen_new_label();                          \
+-        gen_set_rm(s, 7);                                          \
++        gen_set_rm(s, RISCV_FRM_DYN);                              \
+         tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);          \
+                                                                    \
+         data = FIELD_DP32(data, VDATA, VM, a->vm);                 \
+diff --git a/target/riscv/internals.h b/target/riscv/internals.h
+index ac062dc0b4..db105d4d64 100644
+--- a/target/riscv/internals.h
++++ b/target/riscv/internals.h
+@@ -36,6 +36,15 @@ target_ulong fclass_d(uint64_t frs1);
+ extern const VMStateDescription vmstate_riscv_cpu;
+ #endif
+ 
++enum {
++    RISCV_FRM_RNE = 0,  /* Round to Nearest, ties to Even */
++    RISCV_FRM_RTZ = 1,  /* Round towards Zero */
++    RISCV_FRM_RDN = 2,  /* Round Down */
++    RISCV_FRM_RUP = 3,  /* Round Up */
++    RISCV_FRM_RMM = 4,  /* Round to Nearest, ties to Max Magnitude */
++    RISCV_FRM_DYN = 7,  /* Dynamic rounding mode */
++};
++
+ static inline uint64_t nanbox_s(float32 f)
+ {
+     return f | MAKE_64BIT_MASK(32, 32);
+-- 
+2.33.1
+
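
The enum values above mirror the 3-bit frm field encoding from the F extension, which is why only DYN (7) needs special handling: it defers to the frm CSR at execution time, while the other values select a fixed mode at translation. A minimal standalone C sketch of that selection (hypothetical names, not QEMU code):

    #include <stdio.h>

    enum { FRM_RNE = 0, FRM_RTZ = 1, FRM_RDN = 2, FRM_RUP = 3, FRM_RMM = 4, FRM_DYN = 7 };

    /* An instruction's static rm wins unless it is DYN, in which case the
     * dynamic mode from the frm CSR is used. */
    static int effective_rm(int insn_rm, int fcsr_frm)
    {
        return insn_rm == FRM_DYN ? fcsr_frm : insn_rm;
    }

    int main(void)
    {
        printf("%d\n", effective_rm(FRM_DYN, FRM_RUP)); /* prints 3 */
        return 0;
    }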

+ 160 - 0
recipes-devtools/qemu/qemu/0068-target-riscv-rvv-1.0-floating-point-integer-type-con.patch

@@ -0,0 +1,160 @@
+From 368a058ec5f3f608f60280ac0f0c6c876725d62c Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Wed, 5 Aug 2020 14:31:00 +0800
+Subject: [PATCH 068/107] target/riscv: rvv-1.0: floating-point/integer
+ type-convert instructions
+
+Add the following instructions:
+
+* vfcvt.rtz.xu.f.v
+* vfcvt.rtz.x.f.v
+
+Also adjust GEN_OPFV_TRANS() to accept multiple floating-point rounding
+modes.
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+---
+ target/riscv/insn32.decode              | 11 ++--
+ target/riscv/insn_trans/trans_rvv.c.inc | 83 ++++++++++++++++---------
+ 2 files changed, 59 insertions(+), 35 deletions(-)
+
+diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
+index 8794397c74..c3482a5f44 100644
+--- a/target/riscv/insn32.decode
++++ b/target/riscv/insn32.decode
+@@ -560,10 +560,13 @@ vmfge_vf        011111 . ..... ..... 101 ..... 1010111 @r_vm
+ vfclass_v       010011 . ..... 10000 001 ..... 1010111 @r2_vm
+ vfmerge_vfm     010111 0 ..... ..... 101 ..... 1010111 @r_vm_0
+ vfmv_v_f        010111 1 00000 ..... 101 ..... 1010111 @r2
+-vfcvt_xu_f_v    100010 . ..... 00000 001 ..... 1010111 @r2_vm
+-vfcvt_x_f_v     100010 . ..... 00001 001 ..... 1010111 @r2_vm
+-vfcvt_f_xu_v    100010 . ..... 00010 001 ..... 1010111 @r2_vm
+-vfcvt_f_x_v     100010 . ..... 00011 001 ..... 1010111 @r2_vm
++
++vfcvt_xu_f_v       010010 . ..... 00000 001 ..... 1010111 @r2_vm
++vfcvt_x_f_v        010010 . ..... 00001 001 ..... 1010111 @r2_vm
++vfcvt_f_xu_v       010010 . ..... 00010 001 ..... 1010111 @r2_vm
++vfcvt_f_x_v        010010 . ..... 00011 001 ..... 1010111 @r2_vm
++vfcvt_rtz_xu_f_v   010010 . ..... 00110 001 ..... 1010111 @r2_vm
++vfcvt_rtz_x_f_v    010010 . ..... 00111 001 ..... 1010111 @r2_vm
+ vfwcvt_xu_f_v   100010 . ..... 01000 001 ..... 1010111 @r2_vm
+ vfwcvt_x_f_v    100010 . ..... 01001 001 ..... 1010111 @r2_vm
+ vfwcvt_f_xu_v   100010 . ..... 01010 001 ..... 1010111 @r2_vm
+diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
+index 92f66728c6..86eefbf87b 100644
+--- a/target/riscv/insn_trans/trans_rvv.c.inc
++++ b/target/riscv/insn_trans/trans_rvv.c.inc
+@@ -1,5 +1,4 @@
+ /*
+- * RISC-V translation routines for the RVV Standard Extension.
+  *
+  * Copyright (c) 2020 T-Head Semiconductor Co., Ltd. All rights reserved.
+  *
+@@ -2600,33 +2599,41 @@ static bool opfv_check(DisasContext *s, arg_rmr *a)
+            vext_check_ss(s, a->rd, a->rs2, a->vm);
+ }
+ 
+-#define GEN_OPFV_TRANS(NAME, CHECK)                                \
+-static bool trans_##NAME(DisasContext *s, arg_rmr *a)              \
+-{                                                                  \
+-    if (CHECK(s, a)) {                                             \
+-        uint32_t data = 0;                                         \
+-        static gen_helper_gvec_3_ptr * const fns[3] = {            \
+-            gen_helper_##NAME##_h,                                 \
+-            gen_helper_##NAME##_w,                                 \
+-            gen_helper_##NAME##_d,                                 \
+-        };                                                         \
+-        TCGLabel *over = gen_new_label();                          \
+-        gen_set_rm(s, RISCV_FRM_DYN);                              \
+-        tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);          \
+-                                                                   \
+-        data = FIELD_DP32(data, VDATA, VM, a->vm);                 \
+-        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);             \
+-        tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),     \
+-                           vreg_ofs(s, a->rs2), cpu_env, 0,        \
+-                           s->vlen / 8, data, fns[s->sew - 1]);    \
+-        mark_vs_dirty(s);                                          \
+-        gen_set_label(over);                                       \
+-        return true;                                               \
+-    }                                                              \
+-    return false;                                                  \
++static bool do_opfv(DisasContext *s, arg_rmr *a,
++                    gen_helper_gvec_3_ptr *fn,
++                    bool (*checkfn)(DisasContext *, arg_rmr *),
++                    int rm)
++{
++    if (checkfn(s, a)) {
++        uint32_t data = 0;
++        TCGLabel *over = gen_new_label();
++        gen_set_rm(s, rm);
++        tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
++
++        data = FIELD_DP32(data, VDATA, VM, a->vm);
++        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
++        tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
++                vreg_ofs(s, a->rs2), cpu_env, 0,
++                s->vlen / 8, data, fn);
++        mark_vs_dirty(s);
++        gen_set_label(over);
++        return true;
++    }
++    return false;
++}
++
++#define GEN_OPFV_TRANS(NAME, CHECK, FRM)               \
++static bool trans_##NAME(DisasContext *s, arg_rmr *a)  \
++{                                                      \
++    static gen_helper_gvec_3_ptr * const fns[3] = {    \
++        gen_helper_##NAME##_h,                         \
++        gen_helper_##NAME##_w,                         \
++        gen_helper_##NAME##_d                          \
++    };                                                 \
++    return do_opfv(s, a, fns[s->sew - 1], CHECK, FRM); \
+ }
+ 
+-GEN_OPFV_TRANS(vfsqrt_v, opfv_check)
++GEN_OPFV_TRANS(vfsqrt_v, opfv_check, RISCV_FRM_DYN)
+ 
+ /* Vector Floating-Point MIN/MAX Instructions */
+ GEN_OPFVV_TRANS(vfmin_vv, opfvv_check)
+@@ -2672,7 +2679,7 @@ GEN_OPFVF_TRANS(vmfgt_vf, opfvf_cmp_check)
+ GEN_OPFVF_TRANS(vmfge_vf, opfvf_cmp_check)
+ 
+ /* Vector Floating-Point Classify Instruction */
+-GEN_OPFV_TRANS(vfclass_v, opfv_check)
++GEN_OPFV_TRANS(vfclass_v, opfv_check, RISCV_FRM_DYN)
+ 
+ /* Vector Floating-Point Merge Instruction */
+ GEN_OPFVF_TRANS(vfmerge_vfm,  opfvf_check)
+@@ -2727,10 +2734,24 @@ static bool trans_vfmv_v_f(DisasContext *s, arg_vfmv_v_f *a)
+ }
+ 
+ /* Single-Width Floating-Point/Integer Type-Convert Instructions */
+-GEN_OPFV_TRANS(vfcvt_xu_f_v, opfv_check)
+-GEN_OPFV_TRANS(vfcvt_x_f_v, opfv_check)
+-GEN_OPFV_TRANS(vfcvt_f_xu_v, opfv_check)
+-GEN_OPFV_TRANS(vfcvt_f_x_v, opfv_check)
++#define GEN_OPFV_CVT_TRANS(NAME, HELPER, FRM)               \
++static bool trans_##NAME(DisasContext *s, arg_rmr *a)       \
++{                                                           \
++    static gen_helper_gvec_3_ptr * const fns[3] = {         \
++        gen_helper_##HELPER##_h,                            \
++        gen_helper_##HELPER##_w,                            \
++        gen_helper_##HELPER##_d                             \
++    };                                                      \
++    return do_opfv(s, a, fns[s->sew - 1], opfv_check, FRM); \
++}
++
++GEN_OPFV_CVT_TRANS(vfcvt_xu_f_v, vfcvt_xu_f_v, RISCV_FRM_DYN)
++GEN_OPFV_CVT_TRANS(vfcvt_x_f_v, vfcvt_x_f_v, RISCV_FRM_DYN)
++GEN_OPFV_CVT_TRANS(vfcvt_f_xu_v, vfcvt_f_xu_v, RISCV_FRM_DYN)
++GEN_OPFV_CVT_TRANS(vfcvt_f_x_v, vfcvt_f_x_v, RISCV_FRM_DYN)
++/* Reuse the helper functions from vfcvt.xu.f.v and vfcvt.x.f.v */
++GEN_OPFV_CVT_TRANS(vfcvt_rtz_xu_f_v, vfcvt_xu_f_v, RISCV_FRM_RTZ)
++GEN_OPFV_CVT_TRANS(vfcvt_rtz_x_f_v, vfcvt_x_f_v, RISCV_FRM_RTZ)
+ 
+ /* Widening Floating-Point/Integer Type-Convert Instructions */
+ 
+-- 
+2.33.1
+
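
The reuse noted in the comment above, where vfcvt.rtz.xu.f.v and vfcvt.rtz.x.f.v call the same helpers as the dynamic-rounding forms and only the statically installed rounding mode differs, can be sketched with host fenv (hypothetical helper, not the QEMU API):

    #include <fenv.h>
    #include <math.h>
    #include <stdio.h>

    /* One conversion routine serves both encodings; only the rounding mode
     * installed beforehand differs (lrint honours the current mode). */
    static long convert_with_mode(double x, int mode)
    {
        int saved = fegetround();
        fesetround(mode);
        long r = lrint(x);
        fesetround(saved);
        return r;
    }

    int main(void)
    {
        printf("%ld %ld\n",
               convert_with_mode(2.7, FE_TONEAREST),   /* 3, like the dynamic form */
               convert_with_mode(2.7, FE_TOWARDZERO)); /* 2, like the .rtz form    */
        return 0;
    }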

+ 187 - 0
recipes-devtools/qemu/qemu/0069-target-riscv-rvv-1.0-widening-floating-point-integer.patch

@@ -0,0 +1,187 @@
+From e5d64e4ca124cce1c3f91bef3048dc169548163d Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Mon, 17 Aug 2020 14:52:27 +0800
+Subject: [PATCH 069/107] target/riscv: rvv-1.0: widening
+ floating-point/integer type-convert
+
+Add the following instructions:
+
+* vfwcvt.rtz.xu.f.v
+* vfwcvt.rtz.x.f.v
+
+Also adjust GEN_OPFV_WIDEN_TRANS() to accept multiple floating-point
+rounding modes.
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+---
+ target/riscv/helper.h                   |  2 +
+ target/riscv/insn32.decode              | 13 +++---
+ target/riscv/insn_trans/trans_rvv.c.inc | 54 +++++++++++++++++++++----
+ target/riscv/vector_helper.c            |  7 +++-
+ 4 files changed, 62 insertions(+), 14 deletions(-)
+
+diff --git a/target/riscv/helper.h b/target/riscv/helper.h
+index 1ad2df98ff..51174cdafa 100644
+--- a/target/riscv/helper.h
++++ b/target/riscv/helper.h
+@@ -1003,8 +1003,10 @@ DEF_HELPER_5(vfwcvt_xu_f_v_h, void, ptr, ptr, ptr, env, i32)
+ DEF_HELPER_5(vfwcvt_xu_f_v_w, void, ptr, ptr, ptr, env, i32)
+ DEF_HELPER_5(vfwcvt_x_f_v_h, void, ptr, ptr, ptr, env, i32)
+ DEF_HELPER_5(vfwcvt_x_f_v_w, void, ptr, ptr, ptr, env, i32)
++DEF_HELPER_5(vfwcvt_f_xu_v_b, void, ptr, ptr, ptr, env, i32)
+ DEF_HELPER_5(vfwcvt_f_xu_v_h, void, ptr, ptr, ptr, env, i32)
+ DEF_HELPER_5(vfwcvt_f_xu_v_w, void, ptr, ptr, ptr, env, i32)
++DEF_HELPER_5(vfwcvt_f_x_v_b, void, ptr, ptr, ptr, env, i32)
+ DEF_HELPER_5(vfwcvt_f_x_v_h, void, ptr, ptr, ptr, env, i32)
+ DEF_HELPER_5(vfwcvt_f_x_v_w, void, ptr, ptr, ptr, env, i32)
+ DEF_HELPER_5(vfwcvt_f_f_v_h, void, ptr, ptr, ptr, env, i32)
+diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
+index c3482a5f44..2d648ffd24 100644
+--- a/target/riscv/insn32.decode
++++ b/target/riscv/insn32.decode
+@@ -567,11 +567,14 @@ vfcvt_f_xu_v       010010 . ..... 00010 001 ..... 1010111 @r2_vm
+ vfcvt_f_x_v        010010 . ..... 00011 001 ..... 1010111 @r2_vm
+ vfcvt_rtz_xu_f_v   010010 . ..... 00110 001 ..... 1010111 @r2_vm
+ vfcvt_rtz_x_f_v    010010 . ..... 00111 001 ..... 1010111 @r2_vm
+-vfwcvt_xu_f_v   100010 . ..... 01000 001 ..... 1010111 @r2_vm
+-vfwcvt_x_f_v    100010 . ..... 01001 001 ..... 1010111 @r2_vm
+-vfwcvt_f_xu_v   100010 . ..... 01010 001 ..... 1010111 @r2_vm
+-vfwcvt_f_x_v    100010 . ..... 01011 001 ..... 1010111 @r2_vm
+-vfwcvt_f_f_v    100010 . ..... 01100 001 ..... 1010111 @r2_vm
++
++vfwcvt_xu_f_v      010010 . ..... 01000 001 ..... 1010111 @r2_vm
++vfwcvt_x_f_v       010010 . ..... 01001 001 ..... 1010111 @r2_vm
++vfwcvt_f_xu_v      010010 . ..... 01010 001 ..... 1010111 @r2_vm
++vfwcvt_f_x_v       010010 . ..... 01011 001 ..... 1010111 @r2_vm
++vfwcvt_f_f_v       010010 . ..... 01100 001 ..... 1010111 @r2_vm
++vfwcvt_rtz_xu_f_v  010010 . ..... 01110 001 ..... 1010111 @r2_vm
++vfwcvt_rtz_x_f_v   010010 . ..... 01111 001 ..... 1010111 @r2_vm
+ vfncvt_xu_f_v   100010 . ..... 10000 001 ..... 1010111 @r2_vm
+ vfncvt_x_f_v    100010 . ..... 10001 001 ..... 1010111 @r2_vm
+ vfncvt_f_xu_v   100010 . ..... 10010 001 ..... 1010111 @r2_vm
+diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
+index 86eefbf87b..da8bf8940e 100644
+--- a/target/riscv/insn_trans/trans_rvv.c.inc
++++ b/target/riscv/insn_trans/trans_rvv.c.inc
+@@ -2768,12 +2768,54 @@ static bool opfv_widen_check(DisasContext *s, arg_rmr *a)
+            vext_check_ds(s, a->rd, a->rs2, a->vm);
+ }
+ 
+-#define GEN_OPFV_WIDEN_TRANS(NAME)                                 \
++#define GEN_OPFV_WIDEN_TRANS(NAME, HELPER, FRM)                    \
+ static bool trans_##NAME(DisasContext *s, arg_rmr *a)              \
+ {                                                                  \
+     if (opfv_widen_check(s, a)) {                                  \
+         uint32_t data = 0;                                         \
+         static gen_helper_gvec_3_ptr * const fns[2] = {            \
++            gen_helper_##HELPER##_h,                               \
++            gen_helper_##HELPER##_w,                               \
++        };                                                         \
++        TCGLabel *over = gen_new_label();                          \
++        gen_set_rm(s, FRM);                                        \
++        tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);          \
++                                                                   \
++        data = FIELD_DP32(data, VDATA, VM, a->vm);                 \
++        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);             \
++        tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),     \
++                           vreg_ofs(s, a->rs2), cpu_env, 0,        \
++                           s->vlen / 8, data, fns[s->sew - 1]);    \
++        mark_vs_dirty(s);                                          \
++        gen_set_label(over);                                       \
++        return true;                                               \
++    }                                                              \
++    return false;                                                  \
++}
++
++GEN_OPFV_WIDEN_TRANS(vfwcvt_xu_f_v, vfwcvt_xu_f_v, RISCV_FRM_DYN)
++GEN_OPFV_WIDEN_TRANS(vfwcvt_x_f_v, vfwcvt_x_f_v, RISCV_FRM_DYN)
++GEN_OPFV_WIDEN_TRANS(vfwcvt_f_f_v, vfwcvt_f_f_v, RISCV_FRM_DYN)
++/* Reuse the helper functions from vfwcvt.xu.f.v and vfwcvt.x.f.v */
++GEN_OPFV_WIDEN_TRANS(vfwcvt_rtz_xu_f_v, vfwcvt_xu_f_v, RISCV_FRM_RTZ)
++GEN_OPFV_WIDEN_TRANS(vfwcvt_rtz_x_f_v, vfwcvt_x_f_v, RISCV_FRM_RTZ)
++
++static bool opfxv_widen_check(DisasContext *s, arg_rmr *a)
++{
++    return require_rvv(s) &&
++           require_scale_rvf(s) &&
++           vext_check_isa_ill(s) &&
++           /* OPFV widening instructions ignore vs1 check */
++           vext_check_ds(s, a->rd, a->rs2, a->vm);
++}
++
++#define GEN_OPFXV_WIDEN_TRANS(NAME)                                \
++static bool trans_##NAME(DisasContext *s, arg_rmr *a)              \
++{                                                                  \
++    if (opfxv_widen_check(s, a)) {                                 \
++        uint32_t data = 0;                                         \
++        static gen_helper_gvec_3_ptr * const fns[3] = {            \
++            gen_helper_##NAME##_b,                                 \
+             gen_helper_##NAME##_h,                                 \
+             gen_helper_##NAME##_w,                                 \
+         };                                                         \
+@@ -2782,10 +2824,9 @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a)              \
+         tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);          \
+                                                                    \
+         data = FIELD_DP32(data, VDATA, VM, a->vm);                 \
+-        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);             \
+         tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),     \
+                            vreg_ofs(s, a->rs2), cpu_env, 0,        \
+-                           s->vlen / 8, data, fns[s->sew - 1]);    \
++                           s->vlen / 8, data, fns[s->sew]);        \
+         mark_vs_dirty(s);                                          \
+         gen_set_label(over);                                       \
+         return true;                                               \
+@@ -2793,11 +2834,8 @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a)              \
+     return false;                                                  \
+ }
+ 
+-GEN_OPFV_WIDEN_TRANS(vfwcvt_xu_f_v)
+-GEN_OPFV_WIDEN_TRANS(vfwcvt_x_f_v)
+-GEN_OPFV_WIDEN_TRANS(vfwcvt_f_xu_v)
+-GEN_OPFV_WIDEN_TRANS(vfwcvt_f_x_v)
+-GEN_OPFV_WIDEN_TRANS(vfwcvt_f_f_v)
++GEN_OPFXV_WIDEN_TRANS(vfwcvt_f_xu_v)
++GEN_OPFXV_WIDEN_TRANS(vfwcvt_f_x_v)
+ 
+ /* Narrowing Floating-Point/Integer Type-Convert Instructions */
+ 
+diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
+index 45ad6eaa96..6e2995b8ca 100644
+--- a/target/riscv/vector_helper.c
++++ b/target/riscv/vector_helper.c
+@@ -3982,6 +3982,7 @@ GEN_VEXT_V_ENV(vfcvt_f_x_v_d, 8, 8)
+ 
+ /* Widening Floating-Point/Integer Type-Convert Instructions */
+ /* (TD, T2, TX2) */
++#define WOP_UU_B uint16_t, uint8_t,  uint8_t
+ #define WOP_UU_H uint32_t, uint16_t, uint16_t
+ #define WOP_UU_W uint64_t, uint32_t, uint32_t
+ /* vfwcvt.xu.f.v vd, vs2, vm # Convert float to double-width unsigned integer.*/
+@@ -3997,19 +3998,23 @@ GEN_VEXT_V_ENV(vfwcvt_x_f_v_h, 2, 4)
+ GEN_VEXT_V_ENV(vfwcvt_x_f_v_w, 4, 8)
+ 
+ /* vfwcvt.f.xu.v vd, vs2, vm # Convert unsigned integer to double-width float */
++RVVCALL(OPFVV1, vfwcvt_f_xu_v_b, WOP_UU_B, H2, H1, uint8_to_float16)
+ RVVCALL(OPFVV1, vfwcvt_f_xu_v_h, WOP_UU_H, H4, H2, uint16_to_float32)
+ RVVCALL(OPFVV1, vfwcvt_f_xu_v_w, WOP_UU_W, H8, H4, uint32_to_float64)
++GEN_VEXT_V_ENV(vfwcvt_f_xu_v_b, 1, 2)
+ GEN_VEXT_V_ENV(vfwcvt_f_xu_v_h, 2, 4)
+ GEN_VEXT_V_ENV(vfwcvt_f_xu_v_w, 4, 8)
+ 
+ /* vfwcvt.f.x.v vd, vs2, vm # Convert integer to double-width float. */
++RVVCALL(OPFVV1, vfwcvt_f_x_v_b, WOP_UU_B, H2, H1, int8_to_float16)
+ RVVCALL(OPFVV1, vfwcvt_f_x_v_h, WOP_UU_H, H4, H2, int16_to_float32)
+ RVVCALL(OPFVV1, vfwcvt_f_x_v_w, WOP_UU_W, H8, H4, int32_to_float64)
++GEN_VEXT_V_ENV(vfwcvt_f_x_v_b, 1, 2)
+ GEN_VEXT_V_ENV(vfwcvt_f_x_v_h, 2, 4)
+ GEN_VEXT_V_ENV(vfwcvt_f_x_v_w, 4, 8)
+ 
+ /*
+- * vfwcvt.f.f.v vd, vs2, vm #
++ * vfwcvt.f.f.v vd, vs2, vm
+  * Convert single-width float to double-width float.
+  */
+ static uint32_t vfwcvtffv16(uint16_t a, float_status *s)
+-- 
+2.33.1
+

+ 84 - 0
recipes-devtools/qemu/qemu/0070-target-riscv-add-set-round-to-odd-rounding-mode-help.patch

@@ -0,0 +1,84 @@
+From 1fb5a0f3fb0054da1a74e7991b89ecbf0f587452 Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Wed, 5 Aug 2020 17:10:50 +0800
+Subject: [PATCH 070/107] target/riscv: add "set round to odd" rounding mode
+ helper function
+
+helper_set_rounding_mode() is responsible for SIGILL, and "round to odd"
+should be an interface private to translation, so add a new independent
+helper_set_rod_rounding_mode().
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+---
+ target/riscv/fpu_helper.c | 5 +++++
+ target/riscv/helper.h     | 1 +
+ target/riscv/internals.h  | 1 +
+ target/riscv/translate.c  | 5 +++++
+ 4 files changed, 12 insertions(+)
+
+diff --git a/target/riscv/fpu_helper.c b/target/riscv/fpu_helper.c
+index 96d7762243..6a054fa1e3 100644
+--- a/target/riscv/fpu_helper.c
++++ b/target/riscv/fpu_helper.c
+@@ -81,6 +81,11 @@ void helper_set_rounding_mode(CPURISCVState *env, uint32_t rm)
+     set_float_rounding_mode(softrm, &env->fp_status);
+ }
+ 
++void helper_set_rod_rounding_mode(CPURISCVState *env)
++{
++    set_float_rounding_mode(float_round_to_odd, &env->fp_status);
++}
++
+ static uint64_t do_fmadd_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2,
+                            uint64_t rs3, int flags)
+ {
+diff --git a/target/riscv/helper.h b/target/riscv/helper.h
+index 51174cdafa..a2a82e92e5 100644
+--- a/target/riscv/helper.h
++++ b/target/riscv/helper.h
+@@ -3,6 +3,7 @@ DEF_HELPER_2(raise_exception, noreturn, env, i32)
+ 
+ /* Floating Point - rounding mode */
+ DEF_HELPER_FLAGS_2(set_rounding_mode, TCG_CALL_NO_WG, void, env, i32)
++DEF_HELPER_FLAGS_1(set_rod_rounding_mode, TCG_CALL_NO_WG, void, env)
+ 
+ /* Floating Point - fused */
+ DEF_HELPER_FLAGS_4(fmadd_s, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
+diff --git a/target/riscv/internals.h b/target/riscv/internals.h
+index db105d4d64..065e8162a2 100644
+--- a/target/riscv/internals.h
++++ b/target/riscv/internals.h
+@@ -43,6 +43,7 @@ enum {
+     RISCV_FRM_RUP = 3,  /* Round Up */
+     RISCV_FRM_RMM = 4,  /* Round to Nearest, ties to Max Magnitude */
+     RISCV_FRM_DYN = 7,  /* Dynamic rounding mode */
++    RISCV_FRM_ROD = 8,  /* Round to Odd */
+ };
+ 
+ static inline uint64_t nanbox_s(float32 f)
+diff --git a/target/riscv/translate.c b/target/riscv/translate.c
+index d10e489cfe..4dfb0a2a51 100644
+--- a/target/riscv/translate.c
++++ b/target/riscv/translate.c
+@@ -30,6 +30,7 @@
+ #include "exec/log.h"
+ 
+ #include "instmap.h"
++#include "internals.h"
+ 
+ /* global register indices */
+ static TCGv cpu_gpr[32], cpu_pc, cpu_vl;
+@@ -458,6 +459,10 @@ static void gen_set_rm(DisasContext *ctx, int rm)
+         return;
+     }
+     ctx->frm = rm;
++    if (rm == RISCV_FRM_ROD) {
++        gen_helper_set_rod_rounding_mode(cpu_env);
++        return;
++    }
+     t0 = tcg_const_i32(rm);
+     gen_helper_set_rounding_mode(cpu_env, t0);
+     tcg_temp_free_i32(t0);
+-- 
+2.33.1
+

+ 231 - 0
recipes-devtools/qemu/qemu/0071-target-riscv-rvv-1.0-narrowing-floating-point-intege.patch

@@ -0,0 +1,231 @@
+From ab9b645b33f23f4e2fef14032c50b9351efd0d70 Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Mon, 17 Aug 2020 13:38:30 +0800
+Subject: [PATCH 071/107] target/riscv: rvv-1.0: narrowing
+ floating-point/integer type-convert
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+---
+ target/riscv/helper.h                   | 22 +++++-----
+ target/riscv/insn32.decode              | 15 ++++---
+ target/riscv/insn_trans/trans_rvv.c.inc | 58 +++++++++++++++++++++----
+ target/riscv/vector_helper.c            | 45 ++++++++++---------
+ 4 files changed, 96 insertions(+), 44 deletions(-)
+
+diff --git a/target/riscv/helper.h b/target/riscv/helper.h
+index a2a82e92e5..572d2e5a4c 100644
+--- a/target/riscv/helper.h
++++ b/target/riscv/helper.h
+@@ -1013,16 +1013,18 @@ DEF_HELPER_5(vfwcvt_f_x_v_w, void, ptr, ptr, ptr, env, i32)
+ DEF_HELPER_5(vfwcvt_f_f_v_h, void, ptr, ptr, ptr, env, i32)
+ DEF_HELPER_5(vfwcvt_f_f_v_w, void, ptr, ptr, ptr, env, i32)
+ 
+-DEF_HELPER_5(vfncvt_xu_f_v_h, void, ptr, ptr, ptr, env, i32)
+-DEF_HELPER_5(vfncvt_xu_f_v_w, void, ptr, ptr, ptr, env, i32)
+-DEF_HELPER_5(vfncvt_x_f_v_h, void, ptr, ptr, ptr, env, i32)
+-DEF_HELPER_5(vfncvt_x_f_v_w, void, ptr, ptr, ptr, env, i32)
+-DEF_HELPER_5(vfncvt_f_xu_v_h, void, ptr, ptr, ptr, env, i32)
+-DEF_HELPER_5(vfncvt_f_xu_v_w, void, ptr, ptr, ptr, env, i32)
+-DEF_HELPER_5(vfncvt_f_x_v_h, void, ptr, ptr, ptr, env, i32)
+-DEF_HELPER_5(vfncvt_f_x_v_w, void, ptr, ptr, ptr, env, i32)
+-DEF_HELPER_5(vfncvt_f_f_v_h, void, ptr, ptr, ptr, env, i32)
+-DEF_HELPER_5(vfncvt_f_f_v_w, void, ptr, ptr, ptr, env, i32)
++DEF_HELPER_5(vfncvt_xu_f_w_b, void, ptr, ptr, ptr, env, i32)
++DEF_HELPER_5(vfncvt_xu_f_w_h, void, ptr, ptr, ptr, env, i32)
++DEF_HELPER_5(vfncvt_xu_f_w_w, void, ptr, ptr, ptr, env, i32)
++DEF_HELPER_5(vfncvt_x_f_w_b, void, ptr, ptr, ptr, env, i32)
++DEF_HELPER_5(vfncvt_x_f_w_h, void, ptr, ptr, ptr, env, i32)
++DEF_HELPER_5(vfncvt_x_f_w_w, void, ptr, ptr, ptr, env, i32)
++DEF_HELPER_5(vfncvt_f_xu_w_h, void, ptr, ptr, ptr, env, i32)
++DEF_HELPER_5(vfncvt_f_xu_w_w, void, ptr, ptr, ptr, env, i32)
++DEF_HELPER_5(vfncvt_f_x_w_h, void, ptr, ptr, ptr, env, i32)
++DEF_HELPER_5(vfncvt_f_x_w_w, void, ptr, ptr, ptr, env, i32)
++DEF_HELPER_5(vfncvt_f_f_w_h, void, ptr, ptr, ptr, env, i32)
++DEF_HELPER_5(vfncvt_f_f_w_w, void, ptr, ptr, ptr, env, i32)
+ 
+ DEF_HELPER_6(vredsum_vs_b, void, ptr, ptr, ptr, ptr, env, i32)
+ DEF_HELPER_6(vredsum_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
+diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
+index 2d648ffd24..65d84f0b7c 100644
+--- a/target/riscv/insn32.decode
++++ b/target/riscv/insn32.decode
+@@ -575,11 +575,16 @@ vfwcvt_f_x_v       010010 . ..... 01011 001 ..... 1010111 @r2_vm
+ vfwcvt_f_f_v       010010 . ..... 01100 001 ..... 1010111 @r2_vm
+ vfwcvt_rtz_xu_f_v  010010 . ..... 01110 001 ..... 1010111 @r2_vm
+ vfwcvt_rtz_x_f_v   010010 . ..... 01111 001 ..... 1010111 @r2_vm
+-vfncvt_xu_f_v   100010 . ..... 10000 001 ..... 1010111 @r2_vm
+-vfncvt_x_f_v    100010 . ..... 10001 001 ..... 1010111 @r2_vm
+-vfncvt_f_xu_v   100010 . ..... 10010 001 ..... 1010111 @r2_vm
+-vfncvt_f_x_v    100010 . ..... 10011 001 ..... 1010111 @r2_vm
+-vfncvt_f_f_v    100010 . ..... 10100 001 ..... 1010111 @r2_vm
++
++vfncvt_xu_f_w      010010 . ..... 10000 001 ..... 1010111 @r2_vm
++vfncvt_x_f_w       010010 . ..... 10001 001 ..... 1010111 @r2_vm
++vfncvt_f_xu_w      010010 . ..... 10010 001 ..... 1010111 @r2_vm
++vfncvt_f_x_w       010010 . ..... 10011 001 ..... 1010111 @r2_vm
++vfncvt_f_f_w       010010 . ..... 10100 001 ..... 1010111 @r2_vm
++vfncvt_rod_f_f_w   010010 . ..... 10101 001 ..... 1010111 @r2_vm
++vfncvt_rtz_xu_f_w  010010 . ..... 10110 001 ..... 1010111 @r2_vm
++vfncvt_rtz_x_f_w   010010 . ..... 10111 001 ..... 1010111 @r2_vm
++
+ vredsum_vs      000000 . ..... ..... 010 ..... 1010111 @r_vm
+ vredand_vs      000001 . ..... ..... 010 ..... 1010111 @r_vm
+ vredor_vs       000010 . ..... ..... 010 ..... 1010111 @r_vm
+diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
+index da8bf8940e..ea79cfe7b8 100644
+--- a/target/riscv/insn_trans/trans_rvv.c.inc
++++ b/target/riscv/insn_trans/trans_rvv.c.inc
+@@ -2853,17 +2853,17 @@ static bool opfv_narrow_check(DisasContext *s, arg_rmr *a)
+            vext_check_sd(s, a->rd, a->rs2, a->vm);
+ }
+ 
+-#define GEN_OPFV_NARROW_TRANS(NAME)                                \
++#define GEN_OPFV_NARROW_TRANS(NAME, HELPER, FRM)                   \
+ static bool trans_##NAME(DisasContext *s, arg_rmr *a)              \
+ {                                                                  \
+     if (opfv_narrow_check(s, a)) {                                 \
+         uint32_t data = 0;                                         \
+         static gen_helper_gvec_3_ptr * const fns[2] = {            \
+-            gen_helper_##NAME##_h,                                 \
+-            gen_helper_##NAME##_w,                                 \
++            gen_helper_##HELPER##_h,                               \
++            gen_helper_##HELPER##_w,                               \
+         };                                                         \
+         TCGLabel *over = gen_new_label();                          \
+-        gen_set_rm(s, RISCV_FRM_DYN);                              \
++        gen_set_rm(s, FRM);                                        \
+         tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);          \
+                                                                    \
+         data = FIELD_DP32(data, VDATA, VM, a->vm);                 \
+@@ -2878,11 +2878,51 @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a)              \
+     return false;                                                  \
+ }
+ 
+-GEN_OPFV_NARROW_TRANS(vfncvt_xu_f_v)
+-GEN_OPFV_NARROW_TRANS(vfncvt_x_f_v)
+-GEN_OPFV_NARROW_TRANS(vfncvt_f_xu_v)
+-GEN_OPFV_NARROW_TRANS(vfncvt_f_x_v)
+-GEN_OPFV_NARROW_TRANS(vfncvt_f_f_v)
++GEN_OPFV_NARROW_TRANS(vfncvt_f_xu_w, vfncvt_f_xu_w, RISCV_FRM_DYN)
++GEN_OPFV_NARROW_TRANS(vfncvt_f_x_w, vfncvt_f_x_w, RISCV_FRM_DYN)
++GEN_OPFV_NARROW_TRANS(vfncvt_f_f_w, vfncvt_f_f_w, RISCV_FRM_DYN)
++/* Reuse the helper function from vfncvt.f.f.w */
++GEN_OPFV_NARROW_TRANS(vfncvt_rod_f_f_w, vfncvt_f_f_w, RISCV_FRM_ROD)
++
++static bool opxfv_narrow_check(DisasContext *s, arg_rmr *a)
++{
++    return require_rvv(s) &&
++           require_scale_rvf(s) &&
++           vext_check_isa_ill(s) &&
++           /* OPFV narrowing instructions ignore vs1 check */
++           vext_check_sd(s, a->rd, a->rs2, a->vm);
++}
++
++#define GEN_OPXFV_NARROW_TRANS(NAME, HELPER, FRM)                  \
++static bool trans_##NAME(DisasContext *s, arg_rmr *a)              \
++{                                                                  \
++    if (opxfv_narrow_check(s, a)) {                                \
++        uint32_t data = 0;                                         \
++        static gen_helper_gvec_3_ptr * const fns[3] = {            \
++            gen_helper_##HELPER##_b,                               \
++            gen_helper_##HELPER##_h,                               \
++            gen_helper_##HELPER##_w,                               \
++        };                                                         \
++        TCGLabel *over = gen_new_label();                          \
++        gen_set_rm(s, FRM);                                        \
++        tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);          \
++                                                                   \
++        data = FIELD_DP32(data, VDATA, VM, a->vm);                 \
++        tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),     \
++                           vreg_ofs(s, a->rs2), cpu_env, 0,        \
++                           s->vlen / 8, data, fns[s->sew]);        \
++        mark_vs_dirty(s);                                          \
++        gen_set_label(over);                                       \
++        return true;                                               \
++    }                                                              \
++    return false;                                                  \
++}
++
++GEN_OPXFV_NARROW_TRANS(vfncvt_xu_f_w, vfncvt_xu_f_w, RISCV_FRM_DYN)
++GEN_OPXFV_NARROW_TRANS(vfncvt_x_f_w, vfncvt_x_f_w, RISCV_FRM_DYN)
++/* Reuse the helper functions from vfncvt.xu.f.w and vfncvt.x.f.w */
++GEN_OPXFV_NARROW_TRANS(vfncvt_rtz_xu_f_w, vfncvt_xu_f_w, RISCV_FRM_RTZ)
++GEN_OPXFV_NARROW_TRANS(vfncvt_rtz_x_f_w, vfncvt_x_f_w, RISCV_FRM_RTZ)
+ 
+ /*
+  *** Vector Reduction Operations
+diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
+index 6e2995b8ca..91dccdb04d 100644
+--- a/target/riscv/vector_helper.c
++++ b/target/riscv/vector_helper.c
+@@ -4029,31 +4029,36 @@ GEN_VEXT_V_ENV(vfwcvt_f_f_v_w, 4, 8)
+ 
+ /* Narrowing Floating-Point/Integer Type-Convert Instructions */
+ /* (TD, T2, TX2) */
++#define NOP_UU_B uint8_t,  uint16_t, uint32_t
+ #define NOP_UU_H uint16_t, uint32_t, uint32_t
+ #define NOP_UU_W uint32_t, uint64_t, uint64_t
+ /* vfncvt.xu.f.v vd, vs2, vm # Convert float to unsigned integer. */
+-RVVCALL(OPFVV1, vfncvt_xu_f_v_h, NOP_UU_H, H2, H4, float32_to_uint16)
+-RVVCALL(OPFVV1, vfncvt_xu_f_v_w, NOP_UU_W, H4, H8, float64_to_uint32)
+-GEN_VEXT_V_ENV(vfncvt_xu_f_v_h, 2, 2)
+-GEN_VEXT_V_ENV(vfncvt_xu_f_v_w, 4, 4)
++RVVCALL(OPFVV1, vfncvt_xu_f_w_b, NOP_UU_B, H1, H2, float16_to_uint8)
++RVVCALL(OPFVV1, vfncvt_xu_f_w_h, NOP_UU_H, H2, H4, float32_to_uint16)
++RVVCALL(OPFVV1, vfncvt_xu_f_w_w, NOP_UU_W, H4, H8, float64_to_uint32)
++GEN_VEXT_V_ENV(vfncvt_xu_f_w_b, 1, 1)
++GEN_VEXT_V_ENV(vfncvt_xu_f_w_h, 2, 2)
++GEN_VEXT_V_ENV(vfncvt_xu_f_w_w, 4, 4)
+ 
+ /* vfncvt.x.f.v vd, vs2, vm # Convert double-width float to signed integer. */
+-RVVCALL(OPFVV1, vfncvt_x_f_v_h, NOP_UU_H, H2, H4, float32_to_int16)
+-RVVCALL(OPFVV1, vfncvt_x_f_v_w, NOP_UU_W, H4, H8, float64_to_int32)
+-GEN_VEXT_V_ENV(vfncvt_x_f_v_h, 2, 2)
+-GEN_VEXT_V_ENV(vfncvt_x_f_v_w, 4, 4)
++RVVCALL(OPFVV1, vfncvt_x_f_w_b, NOP_UU_B, H1, H2, float16_to_int8)
++RVVCALL(OPFVV1, vfncvt_x_f_w_h, NOP_UU_H, H2, H4, float32_to_int16)
++RVVCALL(OPFVV1, vfncvt_x_f_w_w, NOP_UU_W, H4, H8, float64_to_int32)
++GEN_VEXT_V_ENV(vfncvt_x_f_w_b, 1, 1)
++GEN_VEXT_V_ENV(vfncvt_x_f_w_h, 2, 2)
++GEN_VEXT_V_ENV(vfncvt_x_f_w_w, 4, 4)
+ 
+ /* vfncvt.f.xu.v vd, vs2, vm # Convert double-width unsigned integer to float */
+-RVVCALL(OPFVV1, vfncvt_f_xu_v_h, NOP_UU_H, H2, H4, uint32_to_float16)
+-RVVCALL(OPFVV1, vfncvt_f_xu_v_w, NOP_UU_W, H4, H8, uint64_to_float32)
+-GEN_VEXT_V_ENV(vfncvt_f_xu_v_h, 2, 2)
+-GEN_VEXT_V_ENV(vfncvt_f_xu_v_w, 4, 4)
++RVVCALL(OPFVV1, vfncvt_f_xu_w_h, NOP_UU_H, H2, H4, uint32_to_float16)
++RVVCALL(OPFVV1, vfncvt_f_xu_w_w, NOP_UU_W, H4, H8, uint64_to_float32)
++GEN_VEXT_V_ENV(vfncvt_f_xu_w_h, 2, 2)
++GEN_VEXT_V_ENV(vfncvt_f_xu_w_w, 4, 4)
+ 
+ /* vfncvt.f.x.v vd, vs2, vm # Convert double-width integer to float. */
+-RVVCALL(OPFVV1, vfncvt_f_x_v_h, NOP_UU_H, H2, H4, int32_to_float16)
+-RVVCALL(OPFVV1, vfncvt_f_x_v_w, NOP_UU_W, H4, H8, int64_to_float32)
+-GEN_VEXT_V_ENV(vfncvt_f_x_v_h, 2, 2)
+-GEN_VEXT_V_ENV(vfncvt_f_x_v_w, 4, 4)
++RVVCALL(OPFVV1, vfncvt_f_x_w_h, NOP_UU_H, H2, H4, int32_to_float16)
++RVVCALL(OPFVV1, vfncvt_f_x_w_w, NOP_UU_W, H4, H8, int64_to_float32)
++GEN_VEXT_V_ENV(vfncvt_f_x_w_h, 2, 2)
++GEN_VEXT_V_ENV(vfncvt_f_x_w_w, 4, 4)
+ 
+ /* vfncvt.f.f.v vd, vs2, vm # Convert double float to single-width float. */
+ static uint16_t vfncvtffv16(uint32_t a, float_status *s)
+@@ -4061,10 +4066,10 @@ static uint16_t vfncvtffv16(uint32_t a, float_status *s)
+     return float32_to_float16(a, true, s);
+ }
+ 
+-RVVCALL(OPFVV1, vfncvt_f_f_v_h, NOP_UU_H, H2, H4, vfncvtffv16)
+-RVVCALL(OPFVV1, vfncvt_f_f_v_w, NOP_UU_W, H4, H8, float64_to_float32)
+-GEN_VEXT_V_ENV(vfncvt_f_f_v_h, 2, 2)
+-GEN_VEXT_V_ENV(vfncvt_f_f_v_w, 4, 4)
++RVVCALL(OPFVV1, vfncvt_f_f_w_h, NOP_UU_H, H2, H4, vfncvtffv16)
++RVVCALL(OPFVV1, vfncvt_f_f_w_w, NOP_UU_W, H4, H8, float64_to_float32)
++GEN_VEXT_V_ENV(vfncvt_f_f_w_h, 2, 2)
++GEN_VEXT_V_ENV(vfncvt_f_f_w_w, 4, 4)
+ 
+ /*
+  *** Vector Reduction Operations
+-- 
+2.33.1
+
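
Two details of the narrowing rework above are worth spelling out: the
renamed vfncvt_*_w helpers take a 2*SEW-wide source and produce an
SEW-wide result (hence the "_w" suffix and the new "_b" byte variants),
and the rtz/rod encodings get no helpers of their own; translation pins
the rounding mode (RISCV_FRM_RTZ or RISCV_FRM_ROD) and then calls the
same helper as the dynamic-rounding variant. A rough standalone
illustration of that "pin the mode, reuse the conversion" pattern using
the standard C fenv interface (illustrative only, not QEMU code;
narrow() is a made-up stand-in; link with -lm):

#include <fenv.h>
#include <math.h>
#include <stdio.h>

/* Stand-in for a shared narrowing helper: it honours whatever FP
 * rounding mode the caller has pinned, like the reused vfncvt helpers. */
static long narrow(double wide)
{
    return lrint(wide);
}

int main(void)
{
    fesetround(FE_TONEAREST);        /* "dynamic" rounding: nearest, ties to even */
    printf("%ld\n", narrow(2.5));    /* 2 */

    fesetround(FE_TOWARDZERO);       /* the rtz variant only pins RTZ...          */
    printf("%ld\n", narrow(2.9));    /* ...and reuses the same helper: 2          */
    return 0;
}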

+ 58 - 0
recipes-devtools/qemu/qemu/0072-target-riscv-rvv-1.0-relax-RV_VLEN_MAX-to-1024-bits.patch

@@ -0,0 +1,58 @@
+From c20e6b080e9f932469ed72862e4a45cd314975e2 Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Mon, 28 Sep 2020 15:57:56 +0800
+Subject: [PATCH 072/107] target/riscv: rvv-1.0: relax RV_VLEN_MAX to 1024-bits
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+
+--
+---
+ target/riscv/cpu.h                      | 2 +-
+ target/riscv/insn_trans/trans_rvv.c.inc | 4 ++--
+ target/riscv/vector_helper.c            | 2 +-
+ 3 files changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
+index f773bae96b..d613520211 100644
+--- a/target/riscv/cpu.h
++++ b/target/riscv/cpu.h
+@@ -103,7 +103,7 @@ typedef struct CPURISCVState CPURISCVState;
+ 
+ #include "pmp.h"
+ 
+-#define RV_VLEN_MAX 256
++#define RV_VLEN_MAX 1024
+ 
+ FIELD(VTYPE, VLMUL, 0, 3)
+ FIELD(VTYPE, VSEW, 3, 3)
+diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
+index ea79cfe7b8..d205a05e83 100644
+--- a/target/riscv/insn_trans/trans_rvv.c.inc
++++ b/target/riscv/insn_trans/trans_rvv.c.inc
+@@ -613,8 +613,8 @@ static bool ldst_us_trans(uint32_t vd, uint32_t rs1, uint32_t data,
+     base = tcg_temp_new();
+ 
+     /*
+-     * As simd_desc supports at most 256 bytes, and in this implementation,
+-     * the max vector group length is 2048 bytes. So split it into two parts.
++     * As simd_desc supports at most 2048 bytes, and in this implementation,
++     * the max vector group length is 4096 bytes. So split it into two parts.
+      *
+      * The first part is vlen in bytes, encoded in maxsz of simd_desc.
+      * The second part is lmul, encoded in data of simd_desc.
+diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
+index 91dccdb04d..6f4f30c5f9 100644
+--- a/target/riscv/vector_helper.c
++++ b/target/riscv/vector_helper.c
+@@ -129,7 +129,7 @@ static uint32_t vext_wd(uint32_t desc)
+ static inline uint32_t vext_max_elems(uint32_t desc, uint32_t esz)
+ {
+     /*
+-     * As simd_desc support at most 256 bytes, the max vlen is 256 bits.
++     * As simd_desc supports at most 2048 bytes, the max vlen is 1024 bits.
+      * so vlen in bytes (vlenb) is encoded as maxsz.
+      */
+     uint32_t vlenb = simd_maxsz(desc);
+-- 
+2.33.1
+
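
With RV_VLEN_MAX raised to 1024 bits, vlen in bytes (vlenb) is at most
128, and the per-register element count scales as vlenb >> esz, as the
whole-register load/store helpers in the following patch compute it.
A small arithmetic sketch of that relationship at the new maximum VLEN
(illustrative only, not QEMU code):

#include <stdio.h>

int main(void)
{
    const unsigned vlen_bits = 1024;           /* new RV_VLEN_MAX               */
    const unsigned vlenb     = vlen_bits / 8;  /* 128 bytes per vector register */

    /* esz is log2 of the element size in bytes: 0 = 8-bit .. 3 = 64-bit. */
    for (unsigned esz = 0; esz <= 3; esz++) {
        printf("esz=%u: %u elements per register\n", esz, vlenb >> esz);
    }
    return 0;   /* prints 128, 64, 32, 16 elements respectively */
}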

+ 1078 - 0
recipes-devtools/qemu/qemu/0073-target-riscv-rvv-1.0-implement-vstart-CSR.patch

@@ -0,0 +1,1078 @@
+From 3968a1d276a980cfd6a0db2eb826c4b850f6d595 Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Fri, 14 Aug 2020 17:02:24 +0800
+Subject: [PATCH 073/107] target/riscv: rvv-1.0: implement vstart CSR
+
+* Update and check the vstart value for vector instructions.
+* Add whole register move instruction helper functions, as we have to
+  call a helper function for the case where vstart is not zero.
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+
+--
+
+Perhaps we can remove the probe functions in vector_helper.c to align with
+the hardware's behavior, which raises memory access exceptions and updates
+the vstart value at the exact vector element being processed.
+---
+ target/riscv/csr.c                      |   6 +-
+ target/riscv/helper.h                   |   5 +
+ target/riscv/insn_trans/trans_rvv.c.inc |  71 ++++++---
+ target/riscv/translate.c                |   6 +-
+ target/riscv/vector_helper.c            | 201 +++++++++++++++++-------
+ 5 files changed, 205 insertions(+), 84 deletions(-)
+
+diff --git a/target/riscv/csr.c b/target/riscv/csr.c
+index e065b042df..b932e28bbf 100644
+--- a/target/riscv/csr.c
++++ b/target/riscv/csr.c
+@@ -334,7 +334,11 @@ static int write_vstart(CPURISCVState *env, int csrno, target_ulong val)
+     env->mstatus |= MSTATUS_VS;
+ #endif
+ 
+-    env->vstart = val;
++    /*
++     * The vstart CSR is defined to have only enough writable bits
++     * to hold the largest element index, i.e. lg2(VLEN) bits.
++     */
++    env->vstart = val & ~(~0ULL << ctzl(env_archcpu(env)->cfg.vlen));
+     return 0;
+ }
+ 
+diff --git a/target/riscv/helper.h b/target/riscv/helper.h
+index 572d2e5a4c..e233548623 100644
+--- a/target/riscv/helper.h
++++ b/target/riscv/helper.h
+@@ -1148,6 +1148,11 @@ DEF_HELPER_6(vcompress_vm_h, void, ptr, ptr, ptr, ptr, env, i32)
+ DEF_HELPER_6(vcompress_vm_w, void, ptr, ptr, ptr, ptr, env, i32)
+ DEF_HELPER_6(vcompress_vm_d, void, ptr, ptr, ptr, ptr, env, i32)
+ 
++DEF_HELPER_4(vmv1r_v, void, ptr, ptr, env, i32)
++DEF_HELPER_4(vmv2r_v, void, ptr, ptr, env, i32)
++DEF_HELPER_4(vmv4r_v, void, ptr, ptr, env, i32)
++DEF_HELPER_4(vmv8r_v, void, ptr, ptr, env, i32)
++
+ DEF_HELPER_5(vzext_vf2_h, void, ptr, ptr, ptr, env, i32)
+ DEF_HELPER_5(vzext_vf2_w, void, ptr, ptr, ptr, env, i32)
+ DEF_HELPER_5(vzext_vf2_d, void, ptr, ptr, ptr, env, i32)
+diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
+index d205a05e83..b3d2a9113e 100644
+--- a/target/riscv/insn_trans/trans_rvv.c.inc
++++ b/target/riscv/insn_trans/trans_rvv.c.inc
+@@ -3014,7 +3014,8 @@ GEN_MM_TRANS(vmxnor_mm)
+ static bool trans_vpopc_m(DisasContext *s, arg_rmr *a)
+ {
+     if (require_rvv(s) &&
+-        vext_check_isa_ill(s)) {
++        vext_check_isa_ill(s) &&
++        s->vstart == 0) {
+         TCGv_ptr src2, mask;
+         TCGv dst;
+         TCGv_i32 desc;
+@@ -3047,7 +3048,8 @@ static bool trans_vpopc_m(DisasContext *s, arg_rmr *a)
+ static bool trans_vfirst_m(DisasContext *s, arg_rmr *a)
+ {
+     if (require_rvv(s) &&
+-        vext_check_isa_ill(s)) {
++        vext_check_isa_ill(s) &&
++        s->vstart == 0) {
+         TCGv_ptr src2, mask;
+         TCGv dst;
+         TCGv_i32 desc;
+@@ -3084,7 +3086,8 @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a)              \
+     if (require_rvv(s) &&                                          \
+         vext_check_isa_ill(s) &&                                   \
+         require_vm(a->vm, a->rd) &&                                \
+-        (a->rd != a->rs2)) {                                       \
++        (a->rd != a->rs2) &&                                       \
++        (s->vstart == 0)) {                                        \
+         uint32_t data = 0;                                         \
+         gen_helper_gvec_3_ptr *fn = gen_helper_##NAME;             \
+         TCGLabel *over = gen_new_label();                          \
+@@ -3119,7 +3122,8 @@ static bool trans_viota_m(DisasContext *s, arg_viota_m *a)
+         vext_check_isa_ill(s) &&
+         !is_overlapped(a->rd, 1 << MAX(s->lmul, 0), a->rs2, 1) &&
+         require_vm(a->vm, a->rd) &&
+-        require_align(a->rd, s->lmul)) {
++        require_align(a->rd, s->lmul) &&
++        (s->vstart == 0)) {
+         uint32_t data = 0;
+         TCGLabel *over = gen_new_label();
+         tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
+@@ -3341,6 +3345,7 @@ static bool trans_vmv_s_x(DisasContext *s, arg_vmv_s_x *a)
+         TCGLabel *over = gen_new_label();
+ 
+         tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
++        tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
+ 
+         t1 = tcg_temp_new_i64();
+         s1 = tcg_temp_new();
+@@ -3396,8 +3401,9 @@ static bool trans_vfmv_s_f(DisasContext *s, arg_vfmv_s_f *a)
+         TCGv_i64 t1;
+         TCGLabel *over = gen_new_label();
+ 
+-        /* if vl == 0, skip vector register write back */
++        /* if vl == 0 or vstart >= vl, skip vector register write back */
+         tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
++        tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
+ 
+         /* NaN-box f[rs1] */
+         t1 = tcg_temp_new_i64();
+@@ -3569,6 +3575,7 @@ static bool vcompress_vm_check(DisasContext *s, arg_r *a)
+            require_align(a->rs2, s->lmul) &&
+            (a->rd != a->rs2) &&
+            !is_overlapped(a->rd, 1 << MAX(s->lmul, 0), a->rs1, 1) &&
++           (s->vstart == 0);
+ }
+ 
+ static bool trans_vcompress_vm(DisasContext *s, arg_r *a)
+@@ -3597,26 +3604,40 @@ static bool trans_vcompress_vm(DisasContext *s, arg_r *a)
+  * Whole Vector Register Move Instructions ignore vtype and vl setting.
+  * Thus, we don't need to check vill bit. (Section 17.6)
+  */
+-#define GEN_VMV_WHOLE_TRANS(NAME, LEN)                          \
+-static bool trans_##NAME(DisasContext *s, arg_##NAME * a)       \
+-{                                                               \
+-    if (require_rvv(s) &&                                       \
+-        QEMU_IS_ALIGNED(a->rd, LEN) &&                          \
+-        QEMU_IS_ALIGNED(a->rs2, LEN)) {                         \
+-        /* EEW = 8 */                                           \
+-        tcg_gen_gvec_mov(MO_8, vreg_ofs(s, a->rd),              \
+-                         vreg_ofs(s, a->rs2),                   \
+-                         s->vlen / 8 * LEN, s->vlen / 8 * LEN); \
+-        mark_vs_dirty(s);                                       \
+-        return true;                                            \
+-    }                                                           \
+-    return false;                                               \
+-}
+-
+-GEN_VMV_WHOLE_TRANS(vmv1r_v, 1)
+-GEN_VMV_WHOLE_TRANS(vmv2r_v, 2)
+-GEN_VMV_WHOLE_TRANS(vmv4r_v, 4)
+-GEN_VMV_WHOLE_TRANS(vmv8r_v, 8)
++#define GEN_VMV_WHOLE_TRANS(NAME, LEN, SEQ)                             \
++static bool trans_##NAME(DisasContext *s, arg_##NAME * a)               \
++{                                                                       \
++    if (require_rvv(s) &&                                               \
++        QEMU_IS_ALIGNED(a->rd, LEN) &&                                  \
++        QEMU_IS_ALIGNED(a->rs2, LEN)) {                                 \
++        uint32_t maxsz = (s->vlen >> 3) * LEN;                          \
++        if (s->vstart == 0) {                                           \
++            /* EEW = 8 */                                               \
++            tcg_gen_gvec_mov(MO_8, vreg_ofs(s, a->rd),                  \
++                             vreg_ofs(s, a->rs2), maxsz, maxsz);        \
++            mark_vs_dirty(s);                                           \
++        } else {                                                        \
++            TCGLabel *over = gen_new_label();                           \
++            tcg_gen_brcondi_tl(TCG_COND_GEU, cpu_vstart, maxsz, over);  \
++                                                                        \
++            static gen_helper_gvec_2_ptr * const fns[4] = {             \
++                gen_helper_vmv1r_v, gen_helper_vmv2r_v,                 \
++                gen_helper_vmv4r_v, gen_helper_vmv8r_v,                 \
++            };                                                          \
++            tcg_gen_gvec_2_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2), \
++                               cpu_env, 0, maxsz, 0, fns[SEQ]);         \
++            mark_vs_dirty(s);                                           \
++            gen_set_label(over);                                        \
++        }                                                               \
++        return true;                                                    \
++    }                                                                   \
++    return false;                                                       \
++}
++
++GEN_VMV_WHOLE_TRANS(vmv1r_v, 1, 0)
++GEN_VMV_WHOLE_TRANS(vmv2r_v, 2, 1)
++GEN_VMV_WHOLE_TRANS(vmv4r_v, 4, 2)
++GEN_VMV_WHOLE_TRANS(vmv8r_v, 8, 3)
+ 
+ static bool int_ext_check(DisasContext *s, arg_rmr *a, uint8_t div)
+ {
+diff --git a/target/riscv/translate.c b/target/riscv/translate.c
+index 4dfb0a2a51..15305a3096 100644
+--- a/target/riscv/translate.c
++++ b/target/riscv/translate.c
+@@ -33,7 +33,7 @@
+ #include "internals.h"
+ 
+ /* global register indices */
+-static TCGv cpu_gpr[32], cpu_pc, cpu_vl;
++static TCGv cpu_gpr[32], cpu_pc, cpu_vl, cpu_vstart;
+ static TCGv_i64 cpu_fpr[32]; /* assume F and D extensions */
+ static TCGv load_res;
+ static TCGv load_val;
+@@ -77,6 +77,7 @@ typedef struct DisasContext {
+     int8_t lmul;
+     uint8_t sew;
+     uint16_t vlen;
++    target_ulong vstart;
+     bool vl_eq_vlmax;
+     CPUState *cs;
+ } DisasContext;
+@@ -712,6 +713,7 @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
+     ctx->vill = FIELD_EX32(tb_flags, TB_FLAGS, VILL);
+     ctx->sew = FIELD_EX32(tb_flags, TB_FLAGS, SEW);
+     ctx->lmul = sextract32(FIELD_EX32(tb_flags, TB_FLAGS, LMUL), 0, 3);
++    ctx->vstart = env->vstart;
+     ctx->vl_eq_vlmax = FIELD_EX32(tb_flags, TB_FLAGS, VL_EQ_VLMAX);
+     ctx->cs = cs;
+ }
+@@ -829,6 +831,8 @@ void riscv_translate_init(void)
+ 
+     cpu_pc = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, pc), "pc");
+     cpu_vl = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, vl), "vl");
++    cpu_vstart = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, vstart),
++                            "vstart");
+     load_res = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, load_res),
+                              "load_res");
+     load_val = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, load_val),
+diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
+index 6f4f30c5f9..7a2d2e7949 100644
+--- a/target/riscv/vector_helper.c
++++ b/target/riscv/vector_helper.c
+@@ -232,14 +232,14 @@ vext_ldst_stride(void *vd, void *v0, target_ulong base,
+     uint32_t max_elems = vext_max_elems(desc, esz);
+ 
+     /* probe every access*/
+-    for (i = 0; i < env->vl; i++) {
++    for (i = env->vstart; i < env->vl; i++) {
+         if (!vm && !vext_elem_mask(v0, i)) {
+             continue;
+         }
+         probe_pages(env, base + stride * i, nf << esz, ra, access_type);
+     }
+     /* do real access */
+-    for (i = 0; i < env->vl; i++) {
++    for (i = env->vstart; i < env->vl; i++) {
+         k = 0;
+         if (!vm && !vext_elem_mask(v0, i)) {
+             continue;
+@@ -249,7 +249,9 @@ vext_ldst_stride(void *vd, void *v0, target_ulong base,
+             ldst_elem(env, addr, i + k * max_elems, vd, ra);
+             k++;
+         }
++        env->vstart = i;
+     }
++    env->vstart = 0;
+ }
+ 
+ #define GEN_VEXT_LD_STRIDE(NAME, ETYPE, LOAD_FN)                        \
+@@ -299,14 +301,16 @@ vext_ldst_us(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc,
+     /* probe every access */
+     probe_pages(env, base, env->vl * (nf << esz), ra, access_type);
+     /* load bytes from guest memory */
+-    for (i = 0; i < env->vl; i++) {
++    for (i = env->vstart; i < env->vl; i++) {
+         k = 0;
+         while (k < nf) {
+             target_ulong addr = base + ((i * nf + k) << esz);
+             ldst_elem(env, addr, i + k * max_elems, vd, ra);
+             k++;
+         }
++        env->vstart = i;
+     }
++    env->vstart = 0;
+ }
+ 
+ /*
+@@ -387,7 +391,7 @@ vext_ldst_index(void *vd, void *v0, target_ulong base,
+     uint32_t max_elems = vext_max_elems(desc, esz);
+ 
+     /* probe every access*/
+-    for (i = 0; i < env->vl; i++) {
++    for (i = env->vstart; i < env->vl; i++) {
+         if (!vm && !vext_elem_mask(v0, i)) {
+             continue;
+         }
+@@ -395,7 +399,7 @@ vext_ldst_index(void *vd, void *v0, target_ulong base,
+                     access_type);
+     }
+     /* load bytes from guest memory */
+-    for (i = 0; i < env->vl; i++) {
++    for (i = env->vstart; i < env->vl; i++) {
+         k = 0;
+         if (!vm && !vext_elem_mask(v0, i)) {
+             continue;
+@@ -405,7 +409,9 @@ vext_ldst_index(void *vd, void *v0, target_ulong base,
+             ldst_elem(env, addr, i + k * max_elems, vd, ra);
+             k++;
+         }
++        env->vstart = i;
+     }
++    env->vstart = 0;
+ }
+ 
+ #define GEN_VEXT_LD_INDEX(NAME, ETYPE, INDEX_FN, LOAD_FN)                  \
+@@ -476,7 +482,7 @@ vext_ldff(void *vd, void *v0, target_ulong base,
+     target_ulong addr, offset, remain;
+ 
+     /* probe every access*/
+-    for (i = 0; i < env->vl; i++) {
++    for (i = env->vstart; i < env->vl; i++) {
+         if (!vm && !vext_elem_mask(v0, i)) {
+             continue;
+         }
+@@ -516,7 +522,7 @@ ProbeSuccess:
+     if (vl != 0) {
+         env->vl = vl;
+     }
+-    for (i = 0; i < env->vl; i++) {
++    for (i = env->vstart; i < env->vl; i++) {
+         k = 0;
+         if (!vm && !vext_elem_mask(v0, i)) {
+             continue;
+@@ -527,6 +533,7 @@ ProbeSuccess:
+             k++;
+         }
+     }
++    env->vstart = 0;
+ }
+ 
+ #define GEN_VEXT_LDFF(NAME, ETYPE, LOAD_FN)               \
+@@ -550,21 +557,37 @@ vext_ldst_whole(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc,
+                 vext_ldst_elem_fn *ldst_elem, uint32_t esz, uintptr_t ra,
+                 MMUAccessType access_type)
+ {
+-    uint32_t i, k;
++    uint32_t i, k, off, pos;
+     uint32_t nf = vext_nf(desc);
+     uint32_t vlenb = env_archcpu(env)->cfg.vlen >> 3;
+     uint32_t max_elems = vlenb >> esz;
+ 
+     /* probe every access */
+-    probe_pages(env, base, vlenb * nf, ra, access_type);
++    probe_pages(env, base, max_elems * nf, ra, access_type);
+ 
+-    /* load bytes from guest memory */
+-    for (k = 0; k < nf; k++) {
++    k = env->vstart / max_elems;
++    off = env->vstart % max_elems;
++
++    if (off) {
++        /* load/store rest of elements of current segment pointed to by vstart */
++        for (pos = off; pos < max_elems; pos++) {
++            target_ulong addr = base + ((pos + k * max_elems) << esz);
++            ldst_elem(env, addr, pos + k * max_elems, vd, ra);
++            env->vstart++;
++        }
++        k++;
++    }
++
++    /* load/store elements for rest of segments */
++    for (; k < nf; k++) {
+         for (i = 0; i < max_elems; i++) {
+             target_ulong addr = base + ((i + k * max_elems) << esz);
+             ldst_elem(env, addr, i + k * max_elems, vd, ra);
++            env->vstart++;
+         }
+     }
++
++    env->vstart = 0;
+ }
+ 
+ #define GEN_VEXT_LD_WHOLE(NAME, ETYPE, LOAD_FN)      \
+@@ -725,20 +748,21 @@ vext_amo_noatomic(void *vs3, void *v0, target_ulong base,
+     uint32_t wd = vext_wd(desc);
+     uint32_t vm = vext_vm(desc);
+ 
+-    for (i = 0; i < env->vl; i++) {
++    for (i = env->vstart; i < env->vl; i++) {
+         if (!vm && !vext_elem_mask(v0, i)) {
+             continue;
+         }
+         probe_pages(env, get_index_addr(base, i, vs2), esz, ra, MMU_DATA_LOAD);
+         probe_pages(env, get_index_addr(base, i, vs2), esz, ra, MMU_DATA_STORE);
+     }
+-    for (i = 0; i < env->vl; i++) {
++    for (i = env->vstart; i < env->vl; i++) {
+         if (!vm && !vext_elem_mask(v0, i)) {
+             continue;
+         }
+         addr = get_index_addr(base, i, vs2);
+         noatomic_op(vs3, addr, wd, i, env, ra);
+     }
++    env->vstart = 0;
+ }
+ 
+ #define GEN_VEXT_AMO(NAME, ETYPE, INDEX_FN)                     \
+@@ -895,12 +919,13 @@ static void do_vext_vv(void *vd, void *v0, void *vs1, void *vs2,
+     uint32_t vl = env->vl;
+     uint32_t i;
+ 
+-    for (i = 0; i < vl; i++) {
++    for (i = env->vstart; i < vl; i++) {
+         if (!vm && !vext_elem_mask(v0, i)) {
+             continue;
+         }
+         fn(vd, vs1, vs2, i);
+     }
++    env->vstart = 0;
+ }
+ 
+ /* generate the helpers for OPIVV */
+@@ -957,12 +982,13 @@ static void do_vext_vx(void *vd, void *v0, target_long s1, void *vs2,
+     uint32_t vl = env->vl;
+     uint32_t i;
+ 
+-    for (i = 0; i < vl; i++) {
++    for (i = env->vstart; i < vl; i++) {
+         if (!vm && !vext_elem_mask(v0, i)) {
+             continue;
+         }
+         fn(vd, s1, vs2, i);
+     }
++    env->vstart = 0;
+ }
+ 
+ /* generate the helpers for OPIVX */
+@@ -1150,13 +1176,14 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2,   \
+     uint32_t vl = env->vl;                                    \
+     uint32_t i;                                               \
+                                                               \
+-    for (i = 0; i < vl; i++) {                                \
++    for (i = env->vstart; i < vl; i++) {                      \
+         ETYPE s1 = *((ETYPE *)vs1 + H(i));                    \
+         ETYPE s2 = *((ETYPE *)vs2 + H(i));                    \
+         ETYPE carry = vext_elem_mask(v0, i);                  \
+                                                               \
+         *((ETYPE *)vd + H(i)) = DO_OP(s2, s1, carry);         \
+     }                                                         \
++    env->vstart = 0;                                          \
+ }
+ 
+ GEN_VEXT_VADC_VVM(vadc_vvm_b, uint8_t,  H1, DO_VADC)
+@@ -1176,12 +1203,13 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2,        \
+     uint32_t vl = env->vl;                                               \
+     uint32_t i;                                                          \
+                                                                          \
+-    for (i = 0; i < vl; i++) {                                           \
++    for (i = env->vstart; i < vl; i++) {                                 \
+         ETYPE s2 = *((ETYPE *)vs2 + H(i));                               \
+         ETYPE carry = vext_elem_mask(v0, i);                             \
+                                                                          \
+         *((ETYPE *)vd + H(i)) = DO_OP(s2, (ETYPE)(target_long)s1, carry);\
+     }                                                                    \
++    env->vstart = 0;                                          \
+ }
+ 
+ GEN_VEXT_VADC_VXM(vadc_vxm_b, uint8_t,  H1, DO_VADC)
+@@ -1206,12 +1234,13 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2,   \
+     uint32_t vm = vext_vm(desc);                              \
+     uint32_t i;                                               \
+                                                               \
+-    for (i = 0; i < vl; i++) {                                \
++    for (i = env->vstart; i < vl; i++) {                      \
+         ETYPE s1 = *((ETYPE *)vs1 + H(i));                    \
+         ETYPE s2 = *((ETYPE *)vs2 + H(i));                    \
+         ETYPE carry = !vm && vext_elem_mask(v0, i);           \
+         vext_set_elem_mask(vd, i, DO_OP(s2, s1, carry));      \
+     }                                                         \
++    env->vstart = 0;                                          \
+ }
+ 
+ GEN_VEXT_VMADC_VVM(vmadc_vvm_b, uint8_t,  H1, DO_MADC)
+@@ -1232,12 +1261,13 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1,          \
+     uint32_t vm = vext_vm(desc);                                \
+     uint32_t i;                                                 \
+                                                                 \
+-    for (i = 0; i < vl; i++) {                                  \
++    for (i = env->vstart; i < vl; i++) {                        \
+         ETYPE s2 = *((ETYPE *)vs2 + H(i));                      \
+         ETYPE carry = !vm && vext_elem_mask(v0, i);             \
+         vext_set_elem_mask(vd, i,                               \
+                 DO_OP(s2, (ETYPE)(target_long)s1, carry));      \
+     }                                                           \
++    env->vstart = 0;                                            \
+ }
+ 
+ GEN_VEXT_VMADC_VXM(vmadc_vxm_b, uint8_t,  H1, DO_MADC)
+@@ -1314,7 +1344,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1,                          \
+     uint32_t vl = env->vl;                                                \
+     uint32_t i;                                                           \
+                                                                           \
+-    for (i = 0; i < vl; i++) {                                            \
++    for (i = env->vstart; i < vl; i++) {                                  \
+         if (!vm && !vext_elem_mask(v0, i)) {                              \
+             continue;                                                     \
+         }                                                                 \
+@@ -1322,6 +1352,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1,                          \
+         TS2 s2 = *((TS2 *)vs2 + HS2(i));                                  \
+         *((TS1 *)vd + HS1(i)) = OP(s2, s1 & MASK);                        \
+     }                                                                     \
++    env->vstart = 0;                                                      \
+ }
+ 
+ GEN_VEXT_SHIFT_VV(vsll_vv_b, uint8_t,  uint8_t, H1, H1, DO_SLL, 0x7)
+@@ -1348,13 +1379,14 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1,      \
+     uint32_t vl = env->vl;                                  \
+     uint32_t i;                                             \
+                                                             \
+-    for (i = 0; i < vl; i++) {                              \
++    for (i = env->vstart; i < vl; i++) {                    \
+         if (!vm && !vext_elem_mask(v0, i)) {                \
+             continue;                                       \
+         }                                                   \
+         TS2 s2 = *((TS2 *)vs2 + HS2(i));                    \
+         *((TD *)vd + HD(i)) = OP(s2, s1 & MASK);            \
+     }                                                       \
++    env->vstart = 0;                                        \
+ }
+ 
+ GEN_VEXT_SHIFT_VX(vsll_vx_b, uint8_t, int8_t, H1, H1, DO_SLL, 0x7)
+@@ -1401,7 +1433,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2,   \
+     uint32_t vl = env->vl;                                    \
+     uint32_t i;                                               \
+                                                               \
+-    for (i = 0; i < vl; i++) {                                \
++    for (i = env->vstart; i < vl; i++) {                      \
+         ETYPE s1 = *((ETYPE *)vs1 + H(i));                    \
+         ETYPE s2 = *((ETYPE *)vs2 + H(i));                    \
+         if (!vm && !vext_elem_mask(v0, i)) {                  \
+@@ -1409,6 +1441,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2,   \
+         }                                                     \
+         vext_set_elem_mask(vd, i, DO_OP(s2, s1));             \
+     }                                                         \
++    env->vstart = 0;                                          \
+ }
+ 
+ GEN_VEXT_CMP_VV(vmseq_vv_b, uint8_t,  H1, DO_MSEQ)
+@@ -1449,7 +1482,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2,   \
+     uint32_t vl = env->vl;                                          \
+     uint32_t i;                                                     \
+                                                                     \
+-    for (i = 0; i < vl; i++) {                                      \
++    for (i = env->vstart; i < vl; i++) {                            \
+         ETYPE s2 = *((ETYPE *)vs2 + H(i));                          \
+         if (!vm && !vext_elem_mask(v0, i)) {                        \
+             continue;                                               \
+@@ -1457,6 +1490,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2,   \
+         vext_set_elem_mask(vd, i,                                   \
+                 DO_OP(s2, (ETYPE)(target_long)s1));                 \
+     }                                                               \
++    env->vstart = 0;                                                \
+ }
+ 
+ GEN_VEXT_CMP_VX(vmseq_vx_b, uint8_t,  H1, DO_MSEQ)
+@@ -1979,10 +2013,11 @@ void HELPER(NAME)(void *vd, void *vs1, CPURISCVState *env,           \
+     uint32_t vl = env->vl;                                           \
+     uint32_t i;                                                      \
+                                                                      \
+-    for (i = 0; i < vl; i++) {                                       \
++    for (i = env->vstart; i < vl; i++) {                             \
+         ETYPE s1 = *((ETYPE *)vs1 + H(i));                           \
+         *((ETYPE *)vd + H(i)) = s1;                                  \
+     }                                                                \
++    env->vstart = 0;                                                 \
+ }
+ 
+ GEN_VEXT_VMV_VV(vmv_v_v_b, int8_t,  H1)
+@@ -1997,9 +2032,10 @@ void HELPER(NAME)(void *vd, uint64_t s1, CPURISCVState *env,         \
+     uint32_t vl = env->vl;                                           \
+     uint32_t i;                                                      \
+                                                                      \
+-    for (i = 0; i < vl; i++) {                                       \
++    for (i = env->vstart; i < vl; i++) {                             \
+         *((ETYPE *)vd + H(i)) = (ETYPE)s1;                           \
+     }                                                                \
++    env->vstart = 0;                                                 \
+ }
+ 
+ GEN_VEXT_VMV_VX(vmv_v_x_b, int8_t,  H1)
+@@ -2014,10 +2050,11 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2,          \
+     uint32_t vl = env->vl;                                           \
+     uint32_t i;                                                      \
+                                                                      \
+-    for (i = 0; i < vl; i++) {                                       \
++    for (i = env->vstart; i < vl; i++) {                             \
+         ETYPE *vt = (!vext_elem_mask(v0, i) ? vs2 : vs1);            \
+         *((ETYPE *)vd + H(i)) = *(vt + H(i));                        \
+     }                                                                \
++    env->vstart = 0;                                                 \
+ }
+ 
+ GEN_VEXT_VMERGE_VV(vmerge_vvm_b, int8_t,  H1)
+@@ -2032,12 +2069,13 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1,               \
+     uint32_t vl = env->vl;                                           \
+     uint32_t i;                                                      \
+                                                                      \
+-    for (i = 0; i < vl; i++) {                                       \
++    for (i = env->vstart; i < vl; i++) {                             \
+         ETYPE s2 = *((ETYPE *)vs2 + H(i));                           \
+         ETYPE d = (!vext_elem_mask(v0, i) ? s2 :                     \
+                    (ETYPE)(target_long)s1);                          \
+         *((ETYPE *)vd + H(i)) = d;                                   \
+     }                                                                \
++    env->vstart = 0;                                                 \
+ }
+ 
+ GEN_VEXT_VMERGE_VX(vmerge_vxm_b, int8_t,  H1)
+@@ -2074,12 +2112,13 @@ vext_vv_rm_1(void *vd, void *v0, void *vs1, void *vs2,
+              uint32_t vl, uint32_t vm, int vxrm,
+              opivv2_rm_fn *fn)
+ {
+-    for (uint32_t i = 0; i < vl; i++) {
++    for (uint32_t i = env->vstart; i < vl; i++) {
+         if (!vm && !vext_elem_mask(v0, i)) {
+             continue;
+         }
+         fn(vd, vs1, vs2, i, env, vxrm);
+     }
++    env->vstart = 0;
+ }
+ 
+ static inline void
+@@ -2190,12 +2229,13 @@ vext_vx_rm_1(void *vd, void *v0, target_long s1, void *vs2,
+              uint32_t vl, uint32_t vm, int vxrm,
+              opivx2_rm_fn *fn)
+ {
+-    for (uint32_t i = 0; i < vl; i++) {
++    for (uint32_t i = env->vstart; i < vl; i++) {
+         if (!vm && !vext_elem_mask(v0, i)) {
+             continue;
+         }
+         fn(vd, s1, vs2, i, env, vxrm);
+     }
++    env->vstart = 0;
+ }
+ 
+ static inline void
+@@ -2977,12 +3017,13 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1,          \
+     uint32_t vl = env->vl;                                \
+     uint32_t i;                                           \
+                                                           \
+-    for (i = 0; i < vl; i++) {                            \
++    for (i = env->vstart; i < vl; i++) {                  \
+         if (!vm && !vext_elem_mask(v0, i)) {              \
+             continue;                                     \
+         }                                                 \
+         do_##NAME(vd, vs1, vs2, i, env);                  \
+     }                                                     \
++    env->vstart = 0;                                      \
+ }
+ 
+ RVVCALL(OPFVV2, vfadd_vv_h, OP_UUU_H, H2, H2, H2, float16_add)
+@@ -3009,12 +3050,13 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1,        \
+     uint32_t vl = env->vl;                                \
+     uint32_t i;                                           \
+                                                           \
+-    for (i = 0; i < vl; i++) {                            \
++    for (i = env->vstart; i < vl; i++) {                  \
+         if (!vm && !vext_elem_mask(v0, i)) {              \
+             continue;                                     \
+         }                                                 \
+         do_##NAME(vd, s1, vs2, i, env);                   \
+     }                                                     \
++    env->vstart = 0;                                      \
+ }
+ 
+ RVVCALL(OPFVF2, vfadd_vf_h, OP_UUU_H, H2, H2, float16_add)
+@@ -3580,12 +3622,13 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2,       \
+     if (vl == 0) {                                     \
+         return;                                        \
+     }                                                  \
+-    for (i = 0; i < vl; i++) {                         \
++    for (i = env->vstart; i < vl; i++) {               \
+         if (!vm && !vext_elem_mask(v0, i)) {           \
+             continue;                                  \
+         }                                              \
+         do_##NAME(vd, vs2, i, env);                    \
+     }                                                  \
++    env->vstart = 0;                                   \
+ }
+ 
+ RVVCALL(OPFVV1, vfsqrt_v_h, OP_UU_H, H2, H2, float16_sqrt)
+@@ -3716,7 +3759,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2,   \
+     uint32_t vl = env->vl;                                    \
+     uint32_t i;                                               \
+                                                               \
+-    for (i = 0; i < vl; i++) {                                \
++    for (i = env->vstart; i < vl; i++) {                      \
+         ETYPE s1 = *((ETYPE *)vs1 + H(i));                    \
+         ETYPE s2 = *((ETYPE *)vs2 + H(i));                    \
+         if (!vm && !vext_elem_mask(v0, i)) {                  \
+@@ -3725,6 +3768,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2,   \
+         vext_set_elem_mask(vd, i,                             \
+                            DO_OP(s2, s1, &env->fp_status));   \
+     }                                                         \
++    env->vstart = 0;                                          \
+ }
+ 
+ GEN_VEXT_CMP_VV_ENV(vmfeq_vv_h, uint16_t, H2, float16_eq_quiet)
+@@ -3739,7 +3783,7 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2,       \
+     uint32_t vl = env->vl;                                          \
+     uint32_t i;                                                     \
+                                                                     \
+-    for (i = 0; i < vl; i++) {                                      \
++    for (i = env->vstart; i < vl; i++) {                            \
+         ETYPE s2 = *((ETYPE *)vs2 + H(i));                          \
+         if (!vm && !vext_elem_mask(v0, i)) {                        \
+             continue;                                               \
+@@ -3747,6 +3791,7 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2,       \
+         vext_set_elem_mask(vd, i,                                   \
+                            DO_OP(s2, (ETYPE)s1, &env->fp_status));  \
+     }                                                               \
++    env->vstart = 0;                                                \
+ }
+ 
+ GEN_VEXT_CMP_VF(vmfeq_vf_h, uint16_t, H2, float16_eq_quiet)
+@@ -3855,12 +3900,13 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2,       \
+     uint32_t vl = env->vl;                             \
+     uint32_t i;                                        \
+                                                        \
+-    for (i = 0; i < vl; i++) {                         \
++    for (i = env->vstart; i < vl; i++) {               \
+         if (!vm && !vext_elem_mask(v0, i)) {           \
+             continue;                                  \
+         }                                              \
+         do_##NAME(vd, vs2, i);                         \
+     }                                                  \
++    env->vstart = 0;                                   \
+ }
+ 
+ target_ulong fclass_h(uint64_t frs1)
+@@ -3936,11 +3982,12 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \
+     uint32_t vl = env->vl;                                    \
+     uint32_t i;                                               \
+                                                               \
+-    for (i = 0; i < vl; i++) {                                \
++    for (i = env->vstart; i < vl; i++) {                      \
+         ETYPE s2 = *((ETYPE *)vs2 + H(i));                    \
+         *((ETYPE *)vd + H(i))                                 \
+           = (!vm && !vext_elem_mask(v0, i) ? s2 : s1);        \
+     }                                                         \
++    env->vstart = 0;                                          \
+ }
+ 
+ GEN_VFMERGE_VF(vfmerge_vfm_h, int16_t, H2)
+@@ -4084,7 +4131,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1,          \
+     uint32_t i;                                           \
+     TD s1 =  *((TD *)vs1 + HD(0));                        \
+                                                           \
+-    for (i = 0; i < vl; i++) {                            \
++    for (i = env->vstart; i < vl; i++) {                  \
+         TS2 s2 = *((TS2 *)vs2 + HS2(i));                  \
+         if (!vm && !vext_elem_mask(v0, i)) {              \
+             continue;                                     \
+@@ -4092,6 +4139,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1,          \
+         s1 = OP(s1, (TD)s2);                              \
+     }                                                     \
+     *((TD *)vd + HD(0)) = s1;                             \
++    env->vstart = 0;                                      \
+ }
+ 
+ /* vd[0] = sum(vs1[0], vs2[*]) */
+@@ -4164,7 +4212,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1,           \
+     uint32_t i;                                            \
+     TD s1 =  *((TD *)vs1 + HD(0));                         \
+                                                            \
+-    for (i = 0; i < vl; i++) {                             \
++    for (i = env->vstart; i < vl; i++) {                   \
+         TS2 s2 = *((TS2 *)vs2 + HS2(i));                   \
+         if (!vm && !vext_elem_mask(v0, i)) {               \
+             continue;                                      \
+@@ -4172,6 +4220,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1,           \
+         s1 = OP(s1, (TD)s2, &env->fp_status);              \
+     }                                                      \
+     *((TD *)vd + HD(0)) = s1;                              \
++    env->vstart = 0;                                       \
+ }
+ 
+ /* Unordered sum */
+@@ -4199,7 +4248,7 @@ void HELPER(vfwredsum_vs_h)(void *vd, void *v0, void *vs1,
+     uint32_t i;
+     uint32_t s1 =  *((uint32_t *)vs1 + H4(0));
+ 
+-    for (i = 0; i < vl; i++) {
++    for (i = env->vstart; i < vl; i++) {
+         uint16_t s2 = *((uint16_t *)vs2 + H2(i));
+         if (!vm && !vext_elem_mask(v0, i)) {
+             continue;
+@@ -4208,6 +4257,7 @@ void HELPER(vfwredsum_vs_h)(void *vd, void *v0, void *vs1,
+                          &env->fp_status);
+     }
+     *((uint32_t *)vd + H4(0)) = s1;
++    env->vstart = 0;
+ }
+ 
+ void HELPER(vfwredsum_vs_w)(void *vd, void *v0, void *vs1,
+@@ -4218,7 +4268,7 @@ void HELPER(vfwredsum_vs_w)(void *vd, void *v0, void *vs1,
+     uint32_t i;
+     uint64_t s1 =  *((uint64_t *)vs1);
+ 
+-    for (i = 0; i < vl; i++) {
++    for (i = env->vstart; i < vl; i++) {
+         uint32_t s2 = *((uint32_t *)vs2 + H4(i));
+         if (!vm && !vext_elem_mask(v0, i)) {
+             continue;
+@@ -4227,6 +4277,7 @@ void HELPER(vfwredsum_vs_w)(void *vd, void *v0, void *vs1,
+                          &env->fp_status);
+     }
+     *((uint64_t *)vd) = s1;
++    env->vstart = 0;
+ }
+ 
+ /*
+@@ -4242,11 +4293,12 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1,          \
+     uint32_t i;                                           \
+     int a, b;                                             \
+                                                           \
+-    for (i = 0; i < vl; i++) {                            \
++    for (i = env->vstart; i < vl; i++) {                  \
+         a = vext_elem_mask(vs1, i);                       \
+         b = vext_elem_mask(vs2, i);                       \
+         vext_set_elem_mask(vd, i, OP(b, a));              \
+     }                                                     \
++    env->vstart = 0;                                      \
+ }
+ 
+ #define DO_NAND(N, M)  (!(N & M))
+@@ -4273,13 +4325,14 @@ target_ulong HELPER(vpopc_m)(void *v0, void *vs2, CPURISCVState *env,
+     uint32_t vl = env->vl;
+     int i;
+ 
+-    for (i = 0; i < vl; i++) {
++    for (i = env->vstart; i < vl; i++) {
+         if (vm || vext_elem_mask(v0, i)) {
+             if (vext_elem_mask(vs2, i)) {
+                 cnt++;
+             }
+         }
+     }
++    env->vstart = 0;
+     return cnt;
+ }
+ 
+@@ -4291,13 +4344,14 @@ target_ulong HELPER(vfirst_m)(void *v0, void *vs2, CPURISCVState *env,
+     uint32_t vl = env->vl;
+     int i;
+ 
+-    for (i = 0; i < vl; i++) {
++    for (i = env->vstart; i < vl; i++) {
+         if (vm || vext_elem_mask(v0, i)) {
+             if (vext_elem_mask(vs2, i)) {
+                 return i;
+             }
+         }
+     }
++    env->vstart = 0;
+     return -1LL;
+ }
+ 
+@@ -4315,7 +4369,7 @@ static void vmsetm(void *vd, void *v0, void *vs2, CPURISCVState *env,
+     int i;
+     bool first_mask_bit = false;
+ 
+-    for (i = 0; i < vl; i++) {
++    for (i = env->vstart; i < vl; i++) {
+         if (!vm && !vext_elem_mask(v0, i)) {
+             continue;
+         }
+@@ -4339,6 +4393,7 @@ static void vmsetm(void *vd, void *v0, void *vs2, CPURISCVState *env,
+             }
+         }
+     }
++    env->vstart = 0;
+ }
+ 
+ void HELPER(vmsbf_m)(void *vd, void *v0, void *vs2, CPURISCVState *env,
+@@ -4369,7 +4424,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, CPURISCVState *env,      \
+     uint32_t sum = 0;                                                     \
+     int i;                                                                \
+                                                                           \
+-    for (i = 0; i < vl; i++) {                                            \
++    for (i = env->vstart; i < vl; i++) {                                  \
+         if (!vm && !vext_elem_mask(v0, i)) {                              \
+             continue;                                                     \
+         }                                                                 \
+@@ -4378,6 +4433,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, CPURISCVState *env,      \
+             sum++;                                                        \
+         }                                                                 \
+     }                                                                     \
++    env->vstart = 0;                                                      \
+ }
+ 
+ GEN_VEXT_VIOTA_M(viota_m_b, uint8_t,  H1)
+@@ -4393,12 +4449,13 @@ void HELPER(NAME)(void *vd, void *v0, CPURISCVState *env, uint32_t desc)  \
+     uint32_t vl = env->vl;                                                \
+     int i;                                                                \
+                                                                           \
+-    for (i = 0; i < vl; i++) {                                            \
++    for (i = env->vstart; i < vl; i++) {                                  \
+         if (!vm && !vext_elem_mask(v0, i)) {                              \
+             continue;                                                     \
+         }                                                                 \
+         *((ETYPE *)vd + H(i)) = i;                                        \
+     }                                                                     \
++    env->vstart = 0;                                                      \
+ }
+ 
+ GEN_VEXT_VID_V(vid_v_b, uint8_t,  H1)
+@@ -4417,9 +4474,10 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2,         \
+ {                                                                         \
+     uint32_t vm = vext_vm(desc);                                          \
+     uint32_t vl = env->vl;                                                \
+-    target_ulong offset = s1, i;                                          \
++    target_ulong offset = s1, i_min, i;                                   \
+                                                                           \
+-    for (i = offset; i < vl; i++) {                                       \
++    i_min = MAX(env->vstart, offset);                                     \
++    for (i = i_min; i < vl; i++) {                                        \
+         if (!vm && !vext_elem_mask(v0, i)) {                              \
+             continue;                                                     \
+         }                                                                 \
+@@ -4442,8 +4500,8 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2,         \
+     uint32_t vl = env->vl;                                                \
+     target_ulong i_max, i;                                                \
+                                                                           \
+-    i_max = MIN(s1 < vlmax ? vlmax - s1 : 0, vl);                         \
+-    for (i = 0; i < i_max; ++i) {                                         \
++    i_max = MAX(MIN(s1 < vlmax ? vlmax - s1 : 0, vl), env->vstart);       \
++    for (i = env->vstart; i < i_max; ++i) {                               \
+         if (vm || vext_elem_mask(v0, i)) {                                \
+             *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i + s1));          \
+         }                                                                 \
+@@ -4454,6 +4512,8 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2,         \
+             *((ETYPE *)vd + H(i)) = 0;                                    \
+         }                                                                 \
+     }                                                                     \
++                                                                          \
++    env->vstart = 0;                                                      \
+ }
+ 
+ /* vslidedown.vx vd, vs2, rs1, vm # vd[i] = vs2[i+rs1] */
+@@ -4471,7 +4531,7 @@ static void vslide1up_##ESZ(void *vd, void *v0, target_ulong s1, void *vs2, \
+     uint32_t vl = env->vl;                                                  \
+     uint32_t i;                                                             \
+                                                                             \
+-    for (i = 0; i < vl; i++) {                                              \
++    for (i = env->vstart; i < vl; i++) {                                    \
+         if (!vm && !vext_elem_mask(v0, i)) {                                \
+             continue;                                                       \
+         }                                                                   \
+@@ -4481,6 +4541,7 @@ static void vslide1up_##ESZ(void *vd, void *v0, target_ulong s1, void *vs2, \
+             *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i - 1));             \
+         }                                                                   \
+     }                                                                       \
++    env->vstart = 0;                                                        \
+ }
+ 
+ GEN_VEXT_VSLIE1UP(8,  H1)
+@@ -4510,7 +4571,7 @@ static void vslide1down_##ESZ(void *vd, void *v0, target_ulong s1, void *vs2, \
+     uint32_t vl = env->vl;                                                    \
+     uint32_t i;                                                               \
+                                                                               \
+-    for (i = 0; i < vl; i++) {                                                \
++    for (i = env->vstart; i < vl; i++) {                                      \
+         if (!vm && !vext_elem_mask(v0, i)) {                                  \
+             continue;                                                         \
+         }                                                                     \
+@@ -4520,6 +4581,7 @@ static void vslide1down_##ESZ(void *vd, void *v0, target_ulong s1, void *vs2, \
+             *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i + 1));               \
+         }                                                                     \
+     }                                                                         \
++    env->vstart = 0;                                                          \
+ }
+ 
+ GEN_VEXT_VSLIDE1DOWN(8,  H1)
+@@ -4570,13 +4632,13 @@ GEN_VEXT_VFSLIDE1DOWN_VF(vfslide1down_vf_d, 64)
+ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2,               \
+                   CPURISCVState *env, uint32_t desc)                      \
+ {                                                                         \
+-    uint32_t vlmax = vext_max_elems(desc, ctzl(sizeof(TS1)));             \
++    uint32_t vlmax = vext_max_elems(desc, ctzl(sizeof(TS2)));             \
+     uint32_t vm = vext_vm(desc);                                          \
+     uint32_t vl = env->vl;                                                \
+     uint64_t index;                                                       \
+     uint32_t i;                                                           \
+                                                                           \
+-    for (i = 0; i < vl; i++) {                                            \
++    for (i = env->vstart; i < vl; i++) {                                  \
+         if (!vm && !vext_elem_mask(v0, i)) {                              \
+             continue;                                                     \
+         }                                                                 \
+@@ -4587,6 +4649,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2,               \
+             *((TS2 *)vd + HS2(i)) = *((TS2 *)vs2 + HS2(index));           \
+         }                                                                 \
+     }                                                                     \
++    env->vstart = 0;                                                      \
+ }
+ 
+ /* vd[i] = (vs1[i] >= VLMAX) ? 0 : vs2[vs1[i]]; */
+@@ -4610,7 +4673,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2,         \
+     uint64_t index = s1;                                                  \
+     uint32_t i;                                                           \
+                                                                           \
+-    for (i = 0; i < vl; i++) {                                            \
++    for (i = env->vstart; i < vl; i++) {                                  \
+         if (!vm && !vext_elem_mask(v0, i)) {                              \
+             continue;                                                     \
+         }                                                                 \
+@@ -4620,6 +4683,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2,         \
+             *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(index));           \
+         }                                                                 \
+     }                                                                     \
++    env->vstart = 0;                                                      \
+ }
+ 
+ /* vd[i] = (x[rs1] >= VLMAX) ? 0 : vs2[rs1] */
+@@ -4636,13 +4700,14 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2,               \
+     uint32_t vl = env->vl;                                                \
+     uint32_t num = 0, i;                                                  \
+                                                                           \
+-    for (i = 0; i < vl; i++) {                                            \
++    for (i = env->vstart; i < vl; i++) {                                  \
+         if (!vext_elem_mask(vs1, i)) {                                    \
+             continue;                                                     \
+         }                                                                 \
+         *((ETYPE *)vd + H(num)) = *((ETYPE *)vs2 + H(i));                 \
+         num++;                                                            \
+     }                                                                     \
++    env->vstart = 0;                                                      \
+ }
+ 
+ /* Compress into vd elements of vs2 where vs1 is enabled */
+@@ -4651,6 +4716,27 @@ GEN_VEXT_VCOMPRESS_VM(vcompress_vm_h, uint16_t, H2)
+ GEN_VEXT_VCOMPRESS_VM(vcompress_vm_w, uint32_t, H4)
+ GEN_VEXT_VCOMPRESS_VM(vcompress_vm_d, uint64_t, H8)
+ 
++/* Vector Whole Register Move */
++#define GEN_VEXT_VMV_WHOLE(NAME, LEN)                      \
++void HELPER(NAME)(void *vd, void *vs2, CPURISCVState *env, \
++                  uint32_t desc)                           \
++{                                                          \
++    /* EEW = 8 */                                          \
++    uint32_t maxsz = simd_maxsz(desc);                     \
++    uint32_t i = env->vstart;                              \
++                                                           \
++    memcpy((uint8_t *)vd + H1(i),                          \
++           (uint8_t *)vs2 + H1(i),                         \
++           maxsz - env->vstart);                           \
++                                                           \
++    env->vstart = 0;                                       \
++}
++
++GEN_VEXT_VMV_WHOLE(vmv1r_v, 1)
++GEN_VEXT_VMV_WHOLE(vmv2r_v, 2)
++GEN_VEXT_VMV_WHOLE(vmv4r_v, 4)
++GEN_VEXT_VMV_WHOLE(vmv8r_v, 8)
++
+ /* Vector Integer Extension */
+ #define GEN_VEXT_INT_EXT(NAME, ETYPE, DTYPE, HD, HS1)            \
+ void HELPER(NAME)(void *vd, void *v0, void *vs2,                 \
+@@ -4660,12 +4746,13 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2,                 \
+     uint32_t vm = vext_vm(desc);                                 \
+     uint32_t i;                                                  \
+                                                                  \
+-    for (i = 0; i < vl; i++) {                                   \
++    for (i = env->vstart; i < vl; i++) {                         \
+         if (!vm && !vext_elem_mask(v0, i)) {                     \
+             continue;                                            \
+         }                                                        \
+         *((ETYPE *)vd + HD(i)) = *((DTYPE *)vs2 + HS1(i));       \
+     }                                                            \
++    env->vstart = 0;                                             \
+ }
+ 
+ GEN_VEXT_INT_EXT(vzext_vf2_h, uint16_t, uint8_t,  H2, H1)
+-- 
+2.33.1
+
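Note: the hunks above all apply one transformation: every element loop resumes from env->vstart instead of 0 and clears vstart once the helper completes, so a vector instruction interrupted by a trap can be restarted mid-vector. A minimal standalone sketch of that pattern, using plain arrays and illustrative names rather than QEMU's types:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative only: resume an element-wise op from 'vstart' and
     * clear it on completion, mirroring the helper changes above. */
    static uint32_t vstart;   /* lives in CPURISCVState in QEMU */

    static void vadd_vx(uint32_t *vd, const uint32_t *vs2, uint32_t rs1,
                        uint32_t vl)
    {
        for (uint32_t i = vstart; i < vl; i++) {   /* was: i = 0 */
            vd[i] = vs2[i] + rs1;
        }
        vstart = 0;   /* vstart only applies to the restarted execution */
    }

    int main(void)
    {
        uint32_t vs2[4] = {1, 2, 3, 4}, vd[4] = {0};
        vstart = 2;               /* pretend a trap stopped us at element 2 */
        vadd_vx(vd, vs2, 10, 4);  /* only elements 2 and 3 are (re)written */
        printf("%u %u %u %u\n", vd[0], vd[1], vd[2], vd[3]);
        return 0;
    }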

+ 98 - 0
recipes-devtools/qemu/qemu/0074-target-riscv-rvv-1.0-trigger-illegal-instruction-exc.patch

@@ -0,0 +1,98 @@
+From a2fcbad989e7e80e24876cf81fa5fa7b81df71ec Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Tue, 29 Sep 2020 10:45:54 +0800
+Subject: [PATCH 074/107] target/riscv: rvv-1.0: trigger illegal instruction
+ exception if frm is not valid
+
+If the frm field contains an invalid rounding mode (101-111),
+attempting to execute any vector floating-point instruction, even
+those that do not depend on the rounding mode, will raise an illegal
+instruction exception.
+
+Call gen_set_rm() with DYN rounding mode to check and trigger illegal
+instruction exception if frm field contains invalid value at run-time
+for vector floating-point instructions.
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+---
+ target/riscv/insn_trans/trans_rvv.c.inc | 22 ++++++++++++++++++++++
+ 1 file changed, 22 insertions(+)
+
+diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
+index b3d2a9113e..f032a596b5 100644
+--- a/target/riscv/insn_trans/trans_rvv.c.inc
++++ b/target/riscv/insn_trans/trans_rvv.c.inc
+@@ -2605,6 +2605,10 @@ static bool do_opfv(DisasContext *s, arg_rmr *a,
+                     int rm)
+ {
+     if (checkfn(s, a)) {
++        if (rm != RISCV_FRM_DYN) {
++            gen_set_rm(s, RISCV_FRM_DYN);
++        }
++
+         uint32_t data = 0;
+         TCGLabel *over = gen_new_label();
+         gen_set_rm(s, rm);
+@@ -2690,6 +2694,8 @@ static bool trans_vfmv_v_f(DisasContext *s, arg_vfmv_v_f *a)
+         require_rvf(s) &&
+         vext_check_isa_ill(s) &&
+         require_align(a->rd, s->lmul)) {
++        gen_set_rm(s, RISCV_FRM_DYN);
++
+         TCGv_i64 t1;
+ 
+         if (s->vl_eq_vlmax) {
+@@ -2772,6 +2778,10 @@ static bool opfv_widen_check(DisasContext *s, arg_rmr *a)
+ static bool trans_##NAME(DisasContext *s, arg_rmr *a)              \
+ {                                                                  \
+     if (opfv_widen_check(s, a)) {                                  \
++        if (FRM != RISCV_FRM_DYN) {                                \
++            gen_set_rm(s, RISCV_FRM_DYN);                          \
++        }                                                          \
++                                                                   \
+         uint32_t data = 0;                                         \
+         static gen_helper_gvec_3_ptr * const fns[2] = {            \
+             gen_helper_##HELPER##_h,                               \
+@@ -2857,6 +2867,10 @@ static bool opfv_narrow_check(DisasContext *s, arg_rmr *a)
+ static bool trans_##NAME(DisasContext *s, arg_rmr *a)              \
+ {                                                                  \
+     if (opfv_narrow_check(s, a)) {                                 \
++        if (FRM != RISCV_FRM_DYN) {                                \
++            gen_set_rm(s, RISCV_FRM_DYN);                          \
++        }                                                          \
++                                                                   \
+         uint32_t data = 0;                                         \
+         static gen_helper_gvec_3_ptr * const fns[2] = {            \
+             gen_helper_##HELPER##_h,                               \
+@@ -2897,6 +2911,10 @@ static bool opxfv_narrow_check(DisasContext *s, arg_rmr *a)
+ static bool trans_##NAME(DisasContext *s, arg_rmr *a)              \
+ {                                                                  \
+     if (opxfv_narrow_check(s, a)) {                                \
++        if (FRM != RISCV_FRM_DYN) {                                \
++            gen_set_rm(s, RISCV_FRM_DYN);                          \
++        }                                                          \
++                                                                   \
+         uint32_t data = 0;                                         \
+         static gen_helper_gvec_3_ptr * const fns[3] = {            \
+             gen_helper_##HELPER##_b,                               \
+@@ -3372,6 +3390,8 @@ static bool trans_vfmv_f_s(DisasContext *s, arg_vfmv_f_s *a)
+     if (require_rvv(s) &&
+         require_rvf(s) &&
+         vext_check_isa_ill(s)) {
++        gen_set_rm(s, RISCV_FRM_DYN);
++
+         unsigned int ofs = (8 << s->sew);
+         unsigned int len = 64 - ofs;
+         TCGv_i64 t_nan;
+@@ -3397,6 +3417,8 @@ static bool trans_vfmv_s_f(DisasContext *s, arg_vfmv_s_f *a)
+     if (require_rvv(s) &&
+         require_rvf(s) &&
+         vext_check_isa_ill(s)) {
++        gen_set_rm(s, RISCV_FRM_DYN);
++
+         /* The instructions ignore LMUL and vector register group. */
+         TCGv_i64 t1;
+         TCGLabel *over = gen_new_label();
+-- 
+2.33.1
+
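Note: per the commit message, a vector FP instruction must trap when frm holds a reserved encoding even if it never uses the rounding mode, which is why the translator forces a gen_set_rm(s, RISCV_FRM_DYN) check before instructions that would otherwise skip it. A rough standalone sketch of the validity rule itself (encodings 101-111 are reserved; the helper name is illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative only: the five valid rounding-mode encodings.
     * Anything above 4 (binary 101-111) is reserved and must trap. */
    static bool frm_is_valid(unsigned frm)
    {
        return frm <= 4;   /* RNE=0, RTZ=1, RDN=2, RUP=3, RMM=4 */
    }

    int main(void)
    {
        for (unsigned frm = 0; frm < 8; frm++) {
            printf("frm=%u -> %s\n", frm,
                   frm_is_valid(frm) ? "ok" : "illegal instruction");
        }
        return 0;
    }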

+ 55 - 0
recipes-devtools/qemu/qemu/0075-target-riscv-rvv-1.0-set-mstatus.SD-bit-when-writing.patch

@@ -0,0 +1,55 @@
+From 1e98c970b4a3f3e359edd1eb883774b977232019 Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Tue, 6 Oct 2020 23:48:24 +0800
+Subject: [PATCH 075/107] target/riscv: rvv-1.0: set mstatus.SD bit when
+ writing vector CSRs
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
+---
+ target/riscv/csr.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/target/riscv/csr.c b/target/riscv/csr.c
+index b932e28bbf..900c2fa50a 100644
+--- a/target/riscv/csr.c
++++ b/target/riscv/csr.c
+@@ -293,7 +293,7 @@ static int write_vxrm(CPURISCVState *env, int csrno, target_ulong val)
+     if (!env->debugger && !riscv_cpu_vector_enabled(env)) {
+         return -RISCV_EXCP_ILLEGAL_INST;
+     }
+-    env->mstatus |= MSTATUS_VS;
++    env->mstatus |= MSTATUS_VS | MSTATUS_SD;
+ #endif
+ 
+     env->vxrm = val;
+@@ -312,7 +312,7 @@ static int write_vxsat(CPURISCVState *env, int csrno, target_ulong val)
+     if (!env->debugger && !riscv_cpu_vector_enabled(env)) {
+         return -RISCV_EXCP_ILLEGAL_INST;
+     }
+-    env->mstatus |= MSTATUS_VS;
++    env->mstatus |= MSTATUS_VS | MSTATUS_SD;
+ #endif
+ 
+     env->vxsat = val;
+@@ -331,7 +331,7 @@ static int write_vstart(CPURISCVState *env, int csrno, target_ulong val)
+     if (!env->debugger && !riscv_cpu_vector_enabled(env)) {
+         return -RISCV_EXCP_ILLEGAL_INST;
+     }
+-    env->mstatus |= MSTATUS_VS;
++    env->mstatus |= MSTATUS_VS | MSTATUS_SD;
+ #endif
+ 
+     /*
+@@ -354,7 +354,7 @@ static int write_vcsr(CPURISCVState *env, int csrno, target_ulong val)
+     if (!env->debugger && !riscv_cpu_vector_enabled(env)) {
+         return -RISCV_EXCP_ILLEGAL_INST;
+     }
+-    env->mstatus |= MSTATUS_VS;
++    env->mstatus |= MSTATUS_VS | MSTATUS_SD;
+ #endif
+ 
+     env->vxrm = (val & VCSR_VXRM) >> VCSR_VXRM_SHIFT;
+-- 
+2.33.1
+
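Note: mstatus.SD is the read-only summary bit that tells trap handlers some extension state (FS/XS/VS) is dirty, so marking VS dirty without also raising SD would let context-switch code skip saving the vector registers. A rough sketch of the idea with placeholder bit positions (the real MSTATUS_* masks live in QEMU's cpu_bits.h and differ between RV32 and RV64):

    #include <stdint.h>
    #include <stdio.h>

    /* Placeholder masks for illustration only, not QEMU's values. */
    #define MSTATUS_VS  (3u << 9)    /* vector-state status field */
    #define MSTATUS_SD  (1u << 31)   /* "some state is dirty" summary bit */

    int main(void)
    {
        uint32_t mstatus = 0;

        /* What the patch does on every vector-CSR write: mark the vector
         * state dirty and raise the summary bit in the same store. */
        mstatus |= MSTATUS_VS | MSTATUS_SD;

        printf("mstatus = 0x%08x\n", mstatus);
        return 0;
    }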

+ 259 - 0
recipes-devtools/qemu/qemu/0076-target-riscv-gdb-support-vector-registers-for-rv64-r.patch

@@ -0,0 +1,259 @@
+From 2b5684000804bcda8519d35e51e015d436ac9c02 Mon Sep 17 00:00:00 2001
+From: Hsiangkai Wang <kai.wang@sifive.com>
+Date: Tue, 12 Jan 2021 16:44:49 +0800
+Subject: [PATCH 076/107] target/riscv: gdb: support vector registers for rv64
+ & rv32
+
+Signed-off-by: Hsiangkai Wang <kai.wang@sifive.com>
+Signed-off-by: Greentime Hu <greentime.hu@sifive.com>
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+---
+ target/riscv/cpu.c     |   2 +
+ target/riscv/cpu.h     |   1 +
+ target/riscv/gdbstub.c | 184 +++++++++++++++++++++++++++++++++++++++++
+ 3 files changed, 187 insertions(+)
+
+diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
+index 4a29175e20..f6d79c5aa9 100644
+--- a/target/riscv/cpu.c
++++ b/target/riscv/cpu.c
+@@ -577,6 +577,8 @@ static const char *riscv_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname)
+ 
+     if (strcmp(xmlname, "riscv-csr.xml") == 0) {
+         return cpu->dyn_csr_xml;
++    } else if (strcmp(xmlname, "riscv-vector.xml") == 0) {
++        return cpu->dyn_vreg_xml;
+     }
+ 
+     return NULL;
+diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
+index d613520211..e223ab550b 100644
+--- a/target/riscv/cpu.h
++++ b/target/riscv/cpu.h
+@@ -279,6 +279,7 @@ struct RISCVCPU {
+     CPURISCVState env;
+ 
+     char *dyn_csr_xml;
++    char *dyn_vreg_xml;
+ 
+     /* Configuration Settings */
+     struct {
+diff --git a/target/riscv/gdbstub.c b/target/riscv/gdbstub.c
+index 5f96b7ea2a..dd883612c7 100644
+--- a/target/riscv/gdbstub.c
++++ b/target/riscv/gdbstub.c
+@@ -20,6 +20,32 @@
+ #include "exec/gdbstub.h"
+ #include "cpu.h"
+ 
++struct TypeSize {
++    const char *gdb_type;
++    const char *id;
++    int size;
++    const char suffix;
++};
++
++static const struct TypeSize vec_lanes[] = {
++    /* quads */
++    { "uint128", "quads", 128, 'q' },
++    /* 64 bit */
++    { "uint64", "longs", 64, 'l' },
++    /* 32 bit */
++    { "uint32", "words", 32, 'w' },
++    /* 16 bit */
++    { "uint16", "shorts", 16, 's' },
++    /*
++     * TODO: currently there is no reliable way of telling
++     * if the remote gdb actually understands ieee_half so
++     * we don't expose it in the target description for now.
++     * { "ieee_half", 16, 'h', 'f' },
++     */
++    /* bytes */
++    { "uint8", "bytes", 8, 'b' },
++};
++
+ int riscv_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
+ {
+     RISCVCPU *cpu = RISCV_CPU(cs);
+@@ -101,6 +127,96 @@ static int riscv_gdb_set_fpu(CPURISCVState *env, uint8_t *mem_buf, int n)
+     return 0;
+ }
+ 
++/*
++ * Convert register index number passed by GDB to the corresponding
++ * vector CSR number. Vector CSRs are defined after vector registers
++ * in dynamic generated riscv-vector.xml, thus the starting register index
++ * of vector CSRs is 32.
++ * Return 0 if register index number is out of range.
++ */
++static int riscv_gdb_vector_csrno(int num_regs)
++{
++    /*
++     * The order of vector CSRs in the switch case
++     * should match with the order defined in csr_ops[].
++     */
++    switch (num_regs) {
++    case 32:
++        return CSR_VSTART;
++    case 33:
++        return CSR_VXSAT;
++    case 34:
++        return CSR_VXRM;
++    case 35:
++        return CSR_VCSR;
++    case 36:
++        return CSR_VL;
++    case 37:
++        return CSR_VTYPE;
++    case 38:
++        return CSR_VLENB;
++    default:
++        /* Unknown register. */
++        return 0;
++    }
++}
++
++static int riscv_gdb_get_vector(CPURISCVState *env, GByteArray *buf, int n)
++{
++    uint16_t vlenb = env_archcpu(env)->cfg.vlen >> 3;
++    if (n < 32) {
++        int i;
++        int cnt = 0;
++        for (i = 0; i < vlenb; i += 8) {
++            cnt += gdb_get_reg64(buf,
++                                 env->vreg[(n * vlenb + i) / 8]);
++        }
++        return cnt;
++    }
++
++    int csrno = riscv_gdb_vector_csrno(n);
++
++    if (!csrno) {
++        return 0;
++    }
++
++    target_ulong val = 0;
++    int result = riscv_csrrw_debug(env, csrno, &val, 0, 0);
++
++    if (result == 0) {
++        return gdb_get_regl(buf, val);
++    }
++
++    return 0;
++}
++
++static int riscv_gdb_set_vector(CPURISCVState *env, uint8_t *mem_buf, int n)
++{
++    uint16_t vlenb = env_archcpu(env)->cfg.vlen >> 3;
++    if (n < 32) {
++        int i;
++        for (i = 0; i < vlenb; i += 8) {
++            env->vreg[(n * vlenb + i) / 8] = ldq_p(mem_buf + i);
++        }
++        return vlenb;
++    }
++
++    int csrno = riscv_gdb_vector_csrno(n);
++
++    if (!csrno) {
++        return 0;
++    }
++
++    target_ulong val = ldtul_p(mem_buf);
++    int result = riscv_csrrw_debug(env, csrno, NULL, val, -1);
++
++    if (result == 0) {
++        return sizeof(target_ulong);
++    }
++
++    return 0;
++}
++
+ static int riscv_gdb_get_csr(CPURISCVState *env, GByteArray *buf, int n)
+ {
+     if (n < CSR_TABLE_SIZE) {
+@@ -187,6 +303,68 @@ static int riscv_gen_dynamic_csr_xml(CPUState *cs, int base_reg)
+     return CSR_TABLE_SIZE;
+ }
+ 
++static int ricsv_gen_dynamic_vector_xml(CPUState *cs, int base_reg)
++{
++    RISCVCPU *cpu = RISCV_CPU(cs);
++    GString *s = g_string_new(NULL);
++    g_autoptr(GString) ts = g_string_new("");
++    int reg_width = cpu->cfg.vlen;
++    int num_regs = 0;
++    int i;
++
++    g_string_printf(s, "<?xml version=\"1.0\"?>");
++    g_string_append_printf(s, "<!DOCTYPE target SYSTEM \"gdb-target.dtd\">");
++    g_string_append_printf(s, "<feature name=\"org.gnu.gdb.riscv.vector\">");
++
++    /* First define types and totals in a whole VL */
++    for (i = 0; i < ARRAY_SIZE(vec_lanes); i++) {
++        int count = reg_width / vec_lanes[i].size;
++        g_string_printf(ts, "%s", vec_lanes[i].id);
++        g_string_append_printf(s,
++                               "<vector id=\"%s\" type=\"%s\" count=\"%d\"/>",
++                               ts->str, vec_lanes[i].gdb_type, count);
++    }
++
++    /* Define unions */
++    g_string_append_printf(s, "<union id=\"riscv_vector\">");
++    for (i = 0; i < ARRAY_SIZE(vec_lanes); i++) {
++        g_string_append_printf(s, "<field name=\"%c\" type=\"%s\"/>",
++                               vec_lanes[i].suffix,
++                               vec_lanes[i].id);
++    }
++    g_string_append(s, "</union>");
++
++    /* Define vector registers */
++    for (i = 0; i < 32; i++) {
++        g_string_append_printf(s,
++                               "<reg name=\"v%d\" bitsize=\"%d\""
++                               " regnum=\"%d\" group=\"vector\""
++                               " type=\"riscv_vector\"/>",
++                               i, reg_width, base_reg++);
++        num_regs++;
++    }
++
++    /* Define vector CSRs */
++    const char *vector_csrs[7] = {
++        "vstart", "vxsat", "vxrm", "vcsr",
++        "vl", "vtype", "vlenb"
++    };
++
++    for (i = 0; i < 7; i++) {
++        g_string_append_printf(s,
++                               "<reg name=\"%s\" bitsize=\"%d\""
++                               " regnum=\"%d\" group=\"vector\""
++                               " type=\"int\"/>",
++                               vector_csrs[i], TARGET_LONG_BITS, base_reg++);
++        num_regs++;
++    }
++
++    g_string_append_printf(s, "</feature>");
++
++    cpu->dyn_vreg_xml = g_string_free(s, false);
++    return num_regs;
++}
++
+ void riscv_cpu_register_gdb_regs_for_features(CPUState *cs)
+ {
+     RISCVCPU *cpu = RISCV_CPU(cs);
+@@ -198,6 +376,12 @@ void riscv_cpu_register_gdb_regs_for_features(CPUState *cs)
+         gdb_register_coprocessor(cs, riscv_gdb_get_fpu, riscv_gdb_set_fpu,
+                                  36, "riscv-32bit-fpu.xml", 0);
+     }
++    if (env->misa & RVV) {
++        gdb_register_coprocessor(cs, riscv_gdb_get_vector, riscv_gdb_set_vector,
++                                 ricsv_gen_dynamic_vector_xml(cs,
++                                                              cs->gdb_num_regs),
++                                 "riscv-vector.xml", 0);
++    }
+ #if defined(TARGET_RISCV32)
+     gdb_register_coprocessor(cs, riscv_gdb_get_virtual, riscv_gdb_set_virtual,
+                              1, "riscv-32bit-virtual.xml", 0);
+-- 
+2.33.1
+
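Note: for a hypothetical vlen of 128 on a 64-bit target, the generator above emits a target description along these lines (indented and elided here for readability; the real output is one unindented string and the regnum values depend on how many registers gdb already knows about):

    <?xml version="1.0"?>
    <!DOCTYPE target SYSTEM "gdb-target.dtd">
    <feature name="org.gnu.gdb.riscv.vector">
      <vector id="quads" type="uint128" count="1"/>
      <vector id="longs" type="uint64" count="2"/>
      <vector id="words" type="uint32" count="4"/>
      <vector id="shorts" type="uint16" count="8"/>
      <vector id="bytes" type="uint8" count="16"/>
      <union id="riscv_vector">
        <field name="q" type="quads"/>
        <field name="l" type="longs"/>
        <field name="w" type="words"/>
        <field name="s" type="shorts"/>
        <field name="b" type="bytes"/>
      </union>
      <reg name="v0" bitsize="128" regnum="..." group="vector" type="riscv_vector"/>
      <!-- ... v1 through v31 ... -->
      <reg name="vstart" bitsize="64" regnum="..." group="vector" type="int"/>
      <!-- ... vxsat, vxrm, vcsr, vl, vtype, vlenb ... -->
    </feature>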

+ 260 - 0
recipes-devtools/qemu/qemu/0077-target-riscv-rvv-1.0-floating-point-reciprocal-squar.patch

@@ -0,0 +1,260 @@
+From 56caeae679dde82a2da2536758dabd58e2d0becf Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Thu, 19 Nov 2020 14:36:04 +0800
+Subject: [PATCH 077/107] target/riscv: rvv-1.0: floating-point reciprocal
+ square-root estimate instruction
+
+Implement the floating-point reciprocal square-root estimate to 7 bits
+instruction.
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+---
+ target/riscv/helper.h                   |   4 +
+ target/riscv/insn32.decode              |   1 +
+ target/riscv/insn_trans/trans_rvv.c.inc |   1 +
+ target/riscv/vector_helper.c            | 183 ++++++++++++++++++++++++
+ 4 files changed, 189 insertions(+)
+
+diff --git a/target/riscv/helper.h b/target/riscv/helper.h
+index e233548623..807d1f7202 100644
+--- a/target/riscv/helper.h
++++ b/target/riscv/helper.h
+@@ -916,6 +916,10 @@ DEF_HELPER_5(vfsqrt_v_h, void, ptr, ptr, ptr, env, i32)
+ DEF_HELPER_5(vfsqrt_v_w, void, ptr, ptr, ptr, env, i32)
+ DEF_HELPER_5(vfsqrt_v_d, void, ptr, ptr, ptr, env, i32)
+ 
++DEF_HELPER_5(vfrsqrt7_v_h, void, ptr, ptr, ptr, env, i32)
++DEF_HELPER_5(vfrsqrt7_v_w, void, ptr, ptr, ptr, env, i32)
++DEF_HELPER_5(vfrsqrt7_v_d, void, ptr, ptr, ptr, env, i32)
++
+ DEF_HELPER_6(vfmin_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+ DEF_HELPER_6(vfmin_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+ DEF_HELPER_6(vfmin_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
+index 65d84f0b7c..0c7fd17d56 100644
+--- a/target/riscv/insn32.decode
++++ b/target/riscv/insn32.decode
+@@ -535,6 +535,7 @@ vfwmsac_vf      111110 . ..... ..... 101 ..... 1010111 @r_vm
+ vfwnmsac_vv     111111 . ..... ..... 001 ..... 1010111 @r_vm
+ vfwnmsac_vf     111111 . ..... ..... 101 ..... 1010111 @r_vm
+ vfsqrt_v        010011 . ..... 00000 001 ..... 1010111 @r2_vm
++vfrsqrt7_v      010011 . ..... 00100 001 ..... 1010111 @r2_vm
+ vfmin_vv        000100 . ..... ..... 001 ..... 1010111 @r_vm
+ vfmin_vf        000100 . ..... ..... 101 ..... 1010111 @r_vm
+ vfmax_vv        000110 . ..... ..... 001 ..... 1010111 @r_vm
+diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
+index f032a596b5..a53f0158ee 100644
+--- a/target/riscv/insn_trans/trans_rvv.c.inc
++++ b/target/riscv/insn_trans/trans_rvv.c.inc
+@@ -2638,6 +2638,7 @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a)  \
+ }
+ 
+ GEN_OPFV_TRANS(vfsqrt_v, opfv_check, RISCV_FRM_DYN)
++GEN_OPFV_TRANS(vfrsqrt7_v, opfv_check, RISCV_FRM_DYN)
+ 
+ /* Vector Floating-Point MIN/MAX Instructions */
+ GEN_OPFVV_TRANS(vfmin_vv, opfvv_check)
+diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
+index 7a2d2e7949..7d8f05ae21 100644
+--- a/target/riscv/vector_helper.c
++++ b/target/riscv/vector_helper.c
+@@ -18,6 +18,7 @@
+ 
+ #include "qemu/osdep.h"
+ #include "qemu/host-utils.h"
++#include "qemu/bitops.h"
+ #include "cpu.h"
+ #include "exec/memop.h"
+ #include "exec/exec-all.h"
+@@ -3638,6 +3639,188 @@ GEN_VEXT_V_ENV(vfsqrt_v_h, 2, 2)
+ GEN_VEXT_V_ENV(vfsqrt_v_w, 4, 4)
+ GEN_VEXT_V_ENV(vfsqrt_v_d, 8, 8)
+ 
++/*
++ * Vector Floating-Point Reciprocal Square-Root Estimate Instruction
++ *
++ * Adapted from riscv-v-spec recip.c:
++ * https://github.com/riscv/riscv-v-spec/blob/master/recip.c
++ */
++static uint64_t frsqrt7(uint64_t f, int exp_size, int frac_size)
++{
++    uint64_t sign = extract64(f, frac_size + exp_size, 1);
++    uint64_t exp = extract64(f, frac_size, exp_size);
++    uint64_t frac = extract64(f, 0, frac_size);
++
++    const uint8_t lookup_table[] = {
++        52, 51, 50, 48, 47, 46, 44, 43,
++        42, 41, 40, 39, 38, 36, 35, 34,
++        33, 32, 31, 30, 30, 29, 28, 27,
++        26, 25, 24, 23, 23, 22, 21, 20,
++        19, 19, 18, 17, 16, 16, 15, 14,
++        14, 13, 12, 12, 11, 10, 10, 9,
++        9, 8, 7, 7, 6, 6, 5, 4,
++        4, 3, 3, 2, 2, 1, 1, 0,
++        127, 125, 123, 121, 119, 118, 116, 114,
++        113, 111, 109, 108, 106, 105, 103, 102,
++        100, 99, 97, 96, 95, 93, 92, 91,
++        90, 88, 87, 86, 85, 84, 83, 82,
++        80, 79, 78, 77, 76, 75, 74, 73,
++        72, 71, 70, 70, 69, 68, 67, 66,
++        65, 64, 63, 63, 62, 61, 60, 59,
++        59, 58, 57, 56, 56, 55, 54, 53
++    };
++    const int precision = 7;
++
++    if (exp == 0 && frac != 0) { /* subnormal */
++        /* Normalize the subnormal. */
++        while (extract64(frac, frac_size - 1, 1) == 0) {
++            exp--;
++            frac <<= 1;
++        }
++
++        frac = (frac << 1) & MAKE_64BIT_MASK(0, frac_size);
++    }
++
++    int idx = ((exp & 1) << (precision - 1)) |
++                (frac >> (frac_size - precision + 1));
++    uint64_t out_frac = (uint64_t)(lookup_table[idx]) <<
++                            (frac_size - precision);
++    uint64_t out_exp = (3 * MAKE_64BIT_MASK(0, exp_size - 1) + ~exp) / 2;
++
++    uint64_t val = 0;
++    val = deposit64(val, 0, frac_size, out_frac);
++    val = deposit64(val, frac_size, exp_size, out_exp);
++    val = deposit64(val, frac_size + exp_size, 1, sign);
++    return val;
++}
++
++static float16 frsqrt7_h(float16 f, float_status *s)
++{
++    int exp_size = 5, frac_size = 10;
++    bool sign = float16_is_neg(f);
++
++    /*
++     * frsqrt7(sNaN) = canonical NaN
++     * frsqrt7(-inf) = canonical NaN
++     * frsqrt7(-normal) = canonical NaN
++     * frsqrt7(-subnormal) = canonical NaN
++     */
++    if (float16_is_signaling_nan(f, s) ||
++            (float16_is_infinity(f) && sign) ||
++            (float16_is_normal(f) && sign) ||
++            (float16_is_zero_or_denormal(f) && !float16_is_zero(f) && sign)) {
++        s->float_exception_flags |= float_flag_invalid;
++        return float16_default_nan(s);
++    }
++
++    /* frsqrt7(qNaN) = canonical NaN */
++    if (float16_is_quiet_nan(f, s)) {
++        return float16_default_nan(s);
++    }
++
++    /* frsqrt7(+-0) = +-inf */
++    if (float16_is_zero(f)) {
++        s->float_exception_flags |= float_flag_divbyzero;
++        return float16_set_sign(float16_infinity, sign);
++    }
++
++    /* frsqrt7(+inf) = +0 */
++    if (float16_is_infinity(f) && !sign) {
++        return float16_set_sign(float16_zero, sign);
++    }
++
++    /* +normal, +subnormal */
++    uint64_t val = frsqrt7(f, exp_size, frac_size);
++    return make_float16(val);
++}
++
++static float32 frsqrt7_s(float32 f, float_status *s)
++{
++    int exp_size = 8, frac_size = 23;
++    bool sign = float32_is_neg(f);
++
++    /*
++     * frsqrt7(sNaN) = canonical NaN
++     * frsqrt7(-inf) = canonical NaN
++     * frsqrt7(-normal) = canonical NaN
++     * frsqrt7(-subnormal) = canonical NaN
++     */
++    if (float32_is_signaling_nan(f, s) ||
++            (float32_is_infinity(f) && sign) ||
++            (float32_is_normal(f) && sign) ||
++            (float32_is_zero_or_denormal(f) && !float32_is_zero(f) && sign)) {
++        s->float_exception_flags |= float_flag_invalid;
++        return float32_default_nan(s);
++    }
++
++    /* frsqrt7(qNaN) = canonical NaN */
++    if (float32_is_quiet_nan(f, s)) {
++        return float32_default_nan(s);
++    }
++
++    /* frsqrt7(+-0) = +-inf */
++    if (float32_is_zero(f)) {
++        s->float_exception_flags |= float_flag_divbyzero;
++        return float32_set_sign(float32_infinity, sign);
++    }
++
++    /* frsqrt7(+inf) = +0 */
++    if (float32_is_infinity(f) && !sign) {
++        return float32_set_sign(float32_zero, sign);
++    }
++
++    /* +normal, +subnormal */
++    uint64_t val = frsqrt7(f, exp_size, frac_size);
++    return make_float32(val);
++}
++
++static float64 frsqrt7_d(float64 f, float_status *s)
++{
++    int exp_size = 11, frac_size = 52;
++    bool sign = float64_is_neg(f);
++
++    /*
++     * frsqrt7(sNaN) = canonical NaN
++     * frsqrt7(-inf) = canonical NaN
++     * frsqrt7(-normal) = canonical NaN
++     * frsqrt7(-subnormal) = canonical NaN
++     */
++    if (float64_is_signaling_nan(f, s) ||
++            (float64_is_infinity(f) && sign) ||
++            (float64_is_normal(f) && sign) ||
++            (float64_is_zero_or_denormal(f) && !float64_is_zero(f) && sign)) {
++        s->float_exception_flags |= float_flag_invalid;
++        return float64_default_nan(s);
++    }
++
++    /* frsqrt7(qNaN) = canonical NaN */
++    if (float64_is_quiet_nan(f, s)) {
++        return float64_default_nan(s);
++    }
++
++    /* frsqrt7(+-0) = +-inf */
++    if (float64_is_zero(f)) {
++        s->float_exception_flags |= float_flag_divbyzero;
++        return float64_set_sign(float64_infinity, sign);
++    }
++
++    /* frsqrt7(+inf) = +0 */
++    if (float64_is_infinity(f) && !sign) {
++        return float64_set_sign(float64_zero, sign);
++    }
++
++    /* +normal, +subnormal */
++    uint64_t val = frsqrt7(f, exp_size, frac_size);
++    return make_float64(val);
++}
++
++RVVCALL(OPFVV1, vfrsqrt7_v_h, OP_UU_H, H2, H2, frsqrt7_h)
++RVVCALL(OPFVV1, vfrsqrt7_v_w, OP_UU_W, H4, H4, frsqrt7_s)
++RVVCALL(OPFVV1, vfrsqrt7_v_d, OP_UU_D, H8, H8, frsqrt7_d)
++GEN_VEXT_V_ENV(vfrsqrt7_v_h, 2, 2)
++GEN_VEXT_V_ENV(vfrsqrt7_v_w, 4, 4)
++GEN_VEXT_V_ENV(vfrsqrt7_v_d, 8, 8)
++
+ /* Vector Floating-Point MIN/MAX Instructions */
+ RVVCALL(OPFVV2, vfmin_vv_h, OP_UUU_H, H2, H2, H2, float16_minnum_noprop)
+ RVVCALL(OPFVV2, vfmin_vv_w, OP_UUU_W, H4, H4, H4, float32_minnum_noprop)
+-- 
+2.33.1
+
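Note: the estimate is a pure table lookup: one bit of the biased exponent plus the top six fraction bits select one of the 128 entries above, and the result exponent is roughly (3*bias - 1 - exp)/2. A standalone sketch of just the index computation for binary32 (exp_size = 8, frac_size = 23, precision = 7; the helper name is illustrative):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Illustrative only: build the 7-bit table index the estimate uses
     * for a normal binary32 input (exp_size = 8, frac_size = 23). */
    static unsigned rsqrt7_index(float x)
    {
        uint32_t bits;
        memcpy(&bits, &x, sizeof(bits));
        uint32_t exp  = (bits >> 23) & 0xff;   /* biased exponent */
        uint32_t frac = bits & 0x7fffff;       /* 23-bit fraction */
        /* The low exponent bit picks the upper or lower half of the
         * table; the top six fraction bits pick the entry within it. */
        return ((exp & 1) << 6) | (frac >> 17);
    }

    int main(void)
    {
        printf("index for 1.0f: %u\n", rsqrt7_index(1.0f));  /* odd exponent */
        printf("index for 3.0f: %u\n", rsqrt7_index(3.0f));  /* even exponent */
        return 0;
    }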

+ 260 - 0
recipes-devtools/qemu/qemu/0078-target-riscv-rvv-1.0-floating-point-reciprocal-estim.patch

@@ -0,0 +1,260 @@
+From 2be237127e46c352e487c55800b779c694089aa8 Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Thu, 19 Nov 2020 14:40:08 +0800
+Subject: [PATCH 078/107] target/riscv: rvv-1.0: floating-point reciprocal
+ estimate instruction
+
+Implement the floating-point reciprocal estimate to 7 bits instruction.
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+---
+ target/riscv/helper.h                   |   4 +
+ target/riscv/insn32.decode              |   1 +
+ target/riscv/insn_trans/trans_rvv.c.inc |   1 +
+ target/riscv/vector_helper.c            | 191 ++++++++++++++++++++++++
+ 4 files changed, 197 insertions(+)
+
+diff --git a/target/riscv/helper.h b/target/riscv/helper.h
+index 807d1f7202..a83a7c33c0 100644
+--- a/target/riscv/helper.h
++++ b/target/riscv/helper.h
+@@ -920,6 +920,10 @@ DEF_HELPER_5(vfrsqrt7_v_h, void, ptr, ptr, ptr, env, i32)
+ DEF_HELPER_5(vfrsqrt7_v_w, void, ptr, ptr, ptr, env, i32)
+ DEF_HELPER_5(vfrsqrt7_v_d, void, ptr, ptr, ptr, env, i32)
+ 
++DEF_HELPER_5(vfrec7_v_h, void, ptr, ptr, ptr, env, i32)
++DEF_HELPER_5(vfrec7_v_w, void, ptr, ptr, ptr, env, i32)
++DEF_HELPER_5(vfrec7_v_d, void, ptr, ptr, ptr, env, i32)
++
+ DEF_HELPER_6(vfmin_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+ DEF_HELPER_6(vfmin_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+ DEF_HELPER_6(vfmin_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
+index 0c7fd17d56..c90d347af4 100644
+--- a/target/riscv/insn32.decode
++++ b/target/riscv/insn32.decode
+@@ -536,6 +536,7 @@ vfwnmsac_vv     111111 . ..... ..... 001 ..... 1010111 @r_vm
+ vfwnmsac_vf     111111 . ..... ..... 101 ..... 1010111 @r_vm
+ vfsqrt_v        010011 . ..... 00000 001 ..... 1010111 @r2_vm
+ vfrsqrt7_v      010011 . ..... 00100 001 ..... 1010111 @r2_vm
++vfrec7_v        010011 . ..... 00101 001 ..... 1010111 @r2_vm
+ vfmin_vv        000100 . ..... ..... 001 ..... 1010111 @r_vm
+ vfmin_vf        000100 . ..... ..... 101 ..... 1010111 @r_vm
+ vfmax_vv        000110 . ..... ..... 001 ..... 1010111 @r_vm
+diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
+index a53f0158ee..f916f5d064 100644
+--- a/target/riscv/insn_trans/trans_rvv.c.inc
++++ b/target/riscv/insn_trans/trans_rvv.c.inc
+@@ -2639,6 +2639,7 @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a)  \
+ 
+ GEN_OPFV_TRANS(vfsqrt_v, opfv_check, RISCV_FRM_DYN)
+ GEN_OPFV_TRANS(vfrsqrt7_v, opfv_check, RISCV_FRM_DYN)
++GEN_OPFV_TRANS(vfrec7_v, opfv_check, RISCV_FRM_DYN)
+ 
+ /* Vector Floating-Point MIN/MAX Instructions */
+ GEN_OPFVV_TRANS(vfmin_vv, opfvv_check)
+diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
+index 7d8f05ae21..78787af258 100644
+--- a/target/riscv/vector_helper.c
++++ b/target/riscv/vector_helper.c
+@@ -3821,6 +3821,197 @@ GEN_VEXT_V_ENV(vfrsqrt7_v_h, 2, 2)
+ GEN_VEXT_V_ENV(vfrsqrt7_v_w, 4, 4)
+ GEN_VEXT_V_ENV(vfrsqrt7_v_d, 8, 8)
+ 
++/*
++ * Vector Floating-Point Reciprocal Estimate Instruction
++ *
++ * Adapted from riscv-v-spec recip.c:
++ * https://github.com/riscv/riscv-v-spec/blob/master/recip.c
++ */
++static uint64_t frec7(uint64_t f, int exp_size, int frac_size,
++                      float_status *s)
++{
++    uint64_t sign = extract64(f, frac_size + exp_size, 1);
++    uint64_t exp = extract64(f, frac_size, exp_size);
++    uint64_t frac = extract64(f, 0, frac_size);
++
++    const uint8_t lookup_table[] = {
++        127, 125, 123, 121, 119, 117, 116, 114,
++        112, 110, 109, 107, 105, 104, 102, 100,
++        99, 97, 96, 94, 93, 91, 90, 88,
++        87, 85, 84, 83, 81, 80, 79, 77,
++        76, 75, 74, 72, 71, 70, 69, 68,
++        66, 65, 64, 63, 62, 61, 60, 59,
++        58, 57, 56, 55, 54, 53, 52, 51,
++        50, 49, 48, 47, 46, 45, 44, 43,
++        42, 41, 40, 40, 39, 38, 37, 36,
++        35, 35, 34, 33, 32, 31, 31, 30,
++        29, 28, 28, 27, 26, 25, 25, 24,
++        23, 23, 22, 21, 21, 20, 19, 19,
++        18, 17, 17, 16, 15, 15, 14, 14,
++        13, 12, 12, 11, 11, 10, 9, 9,
++        8, 8, 7, 7, 6, 5, 5, 4,
++        4, 3, 3, 2, 2, 1, 1, 0
++    };
++    const int precision = 7;
++
++    if (exp == 0 && frac != 0) { /* subnormal */
++        /* Normalize the subnormal. */
++        while (extract64(frac, frac_size - 1, 1) == 0) {
++            exp--;
++            frac <<= 1;
++        }
++
++        frac = (frac << 1) & MAKE_64BIT_MASK(0, frac_size);
++
++        if (exp != 0 && exp != UINT64_MAX) {
++            /*
++             * Overflow to inf or max value of same sign,
++             * depending on sign and rounding mode.
++             */
++            s->float_exception_flags |= (float_flag_inexact |
++                                         float_flag_overflow);
++
++            if ((s->float_rounding_mode == float_round_to_zero) ||
++                ((s->float_rounding_mode == float_round_down) && !sign) ||
++                ((s->float_rounding_mode == float_round_up) && sign)) {
++                /* Return greatest/negative finite value. */
++                return (sign << (exp_size + frac_size)) |
++                    (MAKE_64BIT_MASK(frac_size, exp_size) - 1);
++            } else {
++                /* Return +-inf. */
++                return (sign << (exp_size + frac_size)) |
++                    MAKE_64BIT_MASK(frac_size, exp_size);
++            }
++        }
++    }
++
++    int idx = frac >> (frac_size - precision);
++    uint64_t out_frac = (uint64_t)(lookup_table[idx]) <<
++                            (frac_size - precision);
++    uint64_t out_exp = 2 * MAKE_64BIT_MASK(0, exp_size - 1) + ~exp;
++
++    if (out_exp == 0 || out_exp == UINT64_MAX) {
++        /*
++         * The result is subnormal, but don't raise the underflow exception,
++         * because there's no additional loss of precision.
++         */
++        out_frac = (out_frac >> 1) | MAKE_64BIT_MASK(frac_size - 1, 1);
++        if (out_exp == UINT64_MAX) {
++            out_frac >>= 1;
++            out_exp = 0;
++        }
++    }
++
++    uint64_t val = 0;
++    val = deposit64(val, 0, frac_size, out_frac);
++    val = deposit64(val, frac_size, exp_size, out_exp);
++    val = deposit64(val, frac_size + exp_size, 1, sign);
++    return val;
++}
++
++static float16 frec7_h(float16 f, float_status *s)
++{
++    int exp_size = 5, frac_size = 10;
++    bool sign = float16_is_neg(f);
++
++    /* frec7(+-inf) = +-0 */
++    if (float16_is_infinity(f)) {
++        return float16_set_sign(float16_zero, sign);
++    }
++
++    /* frec7(+-0) = +-inf */
++    if (float16_is_zero(f)) {
++        s->float_exception_flags |= float_flag_divbyzero;
++        return float16_set_sign(float16_infinity, sign);
++    }
++
++    /* frec7(sNaN) = canonical NaN */
++    if (float16_is_signaling_nan(f, s)) {
++        s->float_exception_flags |= float_flag_invalid;
++        return float16_default_nan(s);
++    }
++
++    /* frec7(qNaN) = canonical NaN */
++    if (float16_is_quiet_nan(f, s)) {
++        return float16_default_nan(s);
++    }
++
++    /* +-normal, +-subnormal */
++    uint64_t val = frec7(f, exp_size, frac_size, s);
++    return make_float16(val);
++}
++
++static float32 frec7_s(float32 f, float_status *s)
++{
++    int exp_size = 8, frac_size = 23;
++    bool sign = float32_is_neg(f);
++
++    /* frec7(+-inf) = +-0 */
++    if (float32_is_infinity(f)) {
++        return float32_set_sign(float32_zero, sign);
++    }
++
++    /* frec7(+-0) = +-inf */
++    if (float32_is_zero(f)) {
++        s->float_exception_flags |= float_flag_divbyzero;
++        return float32_set_sign(float32_infinity, sign);
++    }
++
++    /* frec7(sNaN) = canonical NaN */
++    if (float32_is_signaling_nan(f, s)) {
++        s->float_exception_flags |= float_flag_invalid;
++        return float32_default_nan(s);
++    }
++
++    /* frec7(qNaN) = canonical NaN */
++    if (float32_is_quiet_nan(f, s)) {
++        return float32_default_nan(s);
++    }
++
++    /* +-normal, +-subnormal */
++    uint64_t val = frec7(f, exp_size, frac_size, s);
++    return make_float32(val);
++}
++
++static float64 frec7_d(float64 f, float_status *s)
++{
++    int exp_size = 11, frac_size = 52;
++    bool sign = float64_is_neg(f);
++
++    /* frec7(+-inf) = +-0 */
++    if (float64_is_infinity(f)) {
++        return float64_set_sign(float64_zero, sign);
++    }
++
++    /* frec7(+-0) = +-inf */
++    if (float64_is_zero(f)) {
++        s->float_exception_flags |= float_flag_divbyzero;
++        return float64_set_sign(float64_infinity, sign);
++    }
++
++    /* frec7(sNaN) = canonical NaN */
++    if (float64_is_signaling_nan(f, s)) {
++        s->float_exception_flags |= float_flag_invalid;
++        return float64_default_nan(s);
++    }
++
++    /* frec7(qNaN) = canonical NaN */
++    if (float64_is_quiet_nan(f, s)) {
++        return float64_default_nan(s);
++    }
++
++    /* +-normal, +-subnormal */
++    uint64_t val = frec7(f, exp_size, frac_size, s);
++    return make_float64(val);
++}
++
++RVVCALL(OPFVV1, vfrec7_v_h, OP_UU_H, H2, H2, frec7_h)
++RVVCALL(OPFVV1, vfrec7_v_w, OP_UU_W, H4, H4, frec7_s)
++RVVCALL(OPFVV1, vfrec7_v_d, OP_UU_D, H8, H8, frec7_d)
++GEN_VEXT_V_ENV(vfrec7_v_h, 2, 2)
++GEN_VEXT_V_ENV(vfrec7_v_w, 4, 4)
++GEN_VEXT_V_ENV(vfrec7_v_d, 8, 8)
++
+ /* Vector Floating-Point MIN/MAX Instructions */
+ RVVCALL(OPFVV2, vfmin_vv_h, OP_UUU_H, H2, H2, H2, float16_minnum_noprop)
+ RVVCALL(OPFVV2, vfmin_vv_w, OP_UUU_W, H4, H4, H4, float32_minnum_noprop)
+-- 
+2.33.1
+
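Note: frec7 uses the same table-lookup scheme as frsqrt7, but since the reciprocal does not depend on exponent parity its index is simply the top seven fraction bits, and the result exponent is roughly 2*bias - 1 - exp; the extra logic above handles inputs whose reciprocal overflows or becomes subnormal. A short sketch of the index for binary32, with illustrative naming:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Illustrative only: frec7's table index is the top 7 fraction bits
     * of a binary32 value (frac_size = 23, precision = 7). */
    static unsigned rec7_index(float x)
    {
        uint32_t bits;
        memcpy(&bits, &x, sizeof(bits));
        return (bits & 0x7fffff) >> 16;   /* frac >> (23 - 7) */
    }

    int main(void)
    {
        printf("index for 1.0f: %u\n", rec7_index(1.0f));   /* frac = 0   -> 0  */
        printf("index for 1.5f: %u\n", rec7_index(1.5f));   /* frac = 0.5 -> 64 */
        return 0;
    }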

+ 44 - 0
recipes-devtools/qemu/qemu/0079-target-riscv-set-mstatus.SD-bit-when-writing-fp-CSRs.patch

@@ -0,0 +1,44 @@
+From fb2ee51ba0046703085c961fc029bf93f593a5a9 Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Tue, 6 Oct 2020 23:47:28 +0800
+Subject: [PATCH 079/107] target/riscv: set mstatus.SD bit when writing fp CSRs
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+---
+ target/riscv/csr.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/target/riscv/csr.c b/target/riscv/csr.c
+index 900c2fa50a..cb3816b124 100644
+--- a/target/riscv/csr.c
++++ b/target/riscv/csr.c
+@@ -209,7 +209,7 @@ static int write_fflags(CPURISCVState *env, int csrno, target_ulong val)
+     if (!env->debugger && !riscv_cpu_fp_enabled(env)) {
+         return -RISCV_EXCP_ILLEGAL_INST;
+     }
+-    env->mstatus |= MSTATUS_FS;
++    env->mstatus |= MSTATUS_FS | MSTATUS_SD;
+ #endif
+     riscv_cpu_set_fflags(env, val & (FSR_AEXC >> FSR_AEXC_SHIFT));
+     return 0;
+@@ -232,7 +232,7 @@ static int write_frm(CPURISCVState *env, int csrno, target_ulong val)
+     if (!env->debugger && !riscv_cpu_fp_enabled(env)) {
+         return -RISCV_EXCP_ILLEGAL_INST;
+     }
+-    env->mstatus |= MSTATUS_FS;
++    env->mstatus |= MSTATUS_FS | MSTATUS_SD;
+ #endif
+     env->frm = val & (FSR_RD >> FSR_RD_SHIFT);
+     return 0;
+@@ -256,7 +256,7 @@ static int write_fcsr(CPURISCVState *env, int csrno, target_ulong val)
+     if (!env->debugger && !riscv_cpu_fp_enabled(env)) {
+         return -RISCV_EXCP_ILLEGAL_INST;
+     }
+-    env->mstatus |= MSTATUS_FS;
++    env->mstatus |= MSTATUS_FS | MSTATUS_SD;
+ #endif
+     env->frm = (val & FSR_RD) >> FSR_RD_SHIFT;
+     riscv_cpu_set_fflags(env, (val & FSR_AEXC) >> FSR_AEXC_SHIFT);
+-- 
+2.33.1
+

+ 38 - 0
recipes-devtools/qemu/qemu/0080-target-riscv-rvv-1.0-rename-r2_zimm-to-r2_zimm11.patch

@@ -0,0 +1,38 @@
+From 41f16e8fd38d6c633f53e959f58f825bc4a62a7d Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Fri, 26 Feb 2021 08:42:04 +0800
+Subject: [PATCH 080/107] target/riscv: rvv-1.0: rename r2_zimm to r2_zimm11
+
+Rename r2_zimm to r2_zimm11 for the upcoming vsetivli instruction.
+vsetivli has 10-bits of zimm but vsetvli has 11-bits of zimm.
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+---
+ target/riscv/insn32.decode | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
+index c90d347af4..bab4259d91 100644
+--- a/target/riscv/insn32.decode
++++ b/target/riscv/insn32.decode
+@@ -77,7 +77,7 @@
+ @r_vm_1  ...... . ..... ..... ... ..... .......    &rmrr vm=1 %rs2 %rs1 %rd
+ @r_vm_0  ...... . ..... ..... ... ..... .......    &rmrr vm=0 %rs2 %rs1 %rd
+ @r_wdvm  ..... wd:1 vm:1 ..... ..... ... ..... ....... &rwdvm %rs2 %rs1 %rd
+-@r2_zimm . zimm:11  ..... ... ..... ....... %rs1 %rd
++@r2_zimm11 . zimm:11  ..... ... ..... ....... %rs1 %rd
+ @r2_s    .......   ..... ..... ... ..... ....... %rs2 %rs1
+ 
+ @hfence_gvma ....... ..... .....   ... ..... ....... %rs2 %rs1
+@@ -646,7 +646,7 @@ vsext_vf2       010010 . ..... 00111 010 ..... 1010111 @r2_vm
+ vsext_vf4       010010 . ..... 00101 010 ..... 1010111 @r2_vm
+ vsext_vf8       010010 . ..... 00011 010 ..... 1010111 @r2_vm
+ 
+-vsetvli         0 ........... ..... 111 ..... 1010111  @r2_zimm
++vsetvli         0 ........... ..... 111 ..... 1010111  @r2_zimm11
+ vsetvl          1000000 ..... ..... 111 ..... 1010111  @r
+ 
+ # *** RV32Zfh Extension ***
+-- 
+2.33.1
+

+ 82 - 0
recipes-devtools/qemu/qemu/0081-target-riscv-rvv-1.0-add-vsetivli-instruction.patch

@@ -0,0 +1,82 @@
+From 5ba304e191c401c8b484c754d2d33b43667243d1 Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Fri, 26 Feb 2021 08:42:17 +0800
+Subject: [PATCH 081/107] target/riscv: rvv-1.0: add vsetivli instruction
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+---
+ target/riscv/insn32.decode              |  2 ++
+ target/riscv/insn_trans/trans_rvv.c.inc | 30 +++++++++++++++++++++++++
+ 2 files changed, 32 insertions(+)
+
+diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
+index bab4259d91..f6eec02d55 100644
+--- a/target/riscv/insn32.decode
++++ b/target/riscv/insn32.decode
+@@ -78,6 +78,7 @@
+ @r_vm_0  ...... . ..... ..... ... ..... .......    &rmrr vm=0 %rs2 %rs1 %rd
+ @r_wdvm  ..... wd:1 vm:1 ..... ..... ... ..... ....... &rwdvm %rs2 %rs1 %rd
+ @r2_zimm11 . zimm:11  ..... ... ..... ....... %rs1 %rd
++@r2_zimm10 .. zimm:10  ..... ... ..... ....... %rs1 %rd
+ @r2_s    .......   ..... ..... ... ..... ....... %rs2 %rs1
+ 
+ @hfence_gvma ....... ..... .....   ... ..... ....... %rs2 %rs1
+@@ -647,6 +648,7 @@ vsext_vf4       010010 . ..... 00101 010 ..... 1010111 @r2_vm
+ vsext_vf8       010010 . ..... 00011 010 ..... 1010111 @r2_vm
+ 
+ vsetvli         0 ........... ..... 111 ..... 1010111  @r2_zimm11
++vsetivli        11 .......... ..... 111 ..... 1010111  @r2_zimm10
+ vsetvl          1000000 ..... ..... 111 ..... 1010111  @r
+ 
+ # *** RV32Zfh Extension ***
+diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
+index f916f5d064..49f2eeee47 100644
+--- a/target/riscv/insn_trans/trans_rvv.c.inc
++++ b/target/riscv/insn_trans/trans_rvv.c.inc
+@@ -159,6 +159,29 @@ static bool do_vsetvl(DisasContext *ctx, int rd, int rs1, TCGv s2)
+     return true;
+ }
+ 
++static bool do_vsetivli(DisasContext *ctx, int rd, TCGv s1, TCGv s2)
++{
++    TCGv dst;
++
++    if (!require_rvv(ctx) || !has_ext(ctx, RVV)) {
++        return false;
++    }
++
++    dst = tcg_temp_new();
++
++    gen_helper_vsetvl(dst, cpu_env, s1, s2);
++    gen_set_gpr(rd, dst);
++    mark_vs_dirty(ctx);
++    tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn);
++    lookup_and_goto_ptr(ctx);
++    ctx->base.is_jmp = DISAS_NORETURN;
++
++    tcg_temp_free(s1);
++    tcg_temp_free(s2);
++    tcg_temp_free(dst);
++    return true;
++}
++
+ static bool trans_vsetvl(DisasContext *ctx, arg_vsetvl *a)
+ {
+     TCGv s2 = tcg_temp_new();
+@@ -172,6 +195,13 @@ static bool trans_vsetvli(DisasContext *ctx, arg_vsetvli *a)
+     return do_vsetvl(ctx, a->rd, a->rs1, s2);
+ }
+ 
++static bool trans_vsetivli(DisasContext *ctx, arg_vsetivli *a)
++{
++    TCGv s1 = tcg_const_tl(a->rs1);
++    TCGv s2 = tcg_const_tl(a->zimm);
++    return do_vsetivli(ctx, a->rd, s1, s2);
++}
++
+ /* vector register offset from env */
+ static uint32_t vreg_ofs(DisasContext *s, int reg)
+ {
+-- 
+2.33.1
+
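Note: unlike vsetvli, vsetivli takes the application vector length as a 5-bit immediate (the rs1 field reused as uimm, see tcg_const_tl(a->rs1) above) and the vtype setting as a 10-bit immediate, so no source register is read; the existing vsetvl helper then computes the new vl. A rough sketch of the resulting vl for the common case, assuming AVL <= VLMAX and illustrative names:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative only: with vsetivli the AVL is a 5-bit immediate (0-31)
     * and the new vl is capped by VLMAX = VLEN/SEW * LMUL. */
    static uint32_t new_vl(uint32_t avl, uint32_t vlmax)
    {
        return avl < vlmax ? avl : vlmax;
    }

    int main(void)
    {
        uint32_t vlmax = 4;   /* e.g. VLEN=128, SEW=32, LMUL=1 */
        printf("vsetivli avl=3  -> vl=%u\n", new_vl(3, vlmax));
        printf("vsetivli avl=31 -> vl=%u\n", new_vl(31, vlmax));
        return 0;
    }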

+ 91 - 0
recipes-devtools/qemu/qemu/0082-target-riscv-rvv-1.0-add-evl-parameter-to-vext_ldst_.patch

@@ -0,0 +1,91 @@
+From d8a62ac2d3f365054d7788fea734152c3c250e79 Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Wed, 24 Feb 2021 16:15:52 +0800
+Subject: [PATCH 082/107] target/riscv: rvv-1.0: add evl parameter to
+ vext_ldst_us()
+
+rvv v0.10 adds vector unit-stride mask load/store instructions
+(vle1.v, vse1.v), which have:
+    evl (effective vector length) = ceil(env->vl/8).
+
+The new instructions operate the same as unmasked byte loads and stores.
+Add evl parameter to reuse vext_ldst_us().
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+---
+ target/riscv/vector_helper.c | 38 ++++++++++++++++++------------------
+ 1 file changed, 19 insertions(+), 19 deletions(-)
+
+diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
+index 78787af258..0e0b392a27 100644
+--- a/target/riscv/vector_helper.c
++++ b/target/riscv/vector_helper.c
+@@ -292,17 +292,17 @@ GEN_VEXT_ST_STRIDE(vsse64_v, int64_t, ste_d)
+ /* unmasked unit-stride load and store operation*/
+ static void
+ vext_ldst_us(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc,
+-             vext_ldst_elem_fn *ldst_elem,
+-             uint32_t esz, uintptr_t ra, MMUAccessType access_type)
++             vext_ldst_elem_fn *ldst_elem, uint32_t esz, uint32_t evl,
++             uintptr_t ra, MMUAccessType access_type)
+ {
+     uint32_t i, k;
+     uint32_t nf = vext_nf(desc);
+     uint32_t max_elems = vext_max_elems(desc, esz);
+ 
+     /* probe every access */
+-    probe_pages(env, base, env->vl * (nf << esz), ra, access_type);
++    probe_pages(env, base, evl * (nf << esz), ra, access_type);
+     /* load bytes from guest memory */
+-    for (i = env->vstart; i < env->vl; i++) {
++    for (i = env->vstart; i < evl; i++) {
+         k = 0;
+         while (k < nf) {
+             target_ulong addr = base + ((i * nf + k) << esz);
+@@ -332,7 +332,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong base,                \
+                   CPURISCVState *env, uint32_t desc)                    \
+ {                                                                       \
+     vext_ldst_us(vd, base, env, desc, LOAD_FN,                          \
+-                 ctzl(sizeof(ETYPE)), GETPC(), MMU_DATA_LOAD);          \
++                 ctzl(sizeof(ETYPE)), env->vl, GETPC(), MMU_DATA_LOAD); \
+ }
+ 
+ GEN_VEXT_LD_US(vle8_v,  int8_t,  lde_b)
+@@ -340,20 +340,20 @@ GEN_VEXT_LD_US(vle16_v, int16_t, lde_h)
+ GEN_VEXT_LD_US(vle32_v, int32_t, lde_w)
+ GEN_VEXT_LD_US(vle64_v, int64_t, lde_d)
+ 
+-#define GEN_VEXT_ST_US(NAME, ETYPE, STORE_FN)                           \
+-void HELPER(NAME##_mask)(void *vd, void *v0, target_ulong base,         \
+-                         CPURISCVState *env, uint32_t desc)             \
+-{                                                                       \
+-    uint32_t stride = vext_nf(desc) << ctzl(sizeof(ETYPE));             \
+-    vext_ldst_stride(vd, v0, base, stride, env, desc, false, STORE_FN,  \
+-                     ctzl(sizeof(ETYPE)), GETPC(), MMU_DATA_STORE);     \
+-}                                                                       \
+-                                                                        \
+-void HELPER(NAME)(void *vd, void *v0, target_ulong base,                \
+-                  CPURISCVState *env, uint32_t desc)                    \
+-{                                                                       \
+-    vext_ldst_us(vd, base, env, desc, STORE_FN,                         \
+-                 ctzl(sizeof(ETYPE)), GETPC(), MMU_DATA_STORE);         \
++#define GEN_VEXT_ST_US(NAME, ETYPE, STORE_FN)                            \
++void HELPER(NAME##_mask)(void *vd, void *v0, target_ulong base,          \
++                         CPURISCVState *env, uint32_t desc)              \
++{                                                                        \
++    uint32_t stride = vext_nf(desc) << ctzl(sizeof(ETYPE));              \
++    vext_ldst_stride(vd, v0, base, stride, env, desc, false, STORE_FN,   \
++                     ctzl(sizeof(ETYPE)), GETPC(), MMU_DATA_STORE);      \
++}                                                                        \
++                                                                         \
++void HELPER(NAME)(void *vd, void *v0, target_ulong base,                 \
++                  CPURISCVState *env, uint32_t desc)                     \
++{                                                                        \
++    vext_ldst_us(vd, base, env, desc, STORE_FN,                          \
++                 ctzl(sizeof(ETYPE)), env->vl, GETPC(), MMU_DATA_STORE); \
+ }
+ 
+ GEN_VEXT_ST_US(vse8_v,  int8_t,  ste_b)
+-- 
+2.33.1
+
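The evl value introduced by the patch above is a plain ceiling division: the mask holds one bit per element, packed into bytes, and the access is performed as unmasked byte loads/stores. A minimal standalone C sketch of that computation (the helper name mask_evl is made up for illustration and is not part of QEMU):

    #include <stdio.h>

    /* Hypothetical helper: effective vector length for the unit-stride mask
     * load/store path, evl = ceil(vl / 8). */
    static unsigned mask_evl(unsigned vl)
    {
        return (vl + 7) >> 3;          /* same as (vl + 7) / 8 */
    }

    int main(void)
    {
        printf("evl(19) = %u\n", mask_evl(19));   /* ceil(19/8) = 3 */
        printf("evl(64) = %u\n", mask_evl(64));   /* ceil(64/8) = 8 */
        return 0;
    }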

+ 128 - 0
recipes-devtools/qemu/qemu/0083-target-riscv-rvv-1.0-add-vector-unit-stride-mask-loa.patch

@@ -0,0 +1,128 @@
+From 859e2c447049371276adb2af8c0724e5f0eaceb9 Mon Sep 17 00:00:00 2001
+From: Frank Chang <frank.chang@sifive.com>
+Date: Wed, 24 Feb 2021 16:16:12 +0800
+Subject: [PATCH 083/107] target/riscv: rvv-1.0: add vector unit-stride mask
+ load/store insns
+
+Signed-off-by: Frank Chang <frank.chang@sifive.com>
+---
+ target/riscv/helper.h                   |  2 ++
+ target/riscv/insn32.decode              |  4 +++
+ target/riscv/insn_trans/trans_rvv.c.inc | 40 +++++++++++++++++++++++++
+ target/riscv/vector_helper.c            | 21 +++++++++++++
+ 4 files changed, 67 insertions(+)
+
+diff --git a/target/riscv/helper.h b/target/riscv/helper.h
+index a83a7c33c0..f085f2fbb5 100644
+--- a/target/riscv/helper.h
++++ b/target/riscv/helper.h
+@@ -130,6 +130,8 @@ DEF_HELPER_5(vse8_v_mask, void, ptr, ptr, tl, env, i32)
+ DEF_HELPER_5(vse16_v_mask, void, ptr, ptr, tl, env, i32)
+ DEF_HELPER_5(vse32_v_mask, void, ptr, ptr, tl, env, i32)
+ DEF_HELPER_5(vse64_v_mask, void, ptr, ptr, tl, env, i32)
++DEF_HELPER_5(vlm_v, void, ptr, ptr, tl, env, i32)
++DEF_HELPER_5(vsm_v, void, ptr, ptr, tl, env, i32)
+ DEF_HELPER_6(vlse8_v, void, ptr, ptr, tl, tl, env, i32)
+ DEF_HELPER_6(vlse16_v, void, ptr, ptr, tl, tl, env, i32)
+ DEF_HELPER_6(vlse32_v, void, ptr, ptr, tl, tl, env, i32)
+diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
+index f6eec02d55..12fbad1b6b 100644
+--- a/target/riscv/insn32.decode
++++ b/target/riscv/insn32.decode
+@@ -251,6 +251,10 @@ vse16_v    ... 000 . 00000 ..... 101 ..... 0100111 @r2_nfvm
+ vse32_v    ... 000 . 00000 ..... 110 ..... 0100111 @r2_nfvm
+ vse64_v    ... 000 . 00000 ..... 111 ..... 0100111 @r2_nfvm
+ 
++# Vector unit-stride mask load/store insns.
++vlm_v      000 000 1 01011 ..... 000 ..... 0000111 @r2
++vsm_v      000 000 1 01011 ..... 000 ..... 0100111 @r2
++
+ # Vector strided insns.
+ vlse8_v     ... 010 . ..... ..... 000 ..... 0000111 @r_nfvm
+ vlse16_v    ... 010 . ..... ..... 101 ..... 0000111 @r_nfvm
+diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
+index 49f2eeee47..c7ef4fa0ac 100644
+--- a/target/riscv/insn_trans/trans_rvv.c.inc
++++ b/target/riscv/insn_trans/trans_rvv.c.inc
+@@ -747,6 +747,46 @@ GEN_VEXT_TRANS(vse16_v, MO_16, r2nfvm, st_us_op, st_us_check)
+ GEN_VEXT_TRANS(vse32_v, MO_32, r2nfvm, st_us_op, st_us_check)
+ GEN_VEXT_TRANS(vse64_v, MO_64, r2nfvm, st_us_op, st_us_check)
+ 
++/*
++ *** unit stride mask load and store
++ */
++static bool ld_us_mask_op(DisasContext *s, arg_vlm_v *a, uint8_t eew)
++{
++    uint32_t data = 0;
++    gen_helper_ldst_us *fn = gen_helper_vlm_v;
++
++    /* EMUL = 1, NFIELDS = 1 */
++    data = FIELD_DP32(data, VDATA, LMUL, 0);
++    data = FIELD_DP32(data, VDATA, NF, 1);
++    return ldst_us_trans(a->rd, a->rs1, data, fn, s, false);
++}
++
++static bool ld_us_mask_check(DisasContext *s, arg_vlm_v *a, uint8_t eew)
++{
++    /* EMUL = 1, NFIELDS = 1 */
++    return require_rvv(s) && vext_check_isa_ill(s);
++}
++
++static bool st_us_mask_op(DisasContext *s, arg_vsm_v *a, uint8_t eew)
++{
++    uint32_t data = 0;
++    gen_helper_ldst_us *fn = gen_helper_vsm_v;
++
++    /* EMUL = 1, NFIELDS = 1 */
++    data = FIELD_DP32(data, VDATA, LMUL, 0);
++    data = FIELD_DP32(data, VDATA, NF, 1);
++    return ldst_us_trans(a->rd, a->rs1, data, fn, s, true);
++}
++
++static bool st_us_mask_check(DisasContext *s, arg_vsm_v *a, uint8_t eew)
++{
++    /* EMUL = 1, NFIELDS = 1 */
++    return require_rvv(s) && vext_check_isa_ill(s);
++}
++
++GEN_VEXT_TRANS(vlm_v, MO_8, vlm_v, ld_us_mask_op, ld_us_mask_check)
++GEN_VEXT_TRANS(vsm_v, MO_8, vsm_v, st_us_mask_op, st_us_mask_check)
++
+ /*
+  *** stride load and store
+  */
+diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
+index 0e0b392a27..aca8e63023 100644
+--- a/target/riscv/vector_helper.c
++++ b/target/riscv/vector_helper.c
+@@ -361,6 +361,27 @@ GEN_VEXT_ST_US(vse16_v, int16_t, ste_h)
+ GEN_VEXT_ST_US(vse32_v, int32_t, ste_w)
+ GEN_VEXT_ST_US(vse64_v, int64_t, ste_d)
+ 
++/*
++ *** unit stride mask load and store, EEW = 1
++ */
++void HELPER(vlm_v)(void *vd, void *v0, target_ulong base,
++                    CPURISCVState *env, uint32_t desc)
++{
++    /* evl = ceil(vl/8) */
++    uint8_t evl = (env->vl + 7) >> 3;
++    vext_ldst_us(vd, base, env, desc, lde_b,
++                 0, evl, GETPC(), MMU_DATA_LOAD);
++}
++
++void HELPER(vsm_v)(void *vd, void *v0, target_ulong base,
++                    CPURISCVState *env, uint32_t desc)
++{
++    /* evl = ceil(vl/8) */
++    uint8_t evl = (env->vl + 7) >> 3;
++    vext_ldst_us(vd, base, env, desc, ste_b,
++                 0, evl, GETPC(), MMU_DATA_STORE);
++}
++
+ /*
+  *** index: access vector element from indexed memory
+  */
+-- 
+2.33.1
+
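As a worked illustration of what the new vlm_v/vsm_v helpers above do, the following toy C model (not QEMU code; VLENB, the initial memory contents and the function names are assumptions) treats the mask register as a byte array and moves exactly ceil(vl/8) bytes, with EEW = 8 and EMUL = 1 regardless of the current SEW/LMUL setting:

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    #define VLENB 16   /* assumed VLEN = 128 bits, i.e. 16 bytes per register */

    /* Toy reference model of vlm.v: body bytes are loaded as plain unmasked
     * byte loads; bytes vd[evl..VLENB-1] are left untouched (tail-agnostic). */
    static void ref_vlm(uint8_t vd[VLENB], const uint8_t *mem, unsigned vl)
    {
        unsigned evl = (vl + 7) / 8;   /* bytes to transfer */
        memcpy(vd, mem, evl);
    }

    /* Toy reference model of vsm.v: the store mirrors the load. */
    static void ref_vsm(uint8_t *mem, const uint8_t vd[VLENB], unsigned vl)
    {
        memcpy(mem, vd, (vl + 7) / 8);
    }

    int main(void)
    {
        uint8_t mem[VLENB] = { 0xff, 0x0f, 0xa5, 0x77 };
        uint8_t v0[VLENB]  = { 0 };

        ref_vlm(v0, mem, 19);          /* vl = 19 -> ceil(19/8) = 3 bytes loaded */
        printf("%02x %02x %02x %02x\n", v0[0], v0[1], v0[2], v0[3]);  /* ff 0f a5 00 */

        ref_vsm(mem + 8, v0, 19);      /* writes the same 3 bytes back at mem[8] */
        return 0;
    }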

Some files were not shown because too many files changed in this diff