
dubhe: gdb: Update GDB version to support full B extension

Added patch files and updated the GDB repo to point to the RISC-V GDB that
supports the full B v1.0 extension.

Signed-off-by: Jun Yuan Tan <junyuan.tan@starfivetech.com>
Jun Yuan Tan, 2 years ago, commit 5d50135b01

+ 25 - 0
recipes-devtools/gdb/gdb-10.1.inc

@@ -0,0 +1,25 @@
+LICENSE = "GPLv2 & GPLv3 & LGPLv2 & LGPLv3"
+LIC_FILES_CHKSUM = "file://COPYING;md5=59530bdf33659b29e73d4adb9f9f6552 \
+		    file://COPYING3;md5=d32239bcb673463ab874e80d47fae504 \
+		    file://COPYING3.LIB;md5=6a6a8e020838b23406c81b19c1d46df6 \
+		    file://COPYING.LIB;md5=9f604d8a4f8e74f4f5140845a21b6674"
+
+SRCREV = "5da071ef0965b8054310d8dde9975037b0467311"
+BRANCH = "fsf-gdb-10.1-with-sim"
+
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://github.com/riscv/riscv-binutils-gdb.git;branch=${BRANCH} \
+           file://0001-add-b-v-extension-support.patch \
+           file://0001-make-man-install-relative-to-DESTDIR.patch \
+           file://0002-mips-linux-nat-Define-_ABIO32-if-not-defined.patch \
+           file://0003-ppc-ptrace-Define-pt_regs-uapi_pt_regs-on-GLIBC-syst.patch \
+           file://0004-Add-support-for-Renesas-SH-sh4-architecture.patch \
+           file://0005-Dont-disable-libreadline.a-when-using-disable-static.patch \
+           file://0006-use-asm-sgidefs.h.patch \
+           file://0008-Change-order-of-CFLAGS.patch \
+           file://0009-resolve-restrict-keyword-conflict.patch \
+           file://0010-Fix-invalid-sigprocmask-call.patch \
+           file://0011-gdbserver-ctrl-c-handling.patch \
+           "
+SRC_URI[sha256sum] = "f82f1eceeec14a3afa2de8d9b0d3c91d5a3820e23e0a01bbb70ef9f0276b62c0"

+ 65 - 0
recipes-devtools/gdb/gdb-common.inc

@@ -0,0 +1,65 @@
+SUMMARY = "GNU debugger"
+HOMEPAGE = "http://www.gnu.org/software/gdb/"
+DESCRIPTION = "GDB, the GNU Project debugger, allows you to see what is going on inside another program while it executes -- or what another program was doing at the moment it crashed."
+SECTION = "devel"
+DEPENDS = "expat zlib ncurses virtual/libiconv ${LTTNGUST} bison-native"
+
+LTTNGUST = "lttng-ust"
+LTTNGUST:arc = ""
+LTTNGUST:aarch64 = ""
+LTTNGUST:mipsarch = ""
+LTTNGUST:sh4 = ""
+
+inherit autotools texinfo
+
+UPSTREAM_CHECK_GITTAGREGEX = "gdb\-(?P<pver>.+)\-release"
+
+B = "${WORKDIR}/build-${TARGET_SYS}"
+
+EXPAT = "--with-expat --with-libexpat-prefix=${STAGING_DIR_HOST}"
+
+EXTRA_OECONF = "--disable-gdbtk --disable-x --disable-werror \
+                --with-curses --disable-multilib --disable-sim \
+                --without-guile \
+                ${GDBPROPREFIX} ${EXPAT} \
+                ${@bb.utils.contains('DISTRO_FEATURES', 'multiarch', '--enable-64-bit-bfd', '', d)} \
+                --disable-rpath \
+                --disable-gas --disable-binutils \
+                --disable-ld --disable-gold \
+                --disable-gprof \
+"
+
+PACKAGECONFIG ??= "readline"
+# Use --without-system-readline to compile with readline 5.
+PACKAGECONFIG[readline] = "--with-system-readline,--without-system-readline,readline"
+PACKAGECONFIG[python] = "--with-python=${WORKDIR}/python,--without-python,python3,python3 python3-codecs"
+PACKAGECONFIG[babeltrace] = "--with-babeltrace,--without-babeltrace,babeltrace"
+# ncurses is already a hard DEPENDS, but would be added here if it weren't
+PACKAGECONFIG[tui] = "--enable-tui,--disable-tui"
+PACKAGECONFIG[xz] = "--with-lzma,--without-lzma,xz"
+PACKAGECONFIG[debuginfod] = "--with-debuginfod, --without-debuginfod, elfutils"
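
These PACKAGECONFIG knobs mirror the stock OE-Core gdb recipe; a build can opt in to extra features from local.conf or a bbappend, for example (illustrative feature choice, any flag defined above works the same way):

    PACKAGECONFIG:append:pn-gdb = " tui babeltrace"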
+
+GDBPROPREFIX = "--program-prefix=''"
+
+DISABLE_STATIC = ""
+
+do_configure () {
+	# override this function to avoid the autoconf/automake/aclocal/autoheader
+	# calls for now
+	(cd ${S} && gnu-configize) || die "failure in running gnu-configize"
+	oe_runconf
+}
+
+# we don't want gdb to provide bfd/iberty/opcodes, which instead will override the
+# right bits installed by binutils.  Same for bfd.info -- also from binutils.
+do_install:append() {
+	rm -rf ${D}${libdir}
+	rm -rf ${D}${includedir}
+	rm -rf ${D}${datadir}/locale
+	rm -f ${D}${infodir}/bfd.info
+}
+
+RRECOMMENDS:gdb:append:linux = " glibc-thread-db "
+RRECOMMENDS:gdb:append:linux-gnueabi = " glibc-thread-db "
+RRECOMMENDS:gdbserver:append:linux = " glibc-thread-db "
+RRECOMMENDS:gdbserver:append:linux-gnueabi = " glibc-thread-db "

+ 42 - 0
recipes-devtools/gdb/gdb-cross-canadian.inc

@@ -0,0 +1,42 @@
+inherit cross-canadian
+inherit python3-dir
+
+SUMMARY = "GNU debugger (cross-canadian gdb for ${TARGET_ARCH} target)"
+PN = "gdb-cross-canadian-${TRANSLATED_TARGET_ARCH}"
+BPN = "gdb"
+
+DEPENDS = "nativesdk-ncurses nativesdk-expat nativesdk-gettext \
+           virtual/${HOST_PREFIX}gcc-crosssdk virtual/${HOST_PREFIX}binutils-crosssdk virtual/nativesdk-libc"
+
+GDBPROPREFIX = "--program-prefix='${TARGET_PREFIX}'"
+
+# Overrides PACKAGECONFIG variables in gdb-common.inc
+PACKAGECONFIG ??= "python readline"
+PACKAGECONFIG[python] = "--with-python=${WORKDIR}/python,--without-python,nativesdk-python3, \
+                         nativesdk-python3-core \
+                         nativesdk-python3-codecs nativesdk-python3-netclient \
+                         "
+PACKAGECONFIG[readline] = "--with-system-readline,--without-system-readline,nativesdk-readline"
+
+SSTATE_DUPWHITELIST += "${STAGING_DATADIR}/gdb"
+
+do_configure:prepend() {
+cat > ${WORKDIR}/python << EOF
+#! /bin/sh
+case "\$2" in
+        --includes) echo "-I${STAGING_INCDIR}/${PYTHON_DIR}${PYTHON_ABI}/" ;;
+        --ldflags) echo "-Wl,-rpath-link,${STAGING_LIBDIR}/.. -Wl,-rpath,${libdir}/.. -lpthread -ldl -lutil -lm -lpython${PYTHON_BASEVERSION}${PYTHON_ABI}" ;;
+        --exec-prefix) echo "${exec_prefix}" ;;
+        *) exit 1 ;;
+esac
+exit 0
+EOF
+        chmod +x ${WORKDIR}/python
+}
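
The heredoc above generates a stand-in for python-config: gdb's configure runs it in place of a Python interpreter executing python-config.py, so the query (--includes, --ldflags or --exec-prefix) arrives as "$2" and is answered from staged SDK paths instead of by probing the build host's Python.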
+
+# we don't want gdb to provide bfd/iberty/opcodes, which instead will override the
+# right bits installed by binutils.
+do_install:append() {
+	rm -rf ${D}${exec_prefix}/lib
+	cross_canadian_bindirlinks
+}

+ 3 - 0
recipes-devtools/gdb/gdb-cross-canadian_10.1.bb

@@ -0,0 +1,3 @@
+require gdb-common.inc
+require gdb-cross-canadian.inc
+require gdb-${PV}.inc

+ 30 - 0
recipes-devtools/gdb/gdb-cross.inc

@@ -0,0 +1,30 @@
+require gdb-common.inc
+
+DEPENDS = "expat-native ncurses-native flex-native bison-native"
+
+inherit python3native
+
+# Overrides PACKAGECONFIG variables in gdb-common.inc
+PACKAGECONFIG ??= "python readline"
+PACKAGECONFIG[python] = "--with-python=${PYTHON},--without-python,python3-native"
+PACKAGECONFIG[readline] = "--with-system-readline,--without-system-readline,readline-native"
+
+do_compile:prepend() {
+    export STAGING_LIBDIR="${STAGING_LIBDIR_NATIVE}"
+    export STAGING_INCDIR="${STAGING_INCDIR_NATIVE}"
+}
+
+#EXTRA_OEMAKE += "LDFLAGS='${BUILD_LDFLAGS}'"
+
+GDBPROPREFIX = ""
+
+PN = "gdb-cross-${TARGET_ARCH}"
+BPN = "gdb"
+
+# Ignore how TARGET_ARCH is computed.
+TARGET_ARCH[vardepvalue] = "${TARGET_ARCH}"
+
+inherit cross
+inherit gettext
+
+datadir .= "/gdb-${TARGET_SYS}${TARGET_VENDOR}-${TARGET_OS}"

+ 2 - 0
recipes-devtools/gdb/gdb-cross_10.1.bb

@@ -0,0 +1,2 @@
+require gdb-cross.inc
+require gdb-${PV}.inc

+ 11 - 0
recipes-devtools/gdb/gdb.inc

@@ -0,0 +1,11 @@
+require gdb-common.inc
+
+inherit gettext
+
+#LDFLAGS:append = " -s"
+#export CFLAGS:append=" -L${STAGING_LIBDIR}"
+
+# cross-canadian must not see this
+PACKAGES =+ "gdbserver"
+FILES:gdbserver = "${bindir}/gdbserver"
+

+ 4519 - 0
recipes-devtools/gdb/gdb/0001-add-b-v-extension-support.patch

@@ -0,0 +1,4519 @@
+From e0e44ae667aea74a1cf3738cd7a6174b45897a82 Mon Sep 17 00:00:00 2001
+From: "max.ma" <max.ma@starfivetech.com>
+Date: Mon, 1 Nov 2021 18:13:28 -0700
+Subject: [PATCH 1/1] add b & v extension support
+
+---
+ gas/config/tc-riscv.c      |  464 ++++++++
+ include/opcode/riscv-opc.h | 2192 +++++++++++++++++++++++++++++++++++-
+ include/opcode/riscv.h     |   88 +-
+ opcodes/riscv-dis.c        |   92 ++
+ opcodes/riscv-opc.c        | 1334 ++++++++++++++++++++++
+ 5 files changed, 4168 insertions(+), 2 deletions(-)
+
+diff --git a/gas/config/tc-riscv.c b/gas/config/tc-riscv.c
+index 9df6d3f415..45f5bda4a2 100644
+--- a/gas/config/tc-riscv.c
++++ b/gas/config/tc-riscv.c
+@@ -235,6 +235,25 @@ riscv_multi_subset_supports (enum riscv_insn_class insn_class)
+ 
+     case INSN_CLASS_Q: return riscv_subset_supports ("q");
+ 
++    case INSN_CLASS_ZBB:
++      return riscv_subset_supports ("zbb");
++    case INSN_CLASS_ZBA:
++      return riscv_subset_supports ("zba");
++    case INSN_CLASS_ZBC:
++      return riscv_subset_supports ("zbc");
++    case INSN_CLASS_ZBS:
++      return riscv_subset_supports ("zbs");
++    
++    case INSN_CLASS_V: 
++      return riscv_subset_supports ("v");
++    case INSN_CLASS_V_AND_F:
++      return riscv_subset_supports ("v") && riscv_subset_supports ("f");
++    case INSN_CLASS_V_OR_ZVAMO:
++      return (riscv_subset_supports ("a")
++	      && (riscv_subset_supports ("v")
++		  || riscv_subset_supports ("zvamo")));
++    case INSN_CLASS_V_OR_ZVLSSEG:
++      return riscv_subset_supports ("v") || riscv_subset_supports ("zvlsseg");
+     default:
+       as_fatal ("Unreachable");
+       return FALSE;
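
In effect, a vector AMO instruction (INSN_CLASS_V_OR_ZVAMO) assembles only when the A extension is enabled together with either full V or standalone Zvamo, while Zvlsseg segment loads/stores are accepted under either V or Zvlsseg.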
+@@ -599,6 +618,8 @@ enum reg_class
+ {
+   RCLASS_GPR,
+   RCLASS_FPR,
++  RCLASS_VECR,
++  RCLASS_VECM,
+   RCLASS_MAX,
+ 
+   RCLASS_CSR
+@@ -700,6 +721,12 @@ riscv_csr_address (const char *csr_name,
+     case CSR_CLASS_DEBUG:
+       need_check_version = FALSE;
+       break;
++    case CSR_CLASS_V:
++      result = (riscv_subset_supports ("v")
++		|| riscv_subset_supports ("zvamo")
++		|| riscv_subset_supports ("zvlsseg"));
++      need_check_version = FALSE;
++      break;
+     default:
+       as_bad (_("internal: bad RISC-V CSR class (0x%x)"), csr_class);
+     }
+@@ -914,6 +941,8 @@ validate_riscv_insn (const struct riscv_opcode *opc, int length)
+       case ')': break;
+       case '<': USE_BITS (OP_MASK_SHAMTW,	OP_SH_SHAMTW);	break;
+       case '>':	USE_BITS (OP_MASK_SHAMT,	OP_SH_SHAMT);	break;
++      case '#': used_bits |= ENCODE_PREF_TIMM (-1U); break;
++      case '+': used_bits |= ENCODE_PREF_SIMM (-1U); break;   
+       case 'A': break;
+       case 'D':	USE_BITS (OP_MASK_RD,		OP_SH_RD);	break;
+       case 'Z':	USE_BITS (OP_MASK_RS1,		OP_SH_RS1);	break;
+@@ -966,6 +995,33 @@ validate_riscv_insn (const struct riscv_opcode *opc, int length)
+ 	     return FALSE;
+ 	  }
+ 	break;
++
++      case 'V': /* RVV */
++	switch (c = *p++)
++	  {
++	  case 'd':
++	  case 'f': USE_BITS (OP_MASK_VD, OP_SH_VD); break;
++	  case 'e': USE_BITS (OP_MASK_VWD, OP_SH_VWD); break;
++	  case 's': USE_BITS (OP_MASK_VS1, OP_SH_VS1); break;
++	  case 't': USE_BITS (OP_MASK_VS2, OP_SH_VS2); break;
++	  case 'u': USE_BITS (OP_MASK_VS1, OP_SH_VS1);
++		    USE_BITS (OP_MASK_VS2, OP_SH_VS2); break;
++	  case 'v': USE_BITS (OP_MASK_VD, OP_SH_VD);
++		    USE_BITS (OP_MASK_VS1, OP_SH_VS1);
++		    USE_BITS (OP_MASK_VS2, OP_SH_VS2); break;
++	  case '0': break;
++	  case 'b': used_bits |= ENCODE_RVV_VB_IMM (-1U); break;
++	  case 'c': used_bits |= ENCODE_RVV_VC_IMM (-1U); break;
++	  case 'i':
++	  case 'j':
++	  case 'k': USE_BITS (OP_MASK_VIMM, OP_SH_VIMM); break;
++	  case 'm': USE_BITS (OP_MASK_VMASK, OP_SH_VMASK); break;
++	  default:
++	    as_bad (_("internal: bad RISC-V opcode (unknown operand type `V%c'): %s %s"),
++		    c, opc->name, opc->args);
++	  }
++	break;
++
+       default:
+ 	as_bad (_("internal: bad RISC-V opcode "
+ 		  "(unknown operand type `%c'): %s %s"),
+@@ -1044,6 +1100,8 @@ md_begin (void)
+   hash_reg_names (RCLASS_GPR, riscv_gpr_names_abi, NGPR);
+   hash_reg_names (RCLASS_FPR, riscv_fpr_names_numeric, NFPR);
+   hash_reg_names (RCLASS_FPR, riscv_fpr_names_abi, NFPR);
++  hash_reg_names (RCLASS_VECR, riscv_vecr_names_numeric, NVECR);
++  hash_reg_names (RCLASS_VECM, riscv_vecm_names_numeric, NVECM);  
+   /* Add "fp" as an alias for "s0".  */
+   hash_reg_name (RCLASS_GPR, "fp", 8);
+ 
+@@ -1202,6 +1260,41 @@ macro_build (expressionS *ep, const char *name, const char *fmt, ...)
+ 	  break;
+ 	case ',':
+ 	  continue;
++
++	case 'V': /* RVV */
++	  {
++	    switch (*fmt++)
++	      {
++	      case 'd':
++		INSERT_OPERAND (VD, insn, va_arg (args, int));
++		continue;
++
++	      case 's':
++		INSERT_OPERAND (VS1, insn, va_arg (args, int));
++		continue;
++
++	      case 't':
++		INSERT_OPERAND (VS2, insn, va_arg (args, int));
++		continue;
++
++	      case 'm':
++		{
++		  int reg = va_arg (args, int);
++		  if (reg == -1)
++		    {
++		      INSERT_OPERAND (VMASK, insn, 1);
++		      continue;
++		    }
++		  else if (reg == 0)
++		    {
++		      INSERT_OPERAND (VMASK, insn, 0);
++		      continue;
++		    }
++		}
++		/* fallthru */
++	      }
++	  }
++
+ 	default:
+ 	  as_fatal (_("internal error: invalid macro"));
+ 	}
+@@ -1267,6 +1360,27 @@ check_absolute_expr (struct riscv_cl_insn *ip, expressionS *ex,
+   normalize_constant_expr (ex);
+ }
+ 
++/* The pref type should be one of the following:
++    0000 - scalar prefetch L1, fetch data as if for a normal scalar load, 
++           and imply load into all lower cache destination levels.
++    0001 - scalar prefetch L2, fetch data and place the cache-line into L2,
++           and imply load into all lower cache destination levels.
++    0010 - scalar prefetch L3, fetch data and place the cache-line into L3,
++           and imply load into all lower cache destination levels.
++    0011 - scalar prefetch L4, fetch data and place the cache-line into L4,
++           and imply load into all lower cache destination levels
++           (if the L5 is in the cache hierarchy).
++    1000 - vector prefetch L1, fetch data as if for a normal scalar load
++           but place the cache-line into the vector buffer (vector L1),
++           and imply load into all lower cache destination levels.
++*/
++
++static void
++check_pref_type (unsigned long type)
++{
++  if (type != 0 && type != 1 && type != 3 && type != 8)
++    as_bad (_("Improper pref type (%lu)"), type);
++}
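
Note that only types 0, 1, 3 and 8 pass this check, so the 0010 (scalar prefetch L3) encoding listed in the comment above is rejected. The same rule as a standalone C sketch, for illustration only:

    /* Accepted pref types: 0 = scalar L1, 1 = scalar L2,
       3 = scalar L4, 8 = vector L1.  */
    static int pref_type_is_valid (unsigned long type)
    {
      return type == 0 || type == 1 || type == 3 || type == 8;
    }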
++
+ static symbolS *
+ make_internal_label (void)
+ {
+@@ -1366,6 +1480,113 @@ load_const (int reg, expressionS *ep)
+     }
+ }
+ 
++/* Expand RISC-V Vector macros into one or more instructions.  */
++
++static void
++vector_macro (struct riscv_cl_insn *ip)
++{
++  int vd = (ip->insn_opcode >> OP_SH_VD) & OP_MASK_VD;
++  int vs1 = (ip->insn_opcode >> OP_SH_VS1) & OP_MASK_VS1;
++  int vs2 = (ip->insn_opcode >> OP_SH_VS2) & OP_MASK_VS2;
++  int vm = (ip->insn_opcode >> OP_SH_VMASK) & OP_MASK_VMASK;
++  int vtemp = (ip->insn_opcode >> OP_SH_VFUNCT6) & OP_MASK_VFUNCT6;
++  int mask = ip->insn_mo->mask;
++
++  switch (mask)
++    {
++    case M_VMSGE:
++      if (vm)
++	{
++	  /* Unmasked.  */
++	  macro_build (NULL, "vmslt.vx", "Vd,Vt,sVm", vd, vs2, vs1, -1);
++	  macro_build (NULL, "vmnand.mm", "Vd,Vt,Vs", vd, vd, vd);
++	  break;
++	}
++      if (vtemp != 0)
++	{
++	  /* Masked.  Have vtemp to avoid overlap constraints.  */
++	  if (vd == vm)
++	    {
++	      macro_build (NULL, "vmslt.vx", "Vd,Vt,s", vtemp, vs2, vs1);
++	      macro_build (NULL, "vmandnot.mm", "Vd,Vt,Vs", vd, vm, vtemp);
++	    }
++	  else
++	    {
++	      /* Preserve the value of vd if not updating by vm.  */
++	      macro_build (NULL, "vmslt.vx", "Vd,Vt,s", vtemp, vs2, vs1);
++	      macro_build (NULL, "vmandnot.mm", "Vd,Vt,Vs", vtemp, vm, vtemp);
++	      macro_build (NULL, "vmandnot.mm", "Vd,Vt,Vs", vd, vd, vm);
++	      macro_build (NULL, "vmor.mm", "Vd,Vt,Vs", vd, vtemp, vd);
++	    }
++	}
++      else if (vd != vm)
++	{
++	  /* Masked.  This may cause vd to overlap vs2 when LMUL > 1.  */
++	  macro_build (NULL, "vmslt.vx", "Vd,Vt,sVm", vd, vs2, vs1, vm);
++	  macro_build (NULL, "vmxor.mm", "Vd,Vt,Vs", vd, vd, vm);
++	}
++      else
++	as_bad (_("must provide temp if destination overlaps mask"));
++      break;
++
++    case M_VMSGEU:
++      if (vm)
++	{
++	  /* Unmasked.  */
++	  macro_build (NULL, "vmsltu.vx", "Vd,Vt,sVm", vd, vs2, vs1, -1);
++	  macro_build (NULL, "vmnand.mm", "Vd,Vt,Vs", vd, vd, vd);
++	  break;
++	}
++      if (vtemp != 0)
++	{
++	  /* Masked.  Have vtemp to avoid overlap constraints.  */
++	  if (vd == vm)
++	    {
++	      macro_build (NULL, "vmsltu.vx", "Vd,Vt,s", vtemp, vs2, vs1);
++	      macro_build (NULL, "vmandnot.mm", "Vd,Vt,Vs", vd, vm, vtemp);
++	    }
++	  else
++	    {
++	      /* Preserve the value of vd if not updating by vm.  */
++	      macro_build (NULL, "vmsltu.vx", "Vd,Vt,s", vtemp, vs2, vs1);
++	      macro_build (NULL, "vmandnot.mm", "Vd,Vt,Vs", vtemp, vm, vtemp);
++	      macro_build (NULL, "vmandnot.mm", "Vd,Vt,Vs", vd, vd, vm);
++	      macro_build (NULL, "vmor.mm", "Vd,Vt,Vs", vd, vtemp, vd);
++	    }
++	}
++      else if (vd != vm)
++	{
++	  /* Masked.  This may cause vd to overlap vs2 when LMUL > 1.  */
++	  macro_build (NULL, "vmsltu.vx", "Vd,Vt,sVm", vd, vs2, vs1, vm);
++	  macro_build (NULL, "vmxor.mm", "Vd,Vt,Vs", vd, vd, vm);
++	}
++      else
++	as_bad (_("must provide temp if destination overlaps mask"));
++      break;
++
++    default:
++      as_bad (_("Macro %s not implemented"), ip->insn_mo->name);
++      break;
++    }
++}
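
For example, assuming the usual vmsge.vx binding for M_VMSGE in the opcode table, the unmasked branch above expands the pseudo-instruction as:

    vmsge.vx  v4, v8, a0
    # emitted as:
    vmslt.vx  v4, v8, a0
    vmnand.mm v4, v4, v4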
++
++/* Zero extend and sign extend byte/half-word/word.  */
++
++static void
++riscv_ext (int destreg, int srcreg, unsigned shift, bfd_boolean sign)
++{
++  if (sign)
++    {
++      md_assemblef ("slli x%d, x%d, 0x%x", destreg, srcreg, shift);
++      md_assemblef ("srai x%d, x%d, 0x%x", destreg, destreg, shift);
++    }
++  else
++    {
++      md_assemblef ("slli x%d, x%d, 0x%x", destreg, srcreg, shift);
++      md_assemblef ("srli x%d, x%d, 0x%x", destreg, destreg, shift);
++    }
++}
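
A minimal C model of what each emitted pair computes (illustrative; assumes RV64, where a byte uses shift = 64 - 8 = 56, and relies on the usual arithmetic right shift of signed values):

    #include <stdint.h>
    static int64_t  sext8 (int64_t x)  { return (x << 56) >> 56; }  /* slli; srai */
    static uint64_t zext8 (uint64_t x) { return (x << 56) >> 56; }  /* slli; srli */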
++
+ /* Expand RISC-V assembly macros into one or more instructions.  */
+ static void
+ macro (struct riscv_cl_insn *ip, expressionS *imm_expr,
+@@ -1640,6 +1861,66 @@ my_getSmallExpression (expressionS *ep, bfd_reloc_code_real_type *reloc,
+   return reloc_index;
+ }
+ 
++/* Parse string STR as a vsetvli operand.  Store the expression in *EP.
++   On exit, EXPR_END points to the first character after the expression.  */
++
++static void
++my_getVsetvliExpression (expressionS *ep, char *str)
++{
++  unsigned int vsew_value = 0, vlmul_value = 0;
++  unsigned int vta_value = 0, vma_value = 0;
++  bfd_boolean vsew_found = FALSE, vlmul_found = FALSE;
++  bfd_boolean vta_found = FALSE, vma_found = FALSE;
++
++  if (arg_lookup (&str, riscv_vsew, ARRAY_SIZE (riscv_vsew), &vsew_value))
++    {
++      if (*str == ',')
++	++str;
++      if (vsew_found)
++	as_bad (_("multiple vsew constants"));
++      vsew_found = TRUE;
++    }
++  if (arg_lookup (&str, riscv_vlmul, ARRAY_SIZE (riscv_vlmul), &vlmul_value))
++    {
++      if (*str == ',')
++	++str;
++      if (vlmul_found)
++	as_bad (_("multiple vlmul constants"));
++      vlmul_found = TRUE;
++    }
++  if (arg_lookup (&str, riscv_vta, ARRAY_SIZE (riscv_vta), &vta_value))
++    {
++      if (*str == ',')
++	++str;
++      if (vta_found)
++	as_bad (_("multiple vta constants"));
++      vta_found = TRUE;
++    }
++  if (arg_lookup (&str, riscv_vma, ARRAY_SIZE (riscv_vma), &vma_value))
++    {
++      if (*str == ',')
++	++str;
++      if (vma_found)
++	as_bad (_("multiple vma constants"));
++      vma_found = TRUE;
++    }
++
++  if (vsew_found || vlmul_found || vta_found || vma_found)
++    {
++      ep->X_op = O_constant;
++      ep->X_add_number = (vlmul_value << OP_SH_VLMUL)
++			 | (vsew_value << OP_SH_VSEW)
++			 | (vta_value << OP_SH_VTA)
++			 | (vma_value << OP_SH_VMA);
++      expr_end = str;
++    }
++  else
++    {
++      my_getExpression (ep, str);
++      str = expr_end;
++    }
++}
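
So an operand written in the usual vtype syntax, e.g. "vsetvli a0, a1, e32,m4,ta,ma", has each keyword looked up in its table (riscv_vsew, riscv_vlmul, riscv_vta, riscv_vma) and packed into a single immediate; a string matching none of the tables falls back to a plain expression parse.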
++
+ /* Parse opcode name, could be an mnemonics or number.  */
+ static size_t
+ my_getOpcodeExpression (expressionS *ep, bfd_reloc_code_real_type *reloc,
+@@ -2187,6 +2468,25 @@ riscv_ip (char *str, struct riscv_cl_insn *ip, expressionS *imm_expr,
+ 	      s = expr_end;
+ 	      continue;
+ 
++            case '#':
++              my_getExpression (imm_expr, s);
++              check_pref_type((unsigned long)imm_expr->X_add_number);
++              INSERT_OPERAND (PREF_TYPE, *ip, imm_expr->X_add_number);
++              imm_expr->X_op = O_absent;
++              s = expr_end;
++              continue;      
++
++            case '+':
++	      my_getExpression (imm_expr, s);
++	      check_absolute_expr (ip, imm_expr, FALSE);
++	      if ((unsigned long) imm_expr->X_add_number > 0xff)
++		as_bad (_("Improper pref offset value (%lu)"),
++			(unsigned long) imm_expr->X_add_number);
++	      INSERT_OPERAND (PREF_OFFSET, *ip, imm_expr->X_add_number);
++	      imm_expr->X_op = O_absent;
++	      s = expr_end;
++	      continue;
++
+ 	    case 'E':		/* Control register.  */
+ 	      insn_with_csr = TRUE;
+ 	      explicit_priv_attr = TRUE;
+@@ -2503,6 +2803,170 @@ riscv_ip (char *str, struct riscv_cl_insn *ip, expressionS *imm_expr,
+ 	      imm_expr->X_op = O_absent;
+ 	      continue;
+ 
++	    case 'V': /* RVV */
++	      switch (*++args)
++		{
++		case 'd': /* VD */
++		  if (!reg_lookup (&s, RCLASS_VECR, &regno))
++		    break;
++		  INSERT_OPERAND (VD, *ip, regno);
++		  continue;
++
++		case 'e': /* AMO VD */
++		  if (reg_lookup (&s, RCLASS_GPR, &regno) && regno == 0)
++		    INSERT_OPERAND (VWD, *ip, 0);
++		  else if (reg_lookup (&s, RCLASS_VECR, &regno))
++		    {
++		      INSERT_OPERAND (VWD, *ip, 1);
++		      INSERT_OPERAND (VD, *ip, regno);
++		    }
++		  else
++		    break;
++		  continue;
++
++		case 'f': /* AMO VS3 */
++		  if (!reg_lookup (&s, RCLASS_VECR, &regno))
++		    break;
++		  if (!EXTRACT_OPERAND (VWD, ip->insn_opcode))
++		    INSERT_OPERAND (VD, *ip, regno);
++		  else
++		    {
++		      /* VS3 must match VD.  */
++		      if (EXTRACT_OPERAND (VD, ip->insn_opcode) != regno)
++			break;
++		    }
++		  continue;
++
++		case 's': /* VS1 */
++		  if (!reg_lookup (&s, RCLASS_VECR, &regno))
++		    break;
++		  INSERT_OPERAND (VS1, *ip, regno);
++		  continue;
++
++		case 't': /* VS2 */
++		  if (!reg_lookup (&s, RCLASS_VECR, &regno))
++		    break;
++		  INSERT_OPERAND (VS2, *ip, regno);
++		  continue;
++
++		case 'u': /* VS1 == VS2 */
++		  if (!reg_lookup (&s, RCLASS_VECR, &regno))
++		    break;
++		  INSERT_OPERAND (VS1, *ip, regno);
++		  INSERT_OPERAND (VS2, *ip, regno);
++		  continue;
++
++		case 'v': /* VD == VS1 == VS2 */
++		  if (!reg_lookup (&s, RCLASS_VECR, &regno))
++		    break;
++		  INSERT_OPERAND (VD, *ip, regno);
++		  INSERT_OPERAND (VS1, *ip, regno);
++		  INSERT_OPERAND (VS2, *ip, regno);
++		  continue;
++
++		/* The `V0` is the carry-in register for v[m]adc and v[m]sbc,
++		   and is used to choose vs1/rs1/frs1/imm or vs2 for
++		   v[f]merge.  It uses the same encoding as the vector mask
++		   register.  */
++		case '0':
++		  if (reg_lookup (&s, RCLASS_VECR, &regno) && regno == 0)
++		    continue;
++		  break;
++
++		case 'b': /* vtypei for vsetivli */
++		  my_getVsetvliExpression (imm_expr, s);
++		  check_absolute_expr (ip, imm_expr, FALSE);
++		  if (!VALID_RVV_VB_IMM (imm_expr->X_add_number))
++		    as_bad (_("bad value for vsetivli immediate field, "
++			      "value must be 0..1023"));
++		  ip->insn_opcode
++		    |= ENCODE_RVV_VB_IMM (imm_expr->X_add_number);
++		  imm_expr->X_op = O_absent;
++		  s = expr_end;
++		  continue;
++
++		case 'c': /* vtypei for vsetvli */
++		  my_getVsetvliExpression (imm_expr, s);
++		  check_absolute_expr (ip, imm_expr, FALSE);
++		  if (!VALID_RVV_VC_IMM (imm_expr->X_add_number))
++		    as_bad (_("bad value for vsetvli immediate field, "
++			      "value must be 0..2047"));
++		  ip->insn_opcode
++		    |= ENCODE_RVV_VC_IMM (imm_expr->X_add_number);
++		  imm_expr->X_op = O_absent;
++		  s = expr_end;
++		  continue;
++
++		case 'i': /* vector arith signed immediate */
++		  my_getExpression (imm_expr, s);
++		  check_absolute_expr (ip, imm_expr, FALSE);
++		  if (imm_expr->X_add_number > 15
++		      || imm_expr->X_add_number < -16)
++		    as_bad (_("bad value for vector immediate field, "
++			      "value must be -16...15"));
++		  INSERT_OPERAND (VIMM, *ip, imm_expr->X_add_number);
++		  imm_expr->X_op = O_absent;
++		  s = expr_end;
++		  continue;
++
++		case 'j': /* vector arith unsigned immediate */
++		  my_getExpression (imm_expr, s);
++		  check_absolute_expr (ip, imm_expr, FALSE);
++		  if (imm_expr->X_add_number < 0
++		      || imm_expr->X_add_number >= 32)
++		    as_bad (_("bad value for vector immediate field, "
++			      "value must be 0...31"));
++		  INSERT_OPERAND (VIMM, *ip, imm_expr->X_add_number);
++		  imm_expr->X_op = O_absent;
++		  s = expr_end;
++		  continue;
++
++		case 'k': /* vector arith signed immediate, minus 1 */
++		  my_getExpression (imm_expr, s);
++		  check_absolute_expr (ip, imm_expr, FALSE);
++		  if (imm_expr->X_add_number > 16
++		      || imm_expr->X_add_number < -15)
++		    as_bad (_("bad value for vector immediate field, "
++			      "value must be -15...16"));
++		  INSERT_OPERAND (VIMM, *ip, imm_expr->X_add_number - 1);
++		  imm_expr->X_op = O_absent;
++		  s = expr_end;
++		  continue;
++
++		case 'm': /* optional vector mask */
++		  if (*s == '\0')
++		    {
++		      INSERT_OPERAND (VMASK, *ip, 1);
++		      continue;
++		    }
++		  else if (*s == ',' && s++
++			   && reg_lookup (&s, RCLASS_VECM, &regno)
++			   && regno == 0)
++		    {
++		      INSERT_OPERAND (VMASK, *ip, 0);
++		      continue;
++		    }
++		  break;
++
++		  /* The following ones are only used in macros.  */
++		case 'M': /* required vector mask */
++		  if (reg_lookup (&s, RCLASS_VECM, &regno) && regno == 0)
++		    {
++		      INSERT_OPERAND (VMASK, *ip, 0);
++		      continue;
++		    }
++		  break;
++
++		case 'T': /* vector macro temporary register */
++		  if (!reg_lookup (&s, RCLASS_VECR, &regno) || regno == 0)
++		    break;
++		  /* Store it in the FUNCT6 field as we don't have anyplace
++		     else to store it.  */
++		  INSERT_OPERAND (VFUNCT6, *ip, regno);
++		  continue;
++		}
++	      break;
++
+ 	    default:
+ 	      as_fatal (_("internal error: bad argument type %c"), *args);
+ 	    }
+diff --git a/include/opcode/riscv-opc.h b/include/opcode/riscv-opc.h
+index 158de32485..0c23fac724 100644
+--- a/include/opcode/riscv-opc.h
++++ b/include/opcode/riscv-opc.h
+@@ -113,6 +113,18 @@
+ #define MASK_SRL  0xfe00707f
+ #define MATCH_SRA 0x40005033
+ #define MASK_SRA  0xfe00707f
++#define MATCH_SLO 0x20001033
++#define MASK_SLO  0xfe00707f
++#define MATCH_SLOI 0x20001013
++#define MASK_SLOI  0xfc00707f
++#define MATCH_SLOW 0x2000103b
++#define MASK_SLOW  0xfe00707f
++#define MATCH_SRO 0x20005033
++#define MASK_SRO  0xfe00707f
++#define MATCH_SROI 0x20005013
++#define MASK_SROI  0xfc00707f
++#define MATCH_SROW 0x2000503b
++#define MASK_SROW  0xfe00707f
+ #define MATCH_OR 0x6033
+ #define MASK_OR  0xfe00707f
+ #define MATCH_AND 0x7033
+@@ -157,6 +169,8 @@
+ #define MASK_SW  0x707f
+ #define MATCH_SD 0x3023
+ #define MASK_SD  0x707f
++#define MATCH_PAUSE 0x0100000f
++#define MASK_PAUSE  0xffffffff
+ #define MATCH_FENCE 0xf
+ #define MASK_FENCE  0x707f
+ #define MATCH_FENCE_I 0x100f
+@@ -253,6 +267,16 @@
+ #define MASK_SFENCE_VMA  0xfe007fff
+ #define MATCH_WFI 0x10500073
+ #define MASK_WFI  0xffffffff
++/* Custom CSR instructions */
++#define MATCH_CFLUSH_D_L1    0xfc000073
++#define MASK_CFLUSH_D_L1     0xfff07fff
++#define MATCH_CDISCARD_D_L1  0xfc200073
++#define MASK_CDISCARD_D_L1   0xfff07fff
++#define MATCH_CFLUSH_D_L2    0xfc400073
++#define MASK_CFLUSH_D_L2     0xfff07fff
++#define MATCH_CDISCARD_D_L2  0xfc600073
++#define MASK_CDISCARD_D_L2   0xfff07fff
++#define MASK_PREF   0x7fff
+ #define MATCH_CSRRW 0x1073
+ #define MASK_CSRRW  0x707f
+ #define MATCH_CSRRS 0x2073
+@@ -421,6 +445,200 @@
+ #define MASK_FCVT_Q_LU  0xfff0007f
+ #define MATCH_FMV_Q_X 0xf6000053
+ #define MASK_FMV_Q_X  0xfff0707f
++#define MATCH_CLZ 0x60001013
++#define MASK_CLZ  0xfff0707f
++#define MATCH_CTZ 0x60101013
++#define MASK_CTZ  0xfff0707f
++#define MATCH_CPOP 0x60201013
++#define MASK_CPOP  0xfff0707f
++#define MATCH_BMATFLIP 0x60301013
++#define MASK_BMATFLIP  0xfff0707f
++#define MATCH_CRC32_B 0x61001013
++#define MASK_CRC32_B  0xfff0707f
++#define MATCH_CRC32_H 0x61101013
++#define MASK_CRC32_H  0xfff0707f
++#define MATCH_CRC32_W 0x61201013
++#define MASK_CRC32_W  0xfff0707f
++#define MATCH_CRC32_D 0x61301013
++#define MASK_CRC32_D  0xfff0707f
++#define MATCH_CRC32C_B 0x61801013
++#define MASK_CRC32C_B  0xfff0707f
++#define MATCH_CRC32C_H 0x61901013
++#define MASK_CRC32C_H  0xfff0707f
++#define MATCH_CRC32C_W 0x61a01013
++#define MASK_CRC32C_W  0xfff0707f
++#define MATCH_CRC32C_D 0x61b01013
++#define MASK_CRC32C_D  0xfff0707f
++#define MATCH_MIN 0xa004033
++#define MASK_MIN  0xfe00707f
++#define MATCH_MINU 0xa005033
++#define MASK_MINU  0xfe00707f
++#define MATCH_MAX 0xa006033
++#define MASK_MAX  0xfe00707f
++#define MATCH_MAXU 0xa007033
++#define MASK_MAXU  0xfe00707f
++#define MATCH_SHFL 0x8001033
++#define MASK_SHFL  0xfe00707f
++#define MATCH_SHFLI 0x8001013
++#define MASK_SHFLI  0xfc00707f
++#define MATCH_UNSHFL 0x8005033
++#define MASK_UNSHFL  0xfe00707f
++#define MATCH_UNSHFLI 0x8005013
++#define MASK_UNSHFLI  0xfc00707f
++#define MATCH_BCOMPRESS 0x8006033
++#define MASK_BCOMPRESS  0xfe00707f
++#define MATCH_BDECOMPRESS 0x48006033
++#define MASK_BDECOMPRESS  0xfe00707f
++#define MATCH_SEXT_B 0x60401013
++#define MASK_SEXT_B  0xfff0707f
++#define MATCH_SEXT_H 0x60501013
++#define MASK_SEXT_H  0xfff0707f
++#define MATCH_PACK 0x8004033
++#define MASK_PACK  0xfe00707f
++#define MATCH_PACKU 0x48004033
++#define MASK_PACKU  0xfe00707f
++#define MATCH_BMATOR 0x8003033
++#define MASK_BMATOR  0xfe00707f
++#define MATCH_BMATXOR 0x48003033
++#define MASK_BMATXOR  0xfe00707f
++#define MATCH_PACKH 0x8007033
++#define MASK_PACKH  0xfe00707f
++#define MATCH_BFP 0x48007033
++#define MASK_BFP  0xfe00707f
++#define MATCH_PACKW 0x800403b
++#define MASK_PACKW  0xfe00707f
++#define MATCH_PACKUW 0x4800403b
++#define MASK_PACKUW  0xfe00707f
++#define MATCH_ANDN 0x40007033
++#define MASK_ANDN  0xfe00707f
++#define MATCH_ORN 0x40006033
++#define MASK_ORN  0xfe00707f
++#define MATCH_XNOR 0x40004033
++#define MASK_XNOR  0xfe00707f
++#define MATCH_ROL 0x60001033
++#define MASK_ROL  0xfe00707f
++#define MATCH_ROR 0x60005033
++#define MASK_ROR  0xfe00707f
++#define MATCH_RORI 0x60005013
++#define MASK_RORI  0xfc00707f
++#define MATCH_BCLR 0x48001033
++#define MASK_BCLR  0xfe00707f
++#define MATCH_BCLRI 0x48001013
++#define MASK_BCLRI  0xfc00707f
++#define MATCH_BSET 0x28001033
++#define MASK_BSET  0xfe00707f
++#define MATCH_BSETI 0x28001013
++#define MASK_BSETI  0xfc00707f
++#define MATCH_BINV 0x68001033
++#define MASK_BINV  0xfe00707f
++#define MATCH_BINVI 0x68001013
++#define MASK_BINVI  0xfc00707f
++#define MATCH_BEXT 0x48005033
++#define MASK_BEXT  0xfe00707f
++#define MATCH_BEXTI 0x48005013
++#define MASK_BEXTI  0xfc00707f
++#define MATCH_GREV 0x68005033
++#define MASK_GREV  0xfe00707f
++#define MATCH_GREVI 0x68005013
++#define MASK_GREVI  0xfc00707f
++#define MATCH_GORC 0x28005033
++#define MASK_GORC  0xfe00707f
++#define MATCH_GORCI 0x28005013
++#define MASK_GORCI  0xfc00707f
++#define MATCH_CMIX 0x6001033
++#define MASK_CMIX  0x600707f
++#define MATCH_CMOV 0x6005033
++#define MASK_CMOV  0x600707f
++#define MATCH_FSL 0x4001033
++#define MASK_FSL  0x600707f
++#define MATCH_FSR 0x4005033
++#define MASK_FSR  0x600707f
++#define MATCH_FSRI 0x4005013
++#define MASK_FSRI  0x400707f
++#define MATCH_CLZW 0x6000101b
++#define MASK_CLZW  0xfff0707f
++#define MATCH_CTZW 0x6010101b
++#define MASK_CTZW  0xfff0707f
++#define MATCH_CPOPW 0x6020101b
++#define MASK_CPOPW  0xfff0707f
++#define MATCH_ROLW 0x6000103b
++#define MASK_ROLW  0xfe00707f
++#define MATCH_RORW 0x6000503b
++#define MASK_RORW  0xfe00707f
++#define MATCH_RORIW 0x6000501b
++#define MASK_RORIW  0xfe00707f
++#define MATCH_SH1ADD 0x20002033
++#define MASK_SH1ADD  0xfe00707f
++#define MATCH_SH2ADD 0x20004033
++#define MASK_SH2ADD  0xfe00707f
++#define MATCH_SH3ADD 0x20006033
++#define MASK_SH3ADD  0xfe00707f
++#define MATCH_SH1ADD_UW 0x2000203b
++#define MASK_SH1ADD_UW  0xfe00707f
++#define MATCH_SH2ADD_UW 0x2000403b
++#define MASK_SH2ADD_UW  0xfe00707f
++#define MATCH_SH3ADD_UW 0x2000603b
++#define MASK_SH3ADD_UW  0xfe00707f
++#define MATCH_BCLRW 0x4800103b
++#define MASK_BCLRW  0xfe00707f
++#define MATCH_BSETW 0x2800103b
++#define MASK_BSETW  0xfe00707f
++#define MATCH_BINVW 0x6800103b
++#define MASK_BINVW  0xfe00707f
++#define MATCH_BEXTW 0x4800503b
++#define MASK_BEXTW  0xfe00707f
++#define MATCH_GORCW 0x2800503b
++#define MASK_GORCW  0xfe00707f
++#define MATCH_GREVW 0x6800503b
++#define MASK_GREVW  0xfe00707f
++#define MATCH_SLOIW 0x2000101b
++#define MASK_SLOIW  0xfe00707f
++#define MATCH_SROIW 0x2000501b
++#define MASK_SROIW  0xfe00707f
++#define MATCH_BCLRIW 0x4800101b
++#define MASK_BCLRIW  0xfe00707f
++#define MATCH_BSETIW 0x2800101b
++#define MASK_BSETIW  0xfe00707f
++#define MATCH_BINVIW 0x6800101b
++#define MASK_BINVIW  0xfe00707f
++#define MATCH_GORCIW 0x2800501b
++#define MASK_GORCIW  0xfe00707f
++#define MATCH_GREVIW 0x6800501b
++#define MASK_GREVIW  0xfe00707f
++#define MATCH_FSLW 0x400103b
++#define MASK_FSLW  0x600707f
++#define MATCH_FSRW 0x400503b
++#define MASK_FSRW  0x600707f
++#define MATCH_FSRIW 0x400501b
++#define MASK_FSRIW  0x600707f
++#define MATCH_SHFLW 0x800103b
++#define MASK_SHFLW  0xfe00707f
++#define MATCH_UNSHFLW 0x800503b
++#define MASK_UNSHFLW  0xfe00707f
++#define MATCH_BCOMPRESSW 0x800603b
++#define MASK_BCOMPRESSW  0xfe00707f
++#define MATCH_BDECOMPRESSW 0x4800603b
++#define MASK_BDECOMPRESSW  0xfe00707f
++#define MATCH_BFPW 0x4800703b
++#define MASK_BFPW  0xfe00707f
++#define MATCH_XPERM_N 0x28002033
++#define MASK_XPERM_N  0xfe00707f
++#define MATCH_XPERM_B 0x28004033
++#define MASK_XPERM_B  0xfe00707f
++#define MATCH_XPERM_H 0x28006033
++#define MASK_XPERM_H  0xfe00707f
++#define MATCH_XPERM_W 0x28000033
++#define MASK_XPERM_W  0xfe00707f
++#define MATCH_ADD_UW 0x800003b
++#define MASK_ADD_UW  0xfe00707f
++#define MATCH_SLLI_UW 0x800101b
++#define MASK_SLLI_UW  0xfc00707f
++#define MATCH_CLMUL 0xa001033
++#define MASK_CLMUL  0xfe00707f
++#define MATCH_CLMULH 0xa003033
++#define MASK_CLMULH  0xfe00707f
++#define MATCH_CLMULR 0xa002033
++#define MASK_CLMULR  0xfe00707f
+ #define MATCH_FLW 0x2007
+ #define MASK_FLW  0x707f
+ #define MATCH_FLD 0x3007
+@@ -547,6 +765,1854 @@
+ #define MASK_C_LDSP  0xe003
+ #define MATCH_C_SDSP 0xe002
+ #define MASK_C_SDSP  0xe003
++
++/* RVV */
++/* Version 1.0-draft-20210130.  */
++
++/* Temporary configuration-setting encoding info
++
++`-` means zimm
++
++31 30 zimm  RS2   RS1/uimm funct3 RD    opcode
++1  0  00000 xxxxx xxxxx    111    xxxxx 1010111 vsetvl
++1  1  ----- ----- xxxxx    111    xxxxx 1010111 vsetivli
++0  -  ----- ----- xxxxx    111    xxxxx 1010111 vsetvli
++*/
++
++#define MATCH_VSETVL   0x80007057
++#define MASK_VSETVL    0xfe00707f
++#define MATCH_VSETIVLI 0xc0007057
++#define MASK_VSETIVLI  0xc000707f
++#define MATCH_VSETVLI  0x00007057
++#define MASK_VSETVLI   0x8000707f
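
A quick consistency check against the table above: MATCH_VSETVLI = 0x00007057 gives opcode 1010111 (0x57) and funct3 = 111 with bit 31 = 0, while MATCH_VSETVL sets bits 31:30 = 10 and MATCH_VSETIVLI sets bits 31:30 = 11, matching the three rows.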
++
++/* Temporary Load/store encoding info
++
++MOP load
++00 unit-stride		VLE<EEW>, VLE<EEW>FF, VL<nf>RE<EEW> (nf = 1, 2, 4, 8)
++01 indexed-unordered	VLUXEI<EEW>
++10 strided		VLSE<EEW>
++11 indexed-ordered	VLOXEI<EEW>
++
++MOP store
++00 unit-stride		VSE<EEW>, VS<nf>R (nf = 1, 2, 4, 8)
++01 indexed-unordered	VSUXEI<EEW>
++10 strided		VSSE<EEW>
++11 indexed-ordered	VSOXEI<EEW>
++
++VM 0 masked
++VM 1 unmasked
++
++LUMOP
++00000 unit-stride load
++01000 unit-stride, whole registers load
++01011 unit-stride, mask load, EEW = 1
++10000 unit-stride first-fault
++xxxxx other encodings reserved, x != 0
++
++SUMOP
++00000 unit-stride store
++01000 unit-stride, whole registers store
++01011 unit-stride, mask store, EEW = 1
++0xxxx other encodings reserved, x != 0
++
++`-` means EEW =
++MEW WIDTH
++x   001   FLH/FSH
++x   010   FLW/FSW
++x   011   FLD/FSD
++x   100   FLQ/FSQ
++0   000   VLxE8/VSxE8, VLxEI8/VSxEI8, VL<nf>RE8, VS<nf>R
++0   101   VLxE16/VSxE16, VLxEI16/VSxEI16, VL<nf>RE16
++0   110   VLxE32/VSxE32, VLxEI32/VSxEI32, VL<nf>RE32
++0   111   VLxE64/VSxE64, VLxEI64/VSxEI64, VL<nf>RE64
++1   000   Reserved (VLxE128/VSxE128, VL<nf>RE128)
++1   101   Reserved (VLxE256/VSxE256, VL<nf>RE256)
++1   110   Reserved (VLxE512/VSxE512, VL<nf>RE512)
++1   111   Reserved (VLxE1024/VSxE1024, VL<nf>RE1024)
++
++NF  MEW MOP VM LUMOP/RS2 RS1   WIDTH VD    opcode
++000 -   00  x  00000     xxxxx ---   xxxxx 0000111 VLE<EEW>
++000 -   00  x  00000     xxxxx ---   xxxxx 0100111 VSE<EEW>
++000 -   00  1  01011     xxxxx ---   xxxxx 0000111 VLE, EEW = 1
++000 -   00  1  01011     xxxxx ---   xxxxx 0100111 VSE, EEW = 1
++000 -   10  x  xxxxx     xxxxx ---   xxxxx 0000111 VLSE<EEW>
++000 -   10  x  xxxxx     xxxxx ---   xxxxx 0100111 VSSE<EEW>
++000 0   11  x  xxxxx     xxxxx ---   xxxxx 0000111 VLOXE<EEW>I
++000 0   11  x  xxxxx     xxxxx ---   xxxxx 0100111 VSOXE<EEW>I
++000 0   01  x  xxxxx     xxxxx ---   xxxxx 0000111 VLUXE<EEW>I
++000 0   01  x  xxxxx     xxxxx ---   xxxxx 0100111 VSUXE<EEW>I
++000 -   00  x  10000     xxxxx ---   xxxxx 0000111 VLE<EEW>FF
++xxx -   00  1  01000     xxxxx ---   xxxxx 0000111 VL<nf>RE<EEW>, nf = 1,2,4,8
++xxx 0   00  1  01000     xxxxx 000   xxxxx 0100111 VS<nf>R, nf = 1,2,4,8
++
++xxx -   00  x  00000     xxxxx ---   xxxxx 0000111 VLSEG<nf>E<EEW>
++xxx -   00  x  00000     xxxxx ---   xxxxx 0100111 VSSEG<nf>E<EEW>
++xxx -   10  x  00000     xxxxx ---   xxxxx 0000111 VLSSEG<nf>E<EEW>
++xxx -   10  x  00000     xxxxx ---   xxxxx 0100111 VSSSEG<nf>E<EEW>
++xxx -   11  x  00000     xxxxx ---   xxxxx 0000111 VLOXSEG<nf>E<EEW>I
++xxx -   11  x  00000     xxxxx ---   xxxxx 0100111 VSOXSEG<nf>E<EEW>I
++xxx -   01  x  00000     xxxxx ---   xxxxx 0000111 VLUXSEG<nf>E<EEW>I
++xxx -   01  x  00000     xxxxx ---   xxxxx 0100111 VSUXSEG<nf>E<EEW>I
++xxx -   00  x  10000     xxxxx ---   xxxxx 0000111 VLSEG<nf>E<EEW>FF
++*/
++
++#define MATCH_VLE1V    0x02b00007
++#define MASK_VLE1V     0xfff0707f
++#define MATCH_VSE1V    0x02b00027
++#define MASK_VSE1V     0xfff0707f
++
++#define MATCH_VLE8V    0x00000007
++#define MASK_VLE8V     0xfdf0707f
++#define MATCH_VLE16V   0x00005007
++#define MASK_VLE16V    0xfdf0707f
++#define MATCH_VLE32V   0x00006007
++#define MASK_VLE32V    0xfdf0707f
++#define MATCH_VLE64V   0x00007007
++#define MASK_VLE64V    0xfdf0707f
++
++#define MATCH_VSE8V    0x00000027
++#define MASK_VSE8V     0xfdf0707f
++#define MATCH_VSE16V   0x00005027
++#define MASK_VSE16V    0xfdf0707f
++#define MATCH_VSE32V   0x00006027
++#define MASK_VSE32V    0xfdf0707f
++#define MATCH_VSE64V   0x00007027
++#define MASK_VSE64V    0xfdf0707f
++
++#define MATCH_VLSE8V    0x08000007
++#define MASK_VLSE8V     0xfc00707f
++#define MATCH_VLSE16V   0x08005007
++#define MASK_VLSE16V    0xfc00707f
++#define MATCH_VLSE32V   0x08006007
++#define MASK_VLSE32V    0xfc00707f
++#define MATCH_VLSE64V   0x08007007
++#define MASK_VLSE64V    0xfc00707f
++
++#define MATCH_VSSE8V    0x08000027
++#define MASK_VSSE8V     0xfc00707f
++#define MATCH_VSSE16V   0x08005027
++#define MASK_VSSE16V    0xfc00707f
++#define MATCH_VSSE32V   0x08006027
++#define MASK_VSSE32V    0xfc00707f
++#define MATCH_VSSE64V   0x08007027
++#define MASK_VSSE64V    0xfc00707f
++
++#define MATCH_VLOXEI8V    0x0c000007
++#define MASK_VLOXEI8V     0xfc00707f
++#define MATCH_VLOXEI16V   0x0c005007
++#define MASK_VLOXEI16V    0xfc00707f
++#define MATCH_VLOXEI32V   0x0c006007
++#define MASK_VLOXEI32V    0xfc00707f
++#define MATCH_VLOXEI64V   0x0c007007
++#define MASK_VLOXEI64V    0xfc00707f
++
++#define MATCH_VSOXEI8V    0x0c000027
++#define MASK_VSOXEI8V     0xfc00707f
++#define MATCH_VSOXEI16V   0x0c005027
++#define MASK_VSOXEI16V    0xfc00707f
++#define MATCH_VSOXEI32V   0x0c006027
++#define MASK_VSOXEI32V    0xfc00707f
++#define MATCH_VSOXEI64V   0x0c007027
++#define MASK_VSOXEI64V    0xfc00707f
++
++#define MATCH_VLUXEI8V    0x04000007
++#define MASK_VLUXEI8V     0xfc00707f
++#define MATCH_VLUXEI16V   0x04005007
++#define MASK_VLUXEI16V    0xfc00707f
++#define MATCH_VLUXEI32V   0x04006007
++#define MASK_VLUXEI32V    0xfc00707f
++#define MATCH_VLUXEI64V   0x04007007
++#define MASK_VLUXEI64V    0xfc00707f
++
++#define MATCH_VSUXEI8V    0x04000027
++#define MASK_VSUXEI8V     0xfc00707f
++#define MATCH_VSUXEI16V   0x04005027
++#define MASK_VSUXEI16V    0xfc00707f
++#define MATCH_VSUXEI32V   0x04006027
++#define MASK_VSUXEI32V    0xfc00707f
++#define MATCH_VSUXEI64V   0x04007027
++#define MASK_VSUXEI64V    0xfc00707f
++
++#define MATCH_VLE8FFV    0x01000007
++#define MASK_VLE8FFV     0xfdf0707f
++#define MATCH_VLE16FFV   0x01005007
++#define MASK_VLE16FFV    0xfdf0707f
++#define MATCH_VLE32FFV   0x01006007
++#define MASK_VLE32FFV    0xfdf0707f
++#define MATCH_VLE64FFV   0x01007007
++#define MASK_VLE64FFV    0xfdf0707f
++
++#define MATCH_VLSEG2E8V  0x20000007
++#define MASK_VLSEG2E8V   0xfdf0707f
++#define MATCH_VSSEG2E8V  0x20000027
++#define MASK_VSSEG2E8V   0xfdf0707f
++#define MATCH_VLSEG3E8V  0x40000007
++#define MASK_VLSEG3E8V   0xfdf0707f
++#define MATCH_VSSEG3E8V  0x40000027
++#define MASK_VSSEG3E8V   0xfdf0707f
++#define MATCH_VLSEG4E8V  0x60000007
++#define MASK_VLSEG4E8V   0xfdf0707f
++#define MATCH_VSSEG4E8V  0x60000027
++#define MASK_VSSEG4E8V   0xfdf0707f
++#define MATCH_VLSEG5E8V  0x80000007
++#define MASK_VLSEG5E8V   0xfdf0707f
++#define MATCH_VSSEG5E8V  0x80000027
++#define MASK_VSSEG5E8V   0xfdf0707f
++#define MATCH_VLSEG6E8V  0xa0000007
++#define MASK_VLSEG6E8V   0xfdf0707f
++#define MATCH_VSSEG6E8V  0xa0000027
++#define MASK_VSSEG6E8V   0xfdf0707f
++#define MATCH_VLSEG7E8V  0xc0000007
++#define MASK_VLSEG7E8V   0xfdf0707f
++#define MATCH_VSSEG7E8V  0xc0000027
++#define MASK_VSSEG7E8V   0xfdf0707f
++#define MATCH_VLSEG8E8V  0xe0000007
++#define MASK_VLSEG8E8V   0xfdf0707f
++#define MATCH_VSSEG8E8V  0xe0000027
++#define MASK_VSSEG8E8V   0xfdf0707f
++
++#define MATCH_VLSEG2E16V  0x20005007
++#define MASK_VLSEG2E16V   0xfdf0707f
++#define MATCH_VSSEG2E16V  0x20005027
++#define MASK_VSSEG2E16V   0xfdf0707f
++#define MATCH_VLSEG3E16V  0x40005007
++#define MASK_VLSEG3E16V   0xfdf0707f
++#define MATCH_VSSEG3E16V  0x40005027
++#define MASK_VSSEG3E16V   0xfdf0707f
++#define MATCH_VLSEG4E16V  0x60005007
++#define MASK_VLSEG4E16V   0xfdf0707f
++#define MATCH_VSSEG4E16V  0x60005027
++#define MASK_VSSEG4E16V   0xfdf0707f
++#define MATCH_VLSEG5E16V  0x80005007
++#define MASK_VLSEG5E16V   0xfdf0707f
++#define MATCH_VSSEG5E16V  0x80005027
++#define MASK_VSSEG5E16V   0xfdf0707f
++#define MATCH_VLSEG6E16V  0xa0005007
++#define MASK_VLSEG6E16V   0xfdf0707f
++#define MATCH_VSSEG6E16V  0xa0005027
++#define MASK_VSSEG6E16V   0xfdf0707f
++#define MATCH_VLSEG7E16V  0xc0005007
++#define MASK_VLSEG7E16V   0xfdf0707f
++#define MATCH_VSSEG7E16V  0xc0005027
++#define MASK_VSSEG7E16V   0xfdf0707f
++#define MATCH_VLSEG8E16V  0xe0005007
++#define MASK_VLSEG8E16V   0xfdf0707f
++#define MATCH_VSSEG8E16V  0xe0005027
++#define MASK_VSSEG8E16V   0xfdf0707f
++
++#define MATCH_VLSEG2E32V  0x20006007
++#define MASK_VLSEG2E32V   0xfdf0707f
++#define MATCH_VSSEG2E32V  0x20006027
++#define MASK_VSSEG2E32V   0xfdf0707f
++#define MATCH_VLSEG3E32V  0x40006007
++#define MASK_VLSEG3E32V   0xfdf0707f
++#define MATCH_VSSEG3E32V  0x40006027
++#define MASK_VSSEG3E32V   0xfdf0707f
++#define MATCH_VLSEG4E32V  0x60006007
++#define MASK_VLSEG4E32V   0xfdf0707f
++#define MATCH_VSSEG4E32V  0x60006027
++#define MASK_VSSEG4E32V   0xfdf0707f
++#define MATCH_VLSEG5E32V  0x80006007
++#define MASK_VLSEG5E32V   0xfdf0707f
++#define MATCH_VSSEG5E32V  0x80006027
++#define MASK_VSSEG5E32V   0xfdf0707f
++#define MATCH_VLSEG6E32V  0xa0006007
++#define MASK_VLSEG6E32V   0xfdf0707f
++#define MATCH_VSSEG6E32V  0xa0006027
++#define MASK_VSSEG6E32V   0xfdf0707f
++#define MATCH_VLSEG7E32V  0xc0006007
++#define MASK_VLSEG7E32V   0xfdf0707f
++#define MATCH_VSSEG7E32V  0xc0006027
++#define MASK_VSSEG7E32V   0xfdf0707f
++#define MATCH_VLSEG8E32V  0xe0006007
++#define MASK_VLSEG8E32V   0xfdf0707f
++#define MATCH_VSSEG8E32V  0xe0006027
++#define MASK_VSSEG8E32V   0xfdf0707f
++
++#define MATCH_VLSEG2E64V  0x20007007
++#define MASK_VLSEG2E64V   0xfdf0707f
++#define MATCH_VSSEG2E64V  0x20007027
++#define MASK_VSSEG2E64V   0xfdf0707f
++#define MATCH_VLSEG3E64V  0x40007007
++#define MASK_VLSEG3E64V   0xfdf0707f
++#define MATCH_VSSEG3E64V  0x40007027
++#define MASK_VSSEG3E64V   0xfdf0707f
++#define MATCH_VLSEG4E64V  0x60007007
++#define MASK_VLSEG4E64V   0xfdf0707f
++#define MATCH_VSSEG4E64V  0x60007027
++#define MASK_VSSEG4E64V   0xfdf0707f
++#define MATCH_VLSEG5E64V  0x80007007
++#define MASK_VLSEG5E64V   0xfdf0707f
++#define MATCH_VSSEG5E64V  0x80007027
++#define MASK_VSSEG5E64V   0xfdf0707f
++#define MATCH_VLSEG6E64V  0xa0007007
++#define MASK_VLSEG6E64V   0xfdf0707f
++#define MATCH_VSSEG6E64V  0xa0007027
++#define MASK_VSSEG6E64V   0xfdf0707f
++#define MATCH_VLSEG7E64V  0xc0007007
++#define MASK_VLSEG7E64V   0xfdf0707f
++#define MATCH_VSSEG7E64V  0xc0007027
++#define MASK_VSSEG7E64V   0xfdf0707f
++#define MATCH_VLSEG8E64V  0xe0007007
++#define MASK_VLSEG8E64V   0xfdf0707f
++#define MATCH_VSSEG8E64V  0xe0007027
++#define MASK_VSSEG8E64V   0xfdf0707f
++
++#define MATCH_VLSSEG2E8V  0x28000007
++#define MASK_VLSSEG2E8V   0xfc00707f
++#define MATCH_VSSSEG2E8V  0x28000027
++#define MASK_VSSSEG2E8V   0xfc00707f
++#define MATCH_VLSSEG3E8V  0x48000007
++#define MASK_VLSSEG3E8V   0xfc00707f
++#define MATCH_VSSSEG3E8V  0x48000027
++#define MASK_VSSSEG3E8V   0xfc00707f
++#define MATCH_VLSSEG4E8V  0x68000007
++#define MASK_VLSSEG4E8V   0xfc00707f
++#define MATCH_VSSSEG4E8V  0x68000027
++#define MASK_VSSSEG4E8V   0xfc00707f
++#define MATCH_VLSSEG5E8V  0x88000007
++#define MASK_VLSSEG5E8V   0xfc00707f
++#define MATCH_VSSSEG5E8V  0x88000027
++#define MASK_VSSSEG5E8V   0xfc00707f
++#define MATCH_VLSSEG6E8V  0xa8000007
++#define MASK_VLSSEG6E8V   0xfc00707f
++#define MATCH_VSSSEG6E8V  0xa8000027
++#define MASK_VSSSEG6E8V   0xfc00707f
++#define MATCH_VLSSEG7E8V  0xc8000007
++#define MASK_VLSSEG7E8V   0xfc00707f
++#define MATCH_VSSSEG7E8V  0xc8000027
++#define MASK_VSSSEG7E8V   0xfc00707f
++#define MATCH_VLSSEG8E8V  0xe8000007
++#define MASK_VLSSEG8E8V   0xfc00707f
++#define MATCH_VSSSEG8E8V  0xe8000027
++#define MASK_VSSSEG8E8V   0xfc00707f
++
++#define MATCH_VLSSEG2E16V  0x28005007
++#define MASK_VLSSEG2E16V   0xfc00707f
++#define MATCH_VSSSEG2E16V  0x28005027
++#define MASK_VSSSEG2E16V   0xfc00707f
++#define MATCH_VLSSEG3E16V  0x48005007
++#define MASK_VLSSEG3E16V   0xfc00707f
++#define MATCH_VSSSEG3E16V  0x48005027
++#define MASK_VSSSEG3E16V   0xfc00707f
++#define MATCH_VLSSEG4E16V  0x68005007
++#define MASK_VLSSEG4E16V   0xfc00707f
++#define MATCH_VSSSEG4E16V  0x68005027
++#define MASK_VSSSEG4E16V   0xfc00707f
++#define MATCH_VLSSEG5E16V  0x88005007
++#define MASK_VLSSEG5E16V   0xfc00707f
++#define MATCH_VSSSEG5E16V  0x88005027
++#define MASK_VSSSEG5E16V   0xfc00707f
++#define MATCH_VLSSEG6E16V  0xa8005007
++#define MASK_VLSSEG6E16V   0xfc00707f
++#define MATCH_VSSSEG6E16V  0xa8005027
++#define MASK_VSSSEG6E16V   0xfc00707f
++#define MATCH_VLSSEG7E16V  0xc8005007
++#define MASK_VLSSEG7E16V   0xfc00707f
++#define MATCH_VSSSEG7E16V  0xc8005027
++#define MASK_VSSSEG7E16V   0xfc00707f
++#define MATCH_VLSSEG8E16V  0xe8005007
++#define MASK_VLSSEG8E16V   0xfc00707f
++#define MATCH_VSSSEG8E16V  0xe8005027
++#define MASK_VSSSEG8E16V   0xfc00707f
++
++#define MATCH_VLSSEG2E32V  0x28006007
++#define MASK_VLSSEG2E32V   0xfc00707f
++#define MATCH_VSSSEG2E32V  0x28006027
++#define MASK_VSSSEG2E32V   0xfc00707f
++#define MATCH_VLSSEG3E32V  0x48006007
++#define MASK_VLSSEG3E32V   0xfc00707f
++#define MATCH_VSSSEG3E32V  0x48006027
++#define MASK_VSSSEG3E32V   0xfc00707f
++#define MATCH_VLSSEG4E32V  0x68006007
++#define MASK_VLSSEG4E32V   0xfc00707f
++#define MATCH_VSSSEG4E32V  0x68006027
++#define MASK_VSSSEG4E32V   0xfc00707f
++#define MATCH_VLSSEG5E32V  0x88006007
++#define MASK_VLSSEG5E32V   0xfc00707f
++#define MATCH_VSSSEG5E32V  0x88006027
++#define MASK_VSSSEG5E32V   0xfc00707f
++#define MATCH_VLSSEG6E32V  0xa8006007
++#define MASK_VLSSEG6E32V   0xfc00707f
++#define MATCH_VSSSEG6E32V  0xa8006027
++#define MASK_VSSSEG6E32V   0xfc00707f
++#define MATCH_VLSSEG7E32V  0xc8006007
++#define MASK_VLSSEG7E32V   0xfc00707f
++#define MATCH_VSSSEG7E32V  0xc8006027
++#define MASK_VSSSEG7E32V   0xfc00707f
++#define MATCH_VLSSEG8E32V  0xe8006007
++#define MASK_VLSSEG8E32V   0xfc00707f
++#define MATCH_VSSSEG8E32V  0xe8006027
++#define MASK_VSSSEG8E32V   0xfc00707f
++
++#define MATCH_VLSSEG2E64V  0x28007007
++#define MASK_VLSSEG2E64V   0xfc00707f
++#define MATCH_VSSSEG2E64V  0x28007027
++#define MASK_VSSSEG2E64V   0xfc00707f
++#define MATCH_VLSSEG3E64V  0x48007007
++#define MASK_VLSSEG3E64V   0xfc00707f
++#define MATCH_VSSSEG3E64V  0x48007027
++#define MASK_VSSSEG3E64V   0xfc00707f
++#define MATCH_VLSSEG4E64V  0x68007007
++#define MASK_VLSSEG4E64V   0xfc00707f
++#define MATCH_VSSSEG4E64V  0x68007027
++#define MASK_VSSSEG4E64V   0xfc00707f
++#define MATCH_VLSSEG5E64V  0x88007007
++#define MASK_VLSSEG5E64V   0xfc00707f
++#define MATCH_VSSSEG5E64V  0x88007027
++#define MASK_VSSSEG5E64V   0xfc00707f
++#define MATCH_VLSSEG6E64V  0xa8007007
++#define MASK_VLSSEG6E64V   0xfc00707f
++#define MATCH_VSSSEG6E64V  0xa8007027
++#define MASK_VSSSEG6E64V   0xfc00707f
++#define MATCH_VLSSEG7E64V  0xc8007007
++#define MASK_VLSSEG7E64V   0xfc00707f
++#define MATCH_VSSSEG7E64V  0xc8007027
++#define MASK_VSSSEG7E64V   0xfc00707f
++#define MATCH_VLSSEG8E64V  0xe8007007
++#define MASK_VLSSEG8E64V   0xfc00707f
++#define MATCH_VSSSEG8E64V  0xe8007027
++#define MASK_VSSSEG8E64V   0xfc00707f
++
++#define MATCH_VLOXSEG2EI8V  0x2c000007
++#define MASK_VLOXSEG2EI8V   0xfc00707f
++#define MATCH_VSOXSEG2EI8V  0x2c000027
++#define MASK_VSOXSEG2EI8V   0xfc00707f
++#define MATCH_VLOXSEG3EI8V  0x4c000007
++#define MASK_VLOXSEG3EI8V   0xfc00707f
++#define MATCH_VSOXSEG3EI8V  0x4c000027
++#define MASK_VSOXSEG3EI8V   0xfc00707f
++#define MATCH_VLOXSEG4EI8V  0x6c000007
++#define MASK_VLOXSEG4EI8V   0xfc00707f
++#define MATCH_VSOXSEG4EI8V  0x6c000027
++#define MASK_VSOXSEG4EI8V   0xfc00707f
++#define MATCH_VLOXSEG5EI8V  0x8c000007
++#define MASK_VLOXSEG5EI8V   0xfc00707f
++#define MATCH_VSOXSEG5EI8V  0x8c000027
++#define MASK_VSOXSEG5EI8V   0xfc00707f
++#define MATCH_VLOXSEG6EI8V  0xac000007
++#define MASK_VLOXSEG6EI8V   0xfc00707f
++#define MATCH_VSOXSEG6EI8V  0xac000027
++#define MASK_VSOXSEG6EI8V   0xfc00707f
++#define MATCH_VLOXSEG7EI8V  0xcc000007
++#define MASK_VLOXSEG7EI8V   0xfc00707f
++#define MATCH_VSOXSEG7EI8V  0xcc000027
++#define MASK_VSOXSEG7EI8V   0xfc00707f
++#define MATCH_VLOXSEG8EI8V  0xec000007
++#define MASK_VLOXSEG8EI8V   0xfc00707f
++#define MATCH_VSOXSEG8EI8V  0xec000027
++#define MASK_VSOXSEG8EI8V   0xfc00707f
++
++#define MATCH_VLUXSEG2EI8V  0x24000007
++#define MASK_VLUXSEG2EI8V   0xfc00707f
++#define MATCH_VSUXSEG2EI8V  0x24000027
++#define MASK_VSUXSEG2EI8V   0xfc00707f
++#define MATCH_VLUXSEG3EI8V  0x44000007
++#define MASK_VLUXSEG3EI8V   0xfc00707f
++#define MATCH_VSUXSEG3EI8V  0x44000027
++#define MASK_VSUXSEG3EI8V   0xfc00707f
++#define MATCH_VLUXSEG4EI8V  0x64000007
++#define MASK_VLUXSEG4EI8V   0xfc00707f
++#define MATCH_VSUXSEG4EI8V  0x64000027
++#define MASK_VSUXSEG4EI8V   0xfc00707f
++#define MATCH_VLUXSEG5EI8V  0x84000007
++#define MASK_VLUXSEG5EI8V   0xfc00707f
++#define MATCH_VSUXSEG5EI8V  0x84000027
++#define MASK_VSUXSEG5EI8V   0xfc00707f
++#define MATCH_VLUXSEG6EI8V  0xa4000007
++#define MASK_VLUXSEG6EI8V   0xfc00707f
++#define MATCH_VSUXSEG6EI8V  0xa4000027
++#define MASK_VSUXSEG6EI8V   0xfc00707f
++#define MATCH_VLUXSEG7EI8V  0xc4000007
++#define MASK_VLUXSEG7EI8V   0xfc00707f
++#define MATCH_VSUXSEG7EI8V  0xc4000027
++#define MASK_VSUXSEG7EI8V   0xfc00707f
++#define MATCH_VLUXSEG8EI8V  0xe4000007
++#define MASK_VLUXSEG8EI8V   0xfc00707f
++#define MATCH_VSUXSEG8EI8V  0xe4000027
++#define MASK_VSUXSEG8EI8V   0xfc00707f
++
++#define MATCH_VLOXSEG2EI16V  0x2c005007
++#define MASK_VLOXSEG2EI16V   0xfc00707f
++#define MATCH_VSOXSEG2EI16V  0x2c005027
++#define MASK_VSOXSEG2EI16V   0xfc00707f
++#define MATCH_VLOXSEG3EI16V  0x4c005007
++#define MASK_VLOXSEG3EI16V   0xfc00707f
++#define MATCH_VSOXSEG3EI16V  0x4c005027
++#define MASK_VSOXSEG3EI16V   0xfc00707f
++#define MATCH_VLOXSEG4EI16V  0x6c005007
++#define MASK_VLOXSEG4EI16V   0xfc00707f
++#define MATCH_VSOXSEG4EI16V  0x6c005027
++#define MASK_VSOXSEG4EI16V   0xfc00707f
++#define MATCH_VLOXSEG5EI16V  0x8c005007
++#define MASK_VLOXSEG5EI16V   0xfc00707f
++#define MATCH_VSOXSEG5EI16V  0x8c005027
++#define MASK_VSOXSEG5EI16V   0xfc00707f
++#define MATCH_VLOXSEG6EI16V  0xac005007
++#define MASK_VLOXSEG6EI16V   0xfc00707f
++#define MATCH_VSOXSEG6EI16V  0xac005027
++#define MASK_VSOXSEG6EI16V   0xfc00707f
++#define MATCH_VLOXSEG7EI16V  0xcc005007
++#define MASK_VLOXSEG7EI16V   0xfc00707f
++#define MATCH_VSOXSEG7EI16V  0xcc005027
++#define MASK_VSOXSEG7EI16V   0xfc00707f
++#define MATCH_VLOXSEG8EI16V  0xec005007
++#define MASK_VLOXSEG8EI16V   0xfc00707f
++#define MATCH_VSOXSEG8EI16V  0xec005027
++#define MASK_VSOXSEG8EI16V   0xfc00707f
++
++#define MATCH_VLUXSEG2EI16V  0x24005007
++#define MASK_VLUXSEG2EI16V   0xfc00707f
++#define MATCH_VSUXSEG2EI16V  0x24005027
++#define MASK_VSUXSEG2EI16V   0xfc00707f
++#define MATCH_VLUXSEG3EI16V  0x44005007
++#define MASK_VLUXSEG3EI16V   0xfc00707f
++#define MATCH_VSUXSEG3EI16V  0x44005027
++#define MASK_VSUXSEG3EI16V   0xfc00707f
++#define MATCH_VLUXSEG4EI16V  0x64005007
++#define MASK_VLUXSEG4EI16V   0xfc00707f
++#define MATCH_VSUXSEG4EI16V  0x64005027
++#define MASK_VSUXSEG4EI16V   0xfc00707f
++#define MATCH_VLUXSEG5EI16V  0x84005007
++#define MASK_VLUXSEG5EI16V   0xfc00707f
++#define MATCH_VSUXSEG5EI16V  0x84005027
++#define MASK_VSUXSEG5EI16V   0xfc00707f
++#define MATCH_VLUXSEG6EI16V  0xa4005007
++#define MASK_VLUXSEG6EI16V   0xfc00707f
++#define MATCH_VSUXSEG6EI16V  0xa4005027
++#define MASK_VSUXSEG6EI16V   0xfc00707f
++#define MATCH_VLUXSEG7EI16V  0xc4005007
++#define MASK_VLUXSEG7EI16V   0xfc00707f
++#define MATCH_VSUXSEG7EI16V  0xc4005027
++#define MASK_VSUXSEG7EI16V   0xfc00707f
++#define MATCH_VLUXSEG8EI16V  0xe4005007
++#define MASK_VLUXSEG8EI16V   0xfc00707f
++#define MATCH_VSUXSEG8EI16V  0xe4005027
++#define MASK_VSUXSEG8EI16V   0xfc00707f
++
++#define MATCH_VLOXSEG2EI32V  0x2c006007
++#define MASK_VLOXSEG2EI32V   0xfc00707f
++#define MATCH_VSOXSEG2EI32V  0x2c006027
++#define MASK_VSOXSEG2EI32V   0xfc00707f
++#define MATCH_VLOXSEG3EI32V  0x4c006007
++#define MASK_VLOXSEG3EI32V   0xfc00707f
++#define MATCH_VSOXSEG3EI32V  0x4c006027
++#define MASK_VSOXSEG3EI32V   0xfc00707f
++#define MATCH_VLOXSEG4EI32V  0x6c006007
++#define MASK_VLOXSEG4EI32V   0xfc00707f
++#define MATCH_VSOXSEG4EI32V  0x6c006027
++#define MASK_VSOXSEG4EI32V   0xfc00707f
++#define MATCH_VLOXSEG5EI32V  0x8c006007
++#define MASK_VLOXSEG5EI32V   0xfc00707f
++#define MATCH_VSOXSEG5EI32V  0x8c006027
++#define MASK_VSOXSEG5EI32V   0xfc00707f
++#define MATCH_VLOXSEG6EI32V  0xac006007
++#define MASK_VLOXSEG6EI32V   0xfc00707f
++#define MATCH_VSOXSEG6EI32V  0xac006027
++#define MASK_VSOXSEG6EI32V   0xfc00707f
++#define MATCH_VLOXSEG7EI32V  0xcc006007
++#define MASK_VLOXSEG7EI32V   0xfc00707f
++#define MATCH_VSOXSEG7EI32V  0xcc006027
++#define MASK_VSOXSEG7EI32V   0xfc00707f
++#define MATCH_VLOXSEG8EI32V  0xec006007
++#define MASK_VLOXSEG8EI32V   0xfc00707f
++#define MATCH_VSOXSEG8EI32V  0xec006027
++#define MASK_VSOXSEG8EI32V   0xfc00707f
++
++#define MATCH_VLUXSEG2EI32V  0x24006007
++#define MASK_VLUXSEG2EI32V   0xfc00707f
++#define MATCH_VSUXSEG2EI32V  0x24006027
++#define MASK_VSUXSEG2EI32V   0xfc00707f
++#define MATCH_VLUXSEG3EI32V  0x44006007
++#define MASK_VLUXSEG3EI32V   0xfc00707f
++#define MATCH_VSUXSEG3EI32V  0x44006027
++#define MASK_VSUXSEG3EI32V   0xfc00707f
++#define MATCH_VLUXSEG4EI32V  0x64006007
++#define MASK_VLUXSEG4EI32V   0xfc00707f
++#define MATCH_VSUXSEG4EI32V  0x64006027
++#define MASK_VSUXSEG4EI32V   0xfc00707f
++#define MATCH_VLUXSEG5EI32V  0x84006007
++#define MASK_VLUXSEG5EI32V   0xfc00707f
++#define MATCH_VSUXSEG5EI32V  0x84006027
++#define MASK_VSUXSEG5EI32V   0xfc00707f
++#define MATCH_VLUXSEG6EI32V  0xa4006007
++#define MASK_VLUXSEG6EI32V   0xfc00707f
++#define MATCH_VSUXSEG6EI32V  0xa4006027
++#define MASK_VSUXSEG6EI32V   0xfc00707f
++#define MATCH_VLUXSEG7EI32V  0xc4006007
++#define MASK_VLUXSEG7EI32V   0xfc00707f
++#define MATCH_VSUXSEG7EI32V  0xc4006027
++#define MASK_VSUXSEG7EI32V   0xfc00707f
++#define MATCH_VLUXSEG8EI32V  0xe4006007
++#define MASK_VLUXSEG8EI32V   0xfc00707f
++#define MATCH_VSUXSEG8EI32V  0xe4006027
++#define MASK_VSUXSEG8EI32V   0xfc00707f
++
++#define MATCH_VLOXSEG2EI64V  0x2c007007
++#define MASK_VLOXSEG2EI64V   0xfc00707f
++#define MATCH_VSOXSEG2EI64V  0x2c007027
++#define MASK_VSOXSEG2EI64V   0xfc00707f
++#define MATCH_VLOXSEG3EI64V  0x4c007007
++#define MASK_VLOXSEG3EI64V   0xfc00707f
++#define MATCH_VSOXSEG3EI64V  0x4c007027
++#define MASK_VSOXSEG3EI64V   0xfc00707f
++#define MATCH_VLOXSEG4EI64V  0x6c007007
++#define MASK_VLOXSEG4EI64V   0xfc00707f
++#define MATCH_VSOXSEG4EI64V  0x6c007027
++#define MASK_VSOXSEG4EI64V   0xfc00707f
++#define MATCH_VLOXSEG5EI64V  0x8c007007
++#define MASK_VLOXSEG5EI64V   0xfc00707f
++#define MATCH_VSOXSEG5EI64V  0x8c007027
++#define MASK_VSOXSEG5EI64V   0xfc00707f
++#define MATCH_VLOXSEG6EI64V  0xac007007
++#define MASK_VLOXSEG6EI64V   0xfc00707f
++#define MATCH_VSOXSEG6EI64V  0xac007027
++#define MASK_VSOXSEG6EI64V   0xfc00707f
++#define MATCH_VLOXSEG7EI64V  0xcc007007
++#define MASK_VLOXSEG7EI64V   0xfc00707f
++#define MATCH_VSOXSEG7EI64V  0xcc007027
++#define MASK_VSOXSEG7EI64V   0xfc00707f
++#define MATCH_VLOXSEG8EI64V  0xec007007
++#define MASK_VLOXSEG8EI64V   0xfc00707f
++#define MATCH_VSOXSEG8EI64V  0xec007027
++#define MASK_VSOXSEG8EI64V   0xfc00707f
++
++#define MATCH_VLUXSEG2EI64V  0x24007007
++#define MASK_VLUXSEG2EI64V   0xfc00707f
++#define MATCH_VSUXSEG2EI64V  0x24007027
++#define MASK_VSUXSEG2EI64V   0xfc00707f
++#define MATCH_VLUXSEG3EI64V  0x44007007
++#define MASK_VLUXSEG3EI64V   0xfc00707f
++#define MATCH_VSUXSEG3EI64V  0x44007027
++#define MASK_VSUXSEG3EI64V   0xfc00707f
++#define MATCH_VLUXSEG4EI64V  0x64007007
++#define MASK_VLUXSEG4EI64V   0xfc00707f
++#define MATCH_VSUXSEG4EI64V  0x64007027
++#define MASK_VSUXSEG4EI64V   0xfc00707f
++#define MATCH_VLUXSEG5EI64V  0x84007007
++#define MASK_VLUXSEG5EI64V   0xfc00707f
++#define MATCH_VSUXSEG5EI64V  0x84007027
++#define MASK_VSUXSEG5EI64V   0xfc00707f
++#define MATCH_VLUXSEG6EI64V  0xa4007007
++#define MASK_VLUXSEG6EI64V   0xfc00707f
++#define MATCH_VSUXSEG6EI64V  0xa4007027
++#define MASK_VSUXSEG6EI64V   0xfc00707f
++#define MATCH_VLUXSEG7EI64V  0xc4007007
++#define MASK_VLUXSEG7EI64V   0xfc00707f
++#define MATCH_VSUXSEG7EI64V  0xc4007027
++#define MASK_VSUXSEG7EI64V   0xfc00707f
++#define MATCH_VLUXSEG8EI64V  0xe4007007
++#define MASK_VLUXSEG8EI64V   0xfc00707f
++#define MATCH_VSUXSEG8EI64V  0xe4007027
++#define MASK_VSUXSEG8EI64V   0xfc00707f
++
++#define MATCH_VLSEG2E8FFV  0x21000007
++#define MASK_VLSEG2E8FFV   0xfdf0707f
++#define MATCH_VLSEG3E8FFV  0x41000007
++#define MASK_VLSEG3E8FFV   0xfdf0707f
++#define MATCH_VLSEG4E8FFV  0x61000007
++#define MASK_VLSEG4E8FFV   0xfdf0707f
++#define MATCH_VLSEG5E8FFV  0x81000007
++#define MASK_VLSEG5E8FFV   0xfdf0707f
++#define MATCH_VLSEG6E8FFV  0xa1000007
++#define MASK_VLSEG6E8FFV   0xfdf0707f
++#define MATCH_VLSEG7E8FFV  0xc1000007
++#define MASK_VLSEG7E8FFV   0xfdf0707f
++#define MATCH_VLSEG8E8FFV  0xe1000007
++#define MASK_VLSEG8E8FFV   0xfdf0707f
++
++#define MATCH_VLSEG2E16FFV  0x21005007
++#define MASK_VLSEG2E16FFV   0xfdf0707f
++#define MATCH_VLSEG3E16FFV  0x41005007
++#define MASK_VLSEG3E16FFV   0xfdf0707f
++#define MATCH_VLSEG4E16FFV  0x61005007
++#define MASK_VLSEG4E16FFV   0xfdf0707f
++#define MATCH_VLSEG5E16FFV  0x81005007
++#define MASK_VLSEG5E16FFV   0xfdf0707f
++#define MATCH_VLSEG6E16FFV  0xa1005007
++#define MASK_VLSEG6E16FFV   0xfdf0707f
++#define MATCH_VLSEG7E16FFV  0xc1005007
++#define MASK_VLSEG7E16FFV   0xfdf0707f
++#define MATCH_VLSEG8E16FFV  0xe1005007
++#define MASK_VLSEG8E16FFV   0xfdf0707f
++
++#define MATCH_VLSEG2E32FFV  0x21006007
++#define MASK_VLSEG2E32FFV   0xfdf0707f
++#define MATCH_VLSEG3E32FFV  0x41006007
++#define MASK_VLSEG3E32FFV   0xfdf0707f
++#define MATCH_VLSEG4E32FFV  0x61006007
++#define MASK_VLSEG4E32FFV   0xfdf0707f
++#define MATCH_VLSEG5E32FFV  0x81006007
++#define MASK_VLSEG5E32FFV   0xfdf0707f
++#define MATCH_VLSEG6E32FFV  0xa1006007
++#define MASK_VLSEG6E32FFV   0xfdf0707f
++#define MATCH_VLSEG7E32FFV  0xc1006007
++#define MASK_VLSEG7E32FFV   0xfdf0707f
++#define MATCH_VLSEG8E32FFV  0xe1006007
++#define MASK_VLSEG8E32FFV   0xfdf0707f
++
++#define MATCH_VLSEG2E64FFV  0x21007007
++#define MASK_VLSEG2E64FFV   0xfdf0707f
++#define MATCH_VLSEG3E64FFV  0x41007007
++#define MASK_VLSEG3E64FFV   0xfdf0707f
++#define MATCH_VLSEG4E64FFV  0x61007007
++#define MASK_VLSEG4E64FFV   0xfdf0707f
++#define MATCH_VLSEG5E64FFV  0x81007007
++#define MASK_VLSEG5E64FFV   0xfdf0707f
++#define MATCH_VLSEG6E64FFV  0xa1007007
++#define MASK_VLSEG6E64FFV   0xfdf0707f
++#define MATCH_VLSEG7E64FFV  0xc1007007
++#define MASK_VLSEG7E64FFV   0xfdf0707f
++#define MATCH_VLSEG8E64FFV  0xe1007007
++#define MASK_VLSEG8E64FFV   0xfdf0707f
++
++#define MATCH_VL1RE8V    0x02800007
++#define MASK_VL1RE8V     0xfff0707f
++#define MATCH_VL1RE16V   0x02805007
++#define MASK_VL1RE16V    0xfff0707f
++#define MATCH_VL1RE32V   0x02806007
++#define MASK_VL1RE32V    0xfff0707f
++#define MATCH_VL1RE64V   0x02807007
++#define MASK_VL1RE64V    0xfff0707f
++
++#define MATCH_VL2RE8V    0x22800007
++#define MASK_VL2RE8V     0xfff0707f
++#define MATCH_VL2RE16V   0x22805007
++#define MASK_VL2RE16V    0xfff0707f
++#define MATCH_VL2RE32V   0x22806007
++#define MASK_VL2RE32V    0xfff0707f
++#define MATCH_VL2RE64V   0x22807007
++#define MASK_VL2RE64V    0xfff0707f
++
++#define MATCH_VL4RE8V    0x62800007
++#define MASK_VL4RE8V     0xfff0707f
++#define MATCH_VL4RE16V   0x62805007
++#define MASK_VL4RE16V    0xfff0707f
++#define MATCH_VL4RE32V   0x62806007
++#define MASK_VL4RE32V    0xfff0707f
++#define MATCH_VL4RE64V   0x62807007
++#define MASK_VL4RE64V    0xfff0707f
++
++#define MATCH_VL8RE8V    0xe2800007
++#define MASK_VL8RE8V     0xfff0707f
++#define MATCH_VL8RE16V   0xe2805007
++#define MASK_VL8RE16V    0xfff0707f
++#define MATCH_VL8RE32V   0xe2806007
++#define MASK_VL8RE32V    0xfff0707f
++#define MATCH_VL8RE64V   0xe2807007
++#define MASK_VL8RE64V    0xfff0707f
++
++#define MATCH_VS1RV  0x02800027
++#define MASK_VS1RV   0xfff0707f
++#define MATCH_VS2RV  0x22800027
++#define MASK_VS2RV   0xfff0707f
++#define MATCH_VS4RV  0x62800027
++#define MASK_VS4RV   0xfff0707f
++#define MATCH_VS8RV  0xe2800027
++#define MASK_VS8RV   0xfff0707f
++
++/* Temporary AMO encoding info
++
++width
++010 AMO*.W
++011 AMO*.D
++100 AMO*.Q
++000 VAMO*EI8.V
++101 VAMO*EI16.V
++110 VAMO*EI32.V
++111 VAMO*EI64.V
++
++amoop
++00001 vamoswap
++00000 vamoadd
++00100 vamoxor
++01100 vamoand
++01000 vamoor
++10000 vamomin
++10100 vamomax
++11000 vamominu
++11100 vamomaxu
++
++   31-27 26 25 24-20 19-15 14-12 11-7    6-0
++   amoop wd vm  vs2   rs1  width vs3/vd  opcode
++   00001 x 1 xxxxx xxxxx 110 xxxxx 0101111
++   0000 1x1x xxxx xxxx x110 xxxx x010 1111
++   1111 1010 0000 0000 0111 0000 0111 1111 */
++
++#define MATCH_VAMOADDEI8V   0x0000002f
++#define MASK_VAMOADDEI8V    0xf800707f
++#define MATCH_VAMOSWAPEI8V  0x0800002f
++#define MASK_VAMOSWAPEI8V   0xf800707f
++#define MATCH_VAMOXOREI8V   0x2000002f
++#define MASK_VAMOXOREI8V    0xf800707f
++#define MATCH_VAMOANDEI8V   0x6000002f
++#define MASK_VAMOANDEI8V    0xf800707f
++#define MATCH_VAMOOREI8V    0x4000002f
++#define MASK_VAMOOREI8V     0xf800707f
++#define MATCH_VAMOMINEI8V   0x8000002f
++#define MASK_VAMOMINEI8V    0xf800707f
++#define MATCH_VAMOMAXEI8V   0xa000002f
++#define MASK_VAMOMAXEI8V    0xf800707f
++#define MATCH_VAMOMINUEI8V  0xc000002f
++#define MASK_VAMOMINUEI8V   0xf800707f
++#define MATCH_VAMOMAXUEI8V  0xe000002f
++#define MASK_VAMOMAXUEI8V   0xf800707f
++
++#define MATCH_VAMOADDEI16V   0x0000502f
++#define MASK_VAMOADDEI16V    0xf800707f
++#define MATCH_VAMOSWAPEI16V  0x0800502f
++#define MASK_VAMOSWAPEI16V   0xf800707f
++#define MATCH_VAMOXOREI16V   0x2000502f
++#define MASK_VAMOXOREI16V    0xf800707f
++#define MATCH_VAMOANDEI16V   0x6000502f
++#define MASK_VAMOANDEI16V    0xf800707f
++#define MATCH_VAMOOREI16V    0x4000502f
++#define MASK_VAMOOREI16V     0xf800707f
++#define MATCH_VAMOMINEI16V   0x8000502f
++#define MASK_VAMOMINEI16V    0xf800707f
++#define MATCH_VAMOMAXEI16V   0xa000502f
++#define MASK_VAMOMAXEI16V    0xf800707f
++#define MATCH_VAMOMINUEI16V  0xc000502f
++#define MASK_VAMOMINUEI16V   0xf800707f
++#define MATCH_VAMOMAXUEI16V  0xe000502f
++#define MASK_VAMOMAXUEI16V   0xf800707f
++
++#define MATCH_VAMOADDEI32V   0x0000602f
++#define MASK_VAMOADDEI32V    0xf800707f
++#define MATCH_VAMOSWAPEI32V  0x0800602f
++#define MASK_VAMOSWAPEI32V   0xf800707f
++#define MATCH_VAMOXOREI32V   0x2000602f
++#define MASK_VAMOXOREI32V    0xf800707f
++#define MATCH_VAMOANDEI32V   0x6000602f
++#define MASK_VAMOANDEI32V    0xf800707f
++#define MATCH_VAMOOREI32V    0x4000602f
++#define MASK_VAMOOREI32V     0xf800707f
++#define MATCH_VAMOMINEI32V   0x8000602f
++#define MASK_VAMOMINEI32V    0xf800707f
++#define MATCH_VAMOMAXEI32V   0xa000602f
++#define MASK_VAMOMAXEI32V    0xf800707f
++#define MATCH_VAMOMINUEI32V  0xc000602f
++#define MASK_VAMOMINUEI32V   0xf800707f
++#define MATCH_VAMOMAXUEI32V  0xe000602f
++#define MASK_VAMOMAXUEI32V   0xf800707f
++
++#define MATCH_VAMOADDEI64V   0x0000702f
++#define MASK_VAMOADDEI64V    0xf800707f
++#define MATCH_VAMOSWAPEI64V  0x0800702f
++#define MASK_VAMOSWAPEI64V   0xf800707f
++#define MATCH_VAMOXOREI64V   0x2000702f
++#define MASK_VAMOXOREI64V    0xf800707f
++#define MATCH_VAMOANDEI64V   0x6000702f
++#define MASK_VAMOANDEI64V    0xf800707f
++#define MATCH_VAMOOREI64V    0x4000702f
++#define MASK_VAMOOREI64V     0xf800707f
++#define MATCH_VAMOMINEI64V   0x8000702f
++#define MASK_VAMOMINEI64V    0xf800707f
++#define MATCH_VAMOMAXEI64V   0xa000702f
++#define MASK_VAMOMAXEI64V    0xf800707f
++#define MATCH_VAMOMINUEI64V  0xc000702f
++#define MASK_VAMOMINUEI64V   0xf800707f
++#define MATCH_VAMOMAXUEI64V  0xe000702f
++#define MASK_VAMOMAXUEI64V   0xf800707f
++
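
As a cross-check of the field layout in the comment above, a minimal C sketch (not part of the patch; the instruction chosen is illustrative) re-derives one of the VAMO constants from its fields:

    #include <assert.h>
    #include <stdint.h>

    int
    main (void)
    {
      /* vamoswapei32.v: amoop = 00001, width = 110 (EI32), opcode = 0101111.  */
      uint32_t match = (0x01u << 27)   /* amoop, bits 31-27  */
                     | (0x06u << 12)   /* width, bits 14-12  */
                     | 0x2fu;          /* opcode, bits 6-0   */
      assert (match == 0x0800602f);    /* MATCH_VAMOSWAPEI32V above  */

      /* The shared mask covers exactly those three fields.  */
      assert (((0x1fu << 27) | (0x7u << 12) | 0x7fu) == 0xf800707f);
      return 0;
    }
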
++/* Temporary ALU encoding info
++
++funct3
++000 OPIVV vv
++001 OPFVV vv
++010 OPMVV vv
++011 OPIVI vi  simm[4:0]
++100 OPIVX vx  GPR x-reg rs1
++101 OPFVF vf  FP f-reg rs1
++110 OPMVX vx  GPR x-reg rs1
++111 OPCFG si  GPR x-reg rs1 & rs2/imm
++
++INT OPI
++funct6
++000000 vadd
++000001
++000010 vsub
++000011 vrsub
++000100 vminu
++000101 vmin
++000110 vmaxu
++000111 vmax
++001000
++001001 vand
++001010 vor
++001011 vxor
++001100 vrgather
++001101
++001110 vslideup, vrgatherei16
++001111 vslidedown
++010000 vadc
++010001 vmadc
++010010 vsbc
++010011 vmsbc
++010100
++010101
++010110
++010111 vmerge/vmv
++011000 vmseq
++011001 vmsne
++011010 vmsltu
++011011 vmslt
++011100 vmsleu
++011101 vmsle
++011110 vmsgtu
++011111 vmsgt
++100000 vsaddu
++100001 vsadd
++100010 vssubu
++100011 vssub
++100100
++100101 vsll
++100110
++100111 vmv<nf>r (nf = 1, 2, 4, 8)
++101000 vsrl
++101001 vsra
++101010 vssrl
++101011 vssra
++101100 vnsrl
++101101 vnsra
++101110 vnclipu
++101111 vnclip
++110000 vwredsumu
++110001 vwredsum
++110010
++110011
++110100
++110101
++110110
++110111
++111000 vdotu **
++111001 vdot **
++111010
++111011
++111100 vqmaccu
++111101 vqmacc
++111110 vqmaccus
++111111 vqmaccsu
++
++INT OPM
++funct6
++000000 vredsum
++000001 vredand
++000010 vredor
++000011 vredxor
++000100 vredminu
++000101 vredmin
++000110 vredmaxu
++000111 vredmax
++001000 vaaddu
++001001 vaadd
++001010 vasubu
++001011 vasub
++001100
++001101
++001110 vslide1up
++001111 vslide1down
++010000 VRXUNARY0/VWXUNARY0
++010001
++010010 VXUNARY0
++010011
++010100 VMUNARY0
++010101
++010110
++010111 vcompress
++011000 vmandnot
++011001 vmand
++011010 vmor
++011011 vmxor
++011100 vmornot
++011101 vmnand
++011110 vmnor
++011111 vmxnor
++100000 vdivu
++100001 vdiv
++100010 vremu
++100011 vrem
++100100 vmulhu
++100101 vmul
++100110 vmulhsu
++100111 vmulh
++101000
++101001 vmadd
++101010
++101011 vnmsub
++101100
++101101 vmacc
++101110
++101111 vnmsac
++110000 vwaddu
++110001 vwadd
++110010 vwsubu
++110011 vwsub
++110100 vwaddu.w
++110101 vwadd.w
++110110 vwsubu.w
++110111 vwsub.w
++111000 vwmulu
++111001
++111010 vwmulsu
++111011 vwmul
++111100 vwmaccu
++111101 vwmacc
++111110 vwmaccus
++111111 vwmaccsu
++
++VRXUNARY0
++vs2, funct3=X
++00000 vmv.s.x
++
++VWXUNARY0
++vs1, funct3=V
++00000 vmv.x.s
++10000 vpopc
++10001 vfirst
++
++VXUNARY0
++vs1, funct3=V
++00010 vzext.vf8
++00011 vsext.vf8
++00100 vzext.vf4
++00101 vsext.vf4
++00110 vzext.vf2
++00111 vsext.vf2
++
++VMUNARY0
++rs1
++00001 vmsbf
++00010 vmsof
++00011 vmsif
++10000 viota
++10001 vid
++
++VFLOAT
++funct6
++000000 vfadd
++000001 vfredsum
++000010 vfsub
++000011 vfredosum
++000100 vfmin
++000101 vfredmin
++000110 vfmax
++000111 vfredmax
++001000 vfsgnj
++001001 vfsgnjn
++001010 vfsgnjx
++001011
++001100
++001101
++001110 vfslide1up
++001111 vfslide1down
++010000 VRFUNARY0/VWFUNARY0
++010001
++010010 VFUNARY0
++010011 VFUNARY1
++010100
++010101
++010110
++010111 vfmerge/vfmv
++011000 vmfeq
++011001 vmfle
++011010
++011011 vmflt
++011100 vmfne
++011101 vmfgt
++011110
++011111 vmfge
++100000 vfdiv
++100001 vfrdiv
++100010
++100011
++100100 vfmul
++100101
++100110
++100111 vfrsub
++101000 vfmadd
++101001 vfnmadd
++101010 vfmsub
++101011 vfnmsub
++101100 vfmacc
++101101 vfnmacc
++101110 vfmsac
++101111 vfnmsac
++110000 vfwadd
++110001 vfwredsum
++110010 vfwsub
++110011 vfwredosum
++110100 vfwadd.w
++110101
++110110 vfwsub.w
++110111
++111000 vfwmul
++111001 vfdot
++111010
++111011
++111100 vfwmacc
++111101 vfwnmacc
++111110 vfwmsac
++111111 vfwnmsac
++
++VRFUNARY0
++vs2, funct3=F
++00000 vfmv.s.f
++
++VWFUNARY0
++vs1, funct3=V
++00000 vfmv.f.s
++
++VFUNARY0
++vs1
++00000 vfcvt.xu.f.v
++00001 vfcvt.x.f.v
++00010 vfcvt.f.xu.v
++00011 vfcvt.f.x.v
++00110 vfcvt.rtz.xu.f.v
++00111 vfcvt.rtz.x.f.v
++
++01000 vfwcvt.xu.f.v
++01001 vfwcvt.x.f.v
++01010 vfwcvt.f.xu.v
++01011 vfwcvt.f.x.v
++01100 vfwcvt.f.f.v
++01110 vfwcvt.rtz.xu.f.v
++01111 vfwcvt.rtz.x.f.v
++
++10000 vfncvt.xu.f.w
++10001 vfncvt.x.f.w
++10010 vfncvt.f.xu.w
++10011 vfncvt.f.x.w
++10100 vfncvt.f.f.w
++10101 vfncvt.rod.f.f.w
++10110 vfncvt.rtz.xu.f.w
++10111 vfncvt.rtz.x.f.w
++
++VFUNARY1
++vs1
++00000 vfsqrt.v
++00100 vfrsqrt7.v
++00101 vfrec7.v
++10000 vfclass.v
++
++31-26   25  24-20  19-15        14-12   11-7  6-0
++funct6  VM  VS2    VS1/RS1/IMM  funct3  VD    opcode
++010000 x xxxxx 00000 001 xxxxx 1010111
++0100 00xx xxxx 0000 0001 xxxx x101 0111
++*/
++
++#define MATCH_VADDVV  0x00000057
++#define MASK_VADDVV   0xfc00707f
++#define MATCH_VADDVX  0x00004057
++#define MASK_VADDVX   0xfc00707f
++#define MATCH_VADDVI  0x00003057
++#define MASK_VADDVI   0xfc00707f
++#define MATCH_VSUBVV  0x08000057
++#define MASK_VSUBVV   0xfc00707f
++#define MATCH_VSUBVX  0x08004057
++#define MASK_VSUBVX   0xfc00707f
++#define MATCH_VRSUBVX 0x0c004057
++#define MASK_VRSUBVX  0xfc00707f
++#define MATCH_VRSUBVI 0x0c003057
++#define MASK_VRSUBVI  0xfc00707f
++
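
Per the funct3/funct6 tables in the comment above, every OP-V ALU MATCH constant is just funct6 (bits 31-26), funct3 (bits 14-12) and the OP-V major opcode 0x57. A minimal sketch (hypothetical helper, not part of the patch):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t
    opv_match (uint32_t funct6, uint32_t funct3)
    {
      return (funct6 << 26) | (funct3 << 12) | 0x57u;
    }

    int
    main (void)
    {
      assert (opv_match (0x00, 0) == 0x00000057);  /* MATCH_VADDVV, OPIVV  */
      assert (opv_match (0x00, 4) == 0x00004057);  /* MATCH_VADDVX, OPIVX  */
      assert (opv_match (0x02, 0) == 0x08000057);  /* MATCH_VSUBVV  */
      return 0;
    }
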
++#define MATCH_VWCVTXXV  0xc4006057
++#define MASK_VWCVTXXV   0xfc0ff07f
++#define MATCH_VWCVTUXXV 0xc0006057
++#define MASK_VWCVTUXXV  0xfc0ff07f
++
++#define MATCH_VWADDVV  0xc4002057
++#define MASK_VWADDVV   0xfc00707f
++#define MATCH_VWADDVX  0xc4006057
++#define MASK_VWADDVX   0xfc00707f
++#define MATCH_VWSUBVV  0xcc002057
++#define MASK_VWSUBVV   0xfc00707f
++#define MATCH_VWSUBVX  0xcc006057
++#define MASK_VWSUBVX   0xfc00707f
++#define MATCH_VWADDWV  0xd4002057
++#define MASK_VWADDWV   0xfc00707f
++#define MATCH_VWADDWX  0xd4006057
++#define MASK_VWADDWX   0xfc00707f
++#define MATCH_VWSUBWV  0xdc002057
++#define MASK_VWSUBWV   0xfc00707f
++#define MATCH_VWSUBWX  0xdc006057
++#define MASK_VWSUBWX   0xfc00707f
++#define MATCH_VWADDUVV  0xc0002057
++#define MASK_VWADDUVV   0xfc00707f
++#define MATCH_VWADDUVX  0xc0006057
++#define MASK_VWADDUVX   0xfc00707f
++#define MATCH_VWSUBUVV  0xc8002057
++#define MASK_VWSUBUVV   0xfc00707f
++#define MATCH_VWSUBUVX  0xc8006057
++#define MASK_VWSUBUVX   0xfc00707f
++#define MATCH_VWADDUWV  0xd0002057
++#define MASK_VWADDUWV   0xfc00707f
++#define MATCH_VWADDUWX  0xd0006057
++#define MASK_VWADDUWX   0xfc00707f
++#define MATCH_VWSUBUWV  0xd8002057
++#define MASK_VWSUBUWV   0xfc00707f
++#define MATCH_VWSUBUWX  0xd8006057
++#define MASK_VWSUBUWX   0xfc00707f
++
++#define MATCH_VZEXT_VF8 0x48012057
++#define MASK_VZEXT_VF8  0xfc0ff07f
++#define MATCH_VSEXT_VF8 0x4801a057
++#define MASK_VSEXT_VF8  0xfc0ff07f
++#define MATCH_VZEXT_VF4 0x48022057
++#define MASK_VZEXT_VF4  0xfc0ff07f
++#define MATCH_VSEXT_VF4 0x4802a057
++#define MASK_VSEXT_VF4  0xfc0ff07f
++#define MATCH_VZEXT_VF2 0x48032057
++#define MASK_VZEXT_VF2  0xfc0ff07f
++#define MATCH_VSEXT_VF2 0x4803a057
++#define MASK_VSEXT_VF2  0xfc0ff07f
++
++#define MATCH_VADCVVM  0x40000057
++#define MASK_VADCVVM   0xfe00707f
++#define MATCH_VADCVXM  0x40004057
++#define MASK_VADCVXM   0xfe00707f
++#define MATCH_VADCVIM  0x40003057
++#define MASK_VADCVIM   0xfe00707f
++#define MATCH_VMADCVVM 0x44000057
++#define MASK_VMADCVVM  0xfe00707f
++#define MATCH_VMADCVXM 0x44004057
++#define MASK_VMADCVXM  0xfe00707f
++#define MATCH_VMADCVIM 0x44003057
++#define MASK_VMADCVIM  0xfe00707f
++#define MATCH_VMADCVV  0x46000057
++#define MASK_VMADCVV   0xfe00707f
++#define MATCH_VMADCVX  0x46004057
++#define MASK_VMADCVX   0xfe00707f
++#define MATCH_VMADCVI  0x46003057
++#define MASK_VMADCVI   0xfe00707f
++#define MATCH_VSBCVVM  0x48000057
++#define MASK_VSBCVVM   0xfe00707f
++#define MATCH_VSBCVXM  0x48004057
++#define MASK_VSBCVXM   0xfe00707f
++#define MATCH_VMSBCVVM 0x4c000057
++#define MASK_VMSBCVVM  0xfe00707f
++#define MATCH_VMSBCVXM 0x4c004057
++#define MASK_VMSBCVXM  0xfe00707f
++#define MATCH_VMSBCVV  0x4e000057
++#define MASK_VMSBCVV   0xfe00707f
++#define MATCH_VMSBCVX  0x4e004057
++#define MASK_VMSBCVX   0xfe00707f
++
++#define MATCH_VNOTV   0x2c0fb057
++#define MASK_VNOTV    0xfc0ff07f
++
++#define MATCH_VANDVV  0x24000057
++#define MASK_VANDVV   0xfc00707f
++#define MATCH_VANDVX  0x24004057
++#define MASK_VANDVX   0xfc00707f
++#define MATCH_VANDVI  0x24003057
++#define MASK_VANDVI   0xfc00707f
++#define MATCH_VORVV   0x28000057
++#define MASK_VORVV    0xfc00707f
++#define MATCH_VORVX   0x28004057
++#define MASK_VORVX    0xfc00707f
++#define MATCH_VORVI   0x28003057
++#define MASK_VORVI    0xfc00707f
++#define MATCH_VXORVV  0x2c000057
++#define MASK_VXORVV   0xfc00707f
++#define MATCH_VXORVX  0x2c004057
++#define MASK_VXORVX   0xfc00707f
++#define MATCH_VXORVI  0x2c003057
++#define MASK_VXORVI   0xfc00707f
++
++#define MATCH_VSLLVV 0x94000057
++#define MASK_VSLLVV  0xfc00707f
++#define MATCH_VSLLVX 0x94004057
++#define MASK_VSLLVX  0xfc00707f
++#define MATCH_VSLLVI 0x94003057
++#define MASK_VSLLVI  0xfc00707f
++#define MATCH_VSRLVV 0xa0000057
++#define MASK_VSRLVV  0xfc00707f
++#define MATCH_VSRLVX 0xa0004057
++#define MASK_VSRLVX  0xfc00707f
++#define MATCH_VSRLVI 0xa0003057
++#define MASK_VSRLVI  0xfc00707f
++#define MATCH_VSRAVV 0xa4000057
++#define MASK_VSRAVV  0xfc00707f
++#define MATCH_VSRAVX 0xa4004057
++#define MASK_VSRAVX  0xfc00707f
++#define MATCH_VSRAVI 0xa4003057
++#define MASK_VSRAVI  0xfc00707f
++
++#define MATCH_VNCVTXXW 0xb0004057
++#define MASK_VNCVTXXW  0xfc0ff07f
++
++#define MATCH_VNSRLWV  0xb0000057
++#define MASK_VNSRLWV   0xfc00707f
++#define MATCH_VNSRLWX  0xb0004057
++#define MASK_VNSRLWX   0xfc00707f
++#define MATCH_VNSRLWI  0xb0003057
++#define MASK_VNSRLWI   0xfc00707f
++#define MATCH_VNSRAWV  0xb4000057
++#define MASK_VNSRAWV   0xfc00707f
++#define MATCH_VNSRAWX  0xb4004057
++#define MASK_VNSRAWX   0xfc00707f
++#define MATCH_VNSRAWI  0xb4003057
++#define MASK_VNSRAWI   0xfc00707f
++
++#define MATCH_VMSEQVV  0x60000057
++#define MASK_VMSEQVV   0xfc00707f
++#define MATCH_VMSEQVX  0x60004057
++#define MASK_VMSEQVX   0xfc00707f
++#define MATCH_VMSEQVI  0x60003057
++#define MASK_VMSEQVI   0xfc00707f
++#define MATCH_VMSNEVV  0x64000057
++#define MASK_VMSNEVV   0xfc00707f
++#define MATCH_VMSNEVX  0x64004057
++#define MASK_VMSNEVX   0xfc00707f
++#define MATCH_VMSNEVI  0x64003057
++#define MASK_VMSNEVI   0xfc00707f
++#define MATCH_VMSLTVV  0x6c000057
++#define MASK_VMSLTVV   0xfc00707f
++#define MATCH_VMSLTVX  0x6c004057
++#define MASK_VMSLTVX   0xfc00707f
++#define MATCH_VMSLTUVV 0x68000057
++#define MASK_VMSLTUVV  0xfc00707f
++#define MATCH_VMSLTUVX 0x68004057
++#define MASK_VMSLTUVX  0xfc00707f
++#define MATCH_VMSLEVV  0x74000057
++#define MASK_VMSLEVV   0xfc00707f
++#define MATCH_VMSLEVX  0x74004057
++#define MASK_VMSLEVX   0xfc00707f
++#define MATCH_VMSLEVI  0x74003057
++#define MASK_VMSLEVI   0xfc00707f
++#define MATCH_VMSLEUVV 0x70000057
++#define MASK_VMSLEUVV  0xfc00707f
++#define MATCH_VMSLEUVX 0x70004057
++#define MASK_VMSLEUVX  0xfc00707f
++#define MATCH_VMSLEUVI 0x70003057
++#define MASK_VMSLEUVI  0xfc00707f
++#define MATCH_VMSGTVX  0x7c004057
++#define MASK_VMSGTVX   0xfc00707f
++#define MATCH_VMSGTVI  0x7c003057
++#define MASK_VMSGTVI   0xfc00707f
++#define MATCH_VMSGTUVX 0x78004057
++#define MASK_VMSGTUVX  0xfc00707f
++#define MATCH_VMSGTUVI 0x78003057
++#define MASK_VMSGTUVI  0xfc00707f
++
++#define MATCH_VMINVV  0x14000057
++#define MASK_VMINVV   0xfc00707f
++#define MATCH_VMINVX  0x14004057
++#define MASK_VMINVX   0xfc00707f
++#define MATCH_VMAXVV  0x1c000057
++#define MASK_VMAXVV   0xfc00707f
++#define MATCH_VMAXVX  0x1c004057
++#define MASK_VMAXVX   0xfc00707f
++#define MATCH_VMINUVV 0x10000057
++#define MASK_VMINUVV  0xfc00707f
++#define MATCH_VMINUVX 0x10004057
++#define MASK_VMINUVX  0xfc00707f
++#define MATCH_VMAXUVV 0x18000057
++#define MASK_VMAXUVV  0xfc00707f
++#define MATCH_VMAXUVX 0x18004057
++#define MASK_VMAXUVX  0xfc00707f
++
++#define MATCH_VMULVV    0x94002057
++#define MASK_VMULVV     0xfc00707f
++#define MATCH_VMULVX    0x94006057
++#define MASK_VMULVX     0xfc00707f
++#define MATCH_VMULHVV   0x9c002057
++#define MASK_VMULHVV    0xfc00707f
++#define MATCH_VMULHVX   0x9c006057
++#define MASK_VMULHVX    0xfc00707f
++#define MATCH_VMULHUVV  0x90002057
++#define MASK_VMULHUVV   0xfc00707f
++#define MATCH_VMULHUVX  0x90006057
++#define MASK_VMULHUVX   0xfc00707f
++#define MATCH_VMULHSUVV 0x98002057
++#define MASK_VMULHSUVV  0xfc00707f
++#define MATCH_VMULHSUVX 0x98006057
++#define MASK_VMULHSUVX  0xfc00707f
++
++#define MATCH_VWMULVV   0xec002057
++#define MASK_VWMULVV    0xfc00707f
++#define MATCH_VWMULVX   0xec006057
++#define MASK_VWMULVX    0xfc00707f
++#define MATCH_VWMULUVV  0xe0002057
++#define MASK_VWMULUVV   0xfc00707f
++#define MATCH_VWMULUVX  0xe0006057
++#define MASK_VWMULUVX   0xfc00707f
++#define MATCH_VWMULSUVV 0xe8002057
++#define MASK_VWMULSUVV  0xfc00707f
++#define MATCH_VWMULSUVX 0xe8006057
++#define MASK_VWMULSUVX  0xfc00707f
++
++#define MATCH_VMACCVV  0xb4002057
++#define MASK_VMACCVV   0xfc00707f
++#define MATCH_VMACCVX  0xb4006057
++#define MASK_VMACCVX   0xfc00707f
++#define MATCH_VNMSACVV 0xbc002057
++#define MASK_VNMSACVV  0xfc00707f
++#define MATCH_VNMSACVX 0xbc006057
++#define MASK_VNMSACVX  0xfc00707f
++#define MATCH_VMADDVV  0xa4002057
++#define MASK_VMADDVV   0xfc00707f
++#define MATCH_VMADDVX  0xa4006057
++#define MASK_VMADDVX   0xfc00707f
++#define MATCH_VNMSUBVV 0xac002057
++#define MASK_VNMSUBVV  0xfc00707f
++#define MATCH_VNMSUBVX 0xac006057
++#define MASK_VNMSUBVX  0xfc00707f
++
++#define MATCH_VWMACCUVV  0xf0002057
++#define MASK_VWMACCUVV   0xfc00707f
++#define MATCH_VWMACCUVX  0xf0006057
++#define MASK_VWMACCUVX   0xfc00707f
++#define MATCH_VWMACCVV   0xf4002057
++#define MASK_VWMACCVV    0xfc00707f
++#define MATCH_VWMACCVX   0xf4006057
++#define MASK_VWMACCVX    0xfc00707f
++#define MATCH_VWMACCSUVV 0xfc002057
++#define MASK_VWMACCSUVV  0xfc00707f
++#define MATCH_VWMACCSUVX 0xfc006057
++#define MASK_VWMACCSUVX  0xfc00707f
++#define MATCH_VWMACCUSVX 0xf8006057
++#define MASK_VWMACCUSVX  0xfc00707f
++
++#define MATCH_VQMACCUVV  0xf0000057
++#define MASK_VQMACCUVV   0xfc00707f
++#define MATCH_VQMACCUVX  0xf0004057
++#define MASK_VQMACCUVX   0xfc00707f
++#define MATCH_VQMACCVV   0xf4000057
++#define MASK_VQMACCVV    0xfc00707f
++#define MATCH_VQMACCVX   0xf4004057
++#define MASK_VQMACCVX    0xfc00707f
++#define MATCH_VQMACCSUVV 0xfc000057
++#define MASK_VQMACCSUVV  0xfc00707f
++#define MATCH_VQMACCSUVX 0xfc004057
++#define MASK_VQMACCSUVX  0xfc00707f
++#define MATCH_VQMACCUSVX 0xf8004057
++#define MASK_VQMACCUSVX  0xfc00707f
++
++#define MATCH_VDIVVV  0x84002057
++#define MASK_VDIVVV   0xfc00707f
++#define MATCH_VDIVVX  0x84006057
++#define MASK_VDIVVX   0xfc00707f
++#define MATCH_VDIVUVV 0x80002057
++#define MASK_VDIVUVV  0xfc00707f
++#define MATCH_VDIVUVX 0x80006057
++#define MASK_VDIVUVX  0xfc00707f
++#define MATCH_VREMVV  0x8c002057
++#define MASK_VREMVV   0xfc00707f
++#define MATCH_VREMVX  0x8c006057
++#define MASK_VREMVX   0xfc00707f
++#define MATCH_VREMUVV 0x88002057
++#define MASK_VREMUVV  0xfc00707f
++#define MATCH_VREMUVX 0x88006057
++#define MASK_VREMUVX  0xfc00707f
++
++#define MATCH_VMERGEVVM 0x5c000057
++#define MASK_VMERGEVVM  0xfe00707f
++#define MATCH_VMERGEVXM 0x5c004057
++#define MASK_VMERGEVXM  0xfe00707f
++#define MATCH_VMERGEVIM 0x5c003057
++#define MASK_VMERGEVIM  0xfe00707f
++
++#define MATCH_VMVVV    0x5e000057
++#define MASK_VMVVV     0xfff0707f
++#define MATCH_VMVVX    0x5e004057
++#define MASK_VMVVX     0xfff0707f
++#define MATCH_VMVVI    0x5e003057
++#define MASK_VMVVI     0xfff0707f
++
++#define MATCH_VSADDUVV 0x80000057
++#define MASK_VSADDUVV  0xfc00707f
++#define MATCH_VSADDUVX 0x80004057
++#define MASK_VSADDUVX  0xfc00707f
++#define MATCH_VSADDUVI 0x80003057
++#define MASK_VSADDUVI  0xfc00707f
++#define MATCH_VSADDVV  0x84000057
++#define MASK_VSADDVV   0xfc00707f
++#define MATCH_VSADDVX  0x84004057
++#define MASK_VSADDVX   0xfc00707f
++#define MATCH_VSADDVI  0x84003057
++#define MASK_VSADDVI   0xfc00707f
++#define MATCH_VSSUBUVV 0x88000057
++#define MASK_VSSUBUVV  0xfc00707f
++#define MATCH_VSSUBUVX 0x88004057
++#define MASK_VSSUBUVX  0xfc00707f
++#define MATCH_VSSUBVV  0x8c000057
++#define MASK_VSSUBVV   0xfc00707f
++#define MATCH_VSSUBVX  0x8c004057
++#define MASK_VSSUBVX   0xfc00707f
++
++#define MATCH_VAADDUVV 0x20002057
++#define MASK_VAADDUVV  0xfc00707f
++#define MATCH_VAADDUVX 0x20006057
++#define MASK_VAADDUVX  0xfc00707f
++#define MATCH_VAADDVV  0x24002057
++#define MASK_VAADDVV   0xfc00707f
++#define MATCH_VAADDVX  0x24006057
++#define MASK_VAADDVX   0xfc00707f
++#define MATCH_VASUBUVV 0x28002057
++#define MASK_VASUBUVV  0xfc00707f
++#define MATCH_VASUBUVX 0x28006057
++#define MASK_VASUBUVX  0xfc00707f
++#define MATCH_VASUBVV  0x2c002057
++#define MASK_VASUBVV   0xfc00707f
++#define MATCH_VASUBVX  0x2c006057
++#define MASK_VASUBVX   0xfc00707f
++
++#define MATCH_VSMULVV  0x9c000057
++#define MASK_VSMULVV   0xfc00707f
++#define MATCH_VSMULVX  0x9c004057
++#define MASK_VSMULVX   0xfc00707f
++
++#define MATCH_VSSRLVV   0xa8000057
++#define MASK_VSSRLVV    0xfc00707f
++#define MATCH_VSSRLVX   0xa8004057
++#define MASK_VSSRLVX    0xfc00707f
++#define MATCH_VSSRLVI   0xa8003057
++#define MASK_VSSRLVI    0xfc00707f
++#define MATCH_VSSRAVV   0xac000057
++#define MASK_VSSRAVV    0xfc00707f
++#define MATCH_VSSRAVX   0xac004057
++#define MASK_VSSRAVX    0xfc00707f
++#define MATCH_VSSRAVI   0xac003057
++#define MASK_VSSRAVI    0xfc00707f
++
++#define MATCH_VNCLIPUWV 0xb8000057
++#define MASK_VNCLIPUWV  0xfc00707f
++#define MATCH_VNCLIPUWX 0xb8004057
++#define MASK_VNCLIPUWX  0xfc00707f
++#define MATCH_VNCLIPUWI 0xb8003057
++#define MASK_VNCLIPUWI  0xfc00707f
++#define MATCH_VNCLIPWV  0xbc000057
++#define MASK_VNCLIPWV   0xfc00707f
++#define MATCH_VNCLIPWX  0xbc004057
++#define MASK_VNCLIPWX   0xfc00707f
++#define MATCH_VNCLIPWI  0xbc003057
++#define MASK_VNCLIPWI   0xfc00707f
++
++#define MATCH_VFADDVV  0x00001057
++#define MASK_VFADDVV   0xfc00707f
++#define MATCH_VFADDVF  0x00005057
++#define MASK_VFADDVF   0xfc00707f
++#define MATCH_VFSUBVV  0x08001057
++#define MASK_VFSUBVV   0xfc00707f
++#define MATCH_VFSUBVF  0x08005057
++#define MASK_VFSUBVF   0xfc00707f
++#define MATCH_VFRSUBVF 0x9c005057
++#define MASK_VFRSUBVF  0xfc00707f
++
++#define MATCH_VFWADDVV  0xc0001057
++#define MASK_VFWADDVV   0xfc00707f
++#define MATCH_VFWADDVF  0xc0005057
++#define MASK_VFWADDVF   0xfc00707f
++#define MATCH_VFWSUBVV  0xc8001057
++#define MASK_VFWSUBVV   0xfc00707f
++#define MATCH_VFWSUBVF  0xc8005057
++#define MASK_VFWSUBVF   0xfc00707f
++#define MATCH_VFWADDWV  0xd0001057
++#define MASK_VFWADDWV   0xfc00707f
++#define MATCH_VFWADDWF  0xd0005057
++#define MASK_VFWADDWF   0xfc00707f
++#define MATCH_VFWSUBWV  0xd8001057
++#define MASK_VFWSUBWV   0xfc00707f
++#define MATCH_VFWSUBWF  0xd8005057
++#define MASK_VFWSUBWF   0xfc00707f
++
++#define MATCH_VFMULVV  0x90001057
++#define MASK_VFMULVV   0xfc00707f
++#define MATCH_VFMULVF  0x90005057
++#define MASK_VFMULVF   0xfc00707f
++#define MATCH_VFDIVVV  0x80001057
++#define MASK_VFDIVVV   0xfc00707f
++#define MATCH_VFDIVVF  0x80005057
++#define MASK_VFDIVVF   0xfc00707f
++#define MATCH_VFRDIVVF 0x84005057
++#define MASK_VFRDIVVF  0xfc00707f
++
++#define MATCH_VFWMULVV 0xe0001057
++#define MASK_VFWMULVV  0xfc00707f
++#define MATCH_VFWMULVF 0xe0005057
++#define MASK_VFWMULVF  0xfc00707f
++
++#define MATCH_VFMADDVV  0xa0001057
++#define MASK_VFMADDVV   0xfc00707f
++#define MATCH_VFMADDVF  0xa0005057
++#define MASK_VFMADDVF   0xfc00707f
++#define MATCH_VFNMADDVV 0xa4001057
++#define MASK_VFNMADDVV  0xfc00707f
++#define MATCH_VFNMADDVF 0xa4005057
++#define MASK_VFNMADDVF  0xfc00707f
++#define MATCH_VFMSUBVV  0xa8001057
++#define MASK_VFMSUBVV   0xfc00707f
++#define MATCH_VFMSUBVF  0xa8005057
++#define MASK_VFMSUBVF   0xfc00707f
++#define MATCH_VFNMSUBVV 0xac001057
++#define MASK_VFNMSUBVV  0xfc00707f
++#define MATCH_VFNMSUBVF 0xac005057
++#define MASK_VFNMSUBVF  0xfc00707f
++#define MATCH_VFMACCVV  0xb0001057
++#define MASK_VFMACCVV   0xfc00707f
++#define MATCH_VFMACCVF  0xb0005057
++#define MASK_VFMACCVF   0xfc00707f
++#define MATCH_VFNMACCVV 0xb4001057
++#define MASK_VFNMACCVV  0xfc00707f
++#define MATCH_VFNMACCVF 0xb4005057
++#define MASK_VFNMACCVF  0xfc00707f
++#define MATCH_VFMSACVV  0xb8001057
++#define MASK_VFMSACVV   0xfc00707f
++#define MATCH_VFMSACVF  0xb8005057
++#define MASK_VFMSACVF   0xfc00707f
++#define MATCH_VFNMSACVV 0xbc001057
++#define MASK_VFNMSACVV  0xfc00707f
++#define MATCH_VFNMSACVF 0xbc005057
++#define MASK_VFNMSACVF  0xfc00707f
++
++#define MATCH_VFWMACCVV  0xf0001057
++#define MASK_VFWMACCVV   0xfc00707f
++#define MATCH_VFWMACCVF  0xf0005057
++#define MASK_VFWMACCVF   0xfc00707f
++#define MATCH_VFWNMACCVV 0xf4001057
++#define MASK_VFWNMACCVV  0xfc00707f
++#define MATCH_VFWNMACCVF 0xf4005057
++#define MASK_VFWNMACCVF  0xfc00707f
++#define MATCH_VFWMSACVV  0xf8001057
++#define MASK_VFWMSACVV   0xfc00707f
++#define MATCH_VFWMSACVF  0xf8005057
++#define MASK_VFWMSACVF   0xfc00707f
++#define MATCH_VFWNMSACVV 0xfc001057
++#define MASK_VFWNMSACVV  0xfc00707f
++#define MATCH_VFWNMSACVF 0xfc005057
++#define MASK_VFWNMSACVF  0xfc00707f
++
++#define MATCH_VFSQRTV    0x4c001057
++#define MASK_VFSQRTV     0xfc0ff07f
++#define MATCH_VFRSQRT7V  0x4c021057
++#define MASK_VFRSQRT7V   0xfc0ff07f
++#define MATCH_VFREC7V    0x4c029057
++#define MASK_VFREC7V     0xfc0ff07f
++#define MATCH_VFCLASSV   0x4c081057
++#define MASK_VFCLASSV    0xfc0ff07f
++
++#define MATCH_VFMINVV  0x10001057
++#define MASK_VFMINVV   0xfc00707f
++#define MATCH_VFMINVF  0x10005057
++#define MASK_VFMINVF   0xfc00707f
++#define MATCH_VFMAXVV  0x18001057
++#define MASK_VFMAXVV   0xfc00707f
++#define MATCH_VFMAXVF  0x18005057
++#define MASK_VFMAXVF   0xfc00707f
++
++#define MATCH_VFSGNJVV  0x20001057
++#define MASK_VFSGNJVV   0xfc00707f
++#define MATCH_VFSGNJVF  0x20005057
++#define MASK_VFSGNJVF   0xfc00707f
++#define MATCH_VFSGNJNVV 0x24001057
++#define MASK_VFSGNJNVV  0xfc00707f
++#define MATCH_VFSGNJNVF 0x24005057
++#define MASK_VFSGNJNVF  0xfc00707f
++#define MATCH_VFSGNJXVV 0x28001057
++#define MASK_VFSGNJXVV  0xfc00707f
++#define MATCH_VFSGNJXVF 0x28005057
++#define MASK_VFSGNJXVF  0xfc00707f
++
++#define MATCH_VMFEQVV   0x60001057
++#define MASK_VMFEQVV    0xfc00707f
++#define MATCH_VMFEQVF   0x60005057
++#define MASK_VMFEQVF    0xfc00707f
++#define MATCH_VMFNEVV   0x70001057
++#define MASK_VMFNEVV    0xfc00707f
++#define MATCH_VMFNEVF   0x70005057
++#define MASK_VMFNEVF    0xfc00707f
++#define MATCH_VMFLTVV   0x6c001057
++#define MASK_VMFLTVV    0xfc00707f
++#define MATCH_VMFLTVF   0x6c005057
++#define MASK_VMFLTVF    0xfc00707f
++#define MATCH_VMFLEVV  0x64001057
++#define MASK_VMFLEVV   0xfc00707f
++#define MATCH_VMFLEVF  0x64005057
++#define MASK_VMFLEVF   0xfc00707f
++#define MATCH_VMFGTVF   0x74005057
++#define MASK_VMFGTVF    0xfc00707f
++#define MATCH_VMFGEVF  0x7c005057
++#define MASK_VMFGEVF   0xfc00707f
++
++#define MATCH_VFMERGEVFM 0x5c005057
++#define MASK_VFMERGEVFM  0xfe00707f
++#define MATCH_VFMVVF     0x5e005057
++#define MASK_VFMVVF      0xfff0707f
++
++#define MATCH_VFCVTXUFV 0x48001057
++#define MASK_VFCVTXUFV  0xfc0ff07f
++#define MATCH_VFCVTXFV 0x48009057
++#define MASK_VFCVTXFV  0xfc0ff07f
++#define MATCH_VFCVTFXUV 0x48011057
++#define MASK_VFCVTFXUV  0xfc0ff07f
++#define MATCH_VFCVTFXV 0x48019057
++#define MASK_VFCVTFXV  0xfc0ff07f
++#define MATCH_VFCVTRTZXUFV 0x48031057
++#define MASK_VFCVTRTZXUFV  0xfc0ff07f
++#define MATCH_VFCVTRTZXFV 0x48039057
++#define MASK_VFCVTRTZXFV  0xfc0ff07f
++#define MATCH_VFWCVTXUFV 0x48041057
++#define MASK_VFWCVTXUFV  0xfc0ff07f
++#define MATCH_VFWCVTXFV 0x48049057
++#define MASK_VFWCVTXFV  0xfc0ff07f
++#define MATCH_VFWCVTFXUV 0x48051057
++#define MASK_VFWCVTFXUV  0xfc0ff07f
++#define MATCH_VFWCVTFXV 0x48059057
++#define MASK_VFWCVTFXV  0xfc0ff07f
++#define MATCH_VFWCVTFFV 0x48061057
++#define MASK_VFWCVTFFV  0xfc0ff07f
++#define MATCH_VFWCVTRTZXUFV 0x48071057
++#define MASK_VFWCVTRTZXUFV  0xfc0ff07f
++#define MATCH_VFWCVTRTZXFV 0x48079057
++#define MASK_VFWCVTRTZXFV  0xfc0ff07f
++#define MATCH_VFNCVTXUFW 0x48081057
++#define MASK_VFNCVTXUFW  0xfc0ff07f
++#define MATCH_VFNCVTXFW 0x48089057
++#define MASK_VFNCVTXFW  0xfc0ff07f
++#define MATCH_VFNCVTFXUW 0x48091057
++#define MASK_VFNCVTFXUW  0xfc0ff07f
++#define MATCH_VFNCVTFXW 0x48099057
++#define MASK_VFNCVTFXW  0xfc0ff07f
++#define MATCH_VFNCVTFFW 0x480a1057
++#define MASK_VFNCVTFFW  0xfc0ff07f
++#define MATCH_VFNCVTRODFFW 0x480a9057
++#define MASK_VFNCVTRODFFW  0xfc0ff07f
++#define MATCH_VFNCVTRTZXUFW 0x480b1057
++#define MASK_VFNCVTRTZXUFW  0xfc0ff07f
++#define MATCH_VFNCVTRTZXFW 0x480b9057
++#define MASK_VFNCVTRTZXFW  0xfc0ff07f
++
++#define MATCH_VREDSUMVS  0x00002057
++#define MASK_VREDSUMVS   0xfc00707f
++#define MATCH_VREDMAXVS  0x1c002057
++#define MASK_VREDMAXVS   0xfc00707f
++#define MATCH_VREDMAXUVS 0x18002057
++#define MASK_VREDMAXUVS  0xfc00707f
++#define MATCH_VREDMINVS  0x14002057
++#define MASK_VREDMINVS   0xfc00707f
++#define MATCH_VREDMINUVS 0x10002057
++#define MASK_VREDMINUVS  0xfc00707f
++#define MATCH_VREDANDVS  0x04002057
++#define MASK_VREDANDVS   0xfc00707f
++#define MATCH_VREDORVS   0x08002057
++#define MASK_VREDORVS    0xfc00707f
++#define MATCH_VREDXORVS  0x0c002057
++#define MASK_VREDXORVS   0xfc00707f
++
++#define MATCH_VWREDSUMUVS 0xc0000057
++#define MASK_VWREDSUMUVS  0xfc00707f
++#define MATCH_VWREDSUMVS  0xc4000057
++#define MASK_VWREDSUMVS   0xfc00707f
++
++#define MATCH_VFREDOSUMVS 0x0c001057
++#define MASK_VFREDOSUMVS  0xfc00707f
++#define MATCH_VFREDSUMVS  0x04001057
++#define MASK_VFREDSUMVS   0xfc00707f
++#define MATCH_VFREDMAXVS  0x1c001057
++#define MASK_VFREDMAXVS   0xfc00707f
++#define MATCH_VFREDMINVS  0x14001057
++#define MASK_VFREDMINVS   0xfc00707f
++
++#define MATCH_VFWREDOSUMVS 0xcc001057
++#define MASK_VFWREDOSUMVS  0xfc00707f
++#define MATCH_VFWREDSUMVS  0xc4001057
++#define MASK_VFWREDSUMVS   0xfc00707f
++
++#define MATCH_VMANDMM    0x66002057
++#define MASK_VMANDMM     0xfe00707f
++#define MATCH_VMNANDMM   0x76002057
++#define MASK_VMNANDMM    0xfe00707f
++#define MATCH_VMANDNOTMM 0x62002057
++#define MASK_VMANDNOTMM  0xfe00707f
++#define MATCH_VMXORMM    0x6e002057
++#define MASK_VMXORMM     0xfe00707f
++#define MATCH_VMORMM     0x6a002057
++#define MASK_VMORMM      0xfe00707f
++#define MATCH_VMNORMM    0x7a002057
++#define MASK_VMNORMM     0xfe00707f
++#define MATCH_VMORNOTMM  0x72002057
++#define MASK_VMORNOTMM   0xfe00707f
++#define MATCH_VMXNORMM   0x7e002057
++#define MASK_VMXNORMM    0xfe00707f
++
++#define MATCH_VPOPCM   0x40082057
++#define MASK_VPOPCM    0xfc0ff07f
++#define MATCH_VFIRSTM  0x4008a057
++#define MASK_VFIRSTM   0xfc0ff07f
++
++#define MATCH_VMSBFM   0x5000a057
++#define MASK_VMSBFM    0xfc0ff07f
++#define MATCH_VMSIFM   0x5001a057
++#define MASK_VMSIFM    0xfc0ff07f
++#define MATCH_VMSOFM   0x50012057
++#define MASK_VMSOFM    0xfc0ff07f
++#define MATCH_VIOTAM   0x50082057
++#define MASK_VIOTAM    0xfc0ff07f
++#define MATCH_VIDV     0x5008a057
++#define MASK_VIDV      0xfdfff07f
++
++#define MATCH_VMVXS    0x42002057
++#define MASK_VMVXS     0xfe0ff07f
++#define MATCH_VMVSX    0x42006057
++#define MASK_VMVSX     0xfff0707f
++
++#define MATCH_VFMVFS   0x42001057
++#define MASK_VFMVFS    0xfe0ff07f
++#define MATCH_VFMVSF   0x42005057
++#define MASK_VFMVSF    0xfff0707f
++
++#define MATCH_VSLIDEUPVX   0x38004057
++#define MASK_VSLIDEUPVX    0xfc00707f
++#define MATCH_VSLIDEUPVI   0x38003057
++#define MASK_VSLIDEUPVI    0xfc00707f
++#define MATCH_VSLIDEDOWNVX 0x3c004057
++#define MASK_VSLIDEDOWNVX  0xfc00707f
++#define MATCH_VSLIDEDOWNVI 0x3c003057
++#define MASK_VSLIDEDOWNVI  0xfc00707f
++
++#define MATCH_VSLIDE1UPVX   0x38006057
++#define MASK_VSLIDE1UPVX    0xfc00707f
++#define MATCH_VSLIDE1DOWNVX 0x3c006057
++#define MASK_VSLIDE1DOWNVX  0xfc00707f
++
++#define MATCH_VFSLIDE1UPVF   0x38005057
++#define MASK_VFSLIDE1UPVF    0xfc00707f
++#define MATCH_VFSLIDE1DOWNVF 0x3c005057
++#define MASK_VFSLIDE1DOWNVF  0xfc00707f
++
++#define MATCH_VRGATHERVV      0x30000057
++#define MASK_VRGATHERVV       0xfc00707f
++#define MATCH_VRGATHERVX      0x30004057
++#define MASK_VRGATHERVX       0xfc00707f
++#define MATCH_VRGATHERVI      0x30003057
++#define MASK_VRGATHERVI       0xfc00707f
++#define MATCH_VRGATHEREI16VV  0x38000057
++#define MASK_VRGATHEREI16VV   0xfc00707f
++
++#define MATCH_VCOMPRESSVM   0x5e002057
++#define MASK_VCOMPRESSVM    0xfe00707f
++
++#define MATCH_VMV1RV 0x9e003057
++#define MASK_VMV1RV  0xfe0ff07f
++#define MATCH_VMV2RV 0x9e00b057
++#define MASK_VMV2RV  0xfe0ff07f
++#define MATCH_VMV4RV 0x9e01b057
++#define MASK_VMV4RV  0xfe0ff07f
++#define MATCH_VMV8RV 0x9e03b057
++#define MASK_VMV8RV  0xfe0ff07f
++
++#define MATCH_VDOTVV    0xe4000057
++#define MASK_VDOTVV     0xfc00707f
++#define MATCH_VDOTUVV   0xe0000057
++#define MASK_VDOTUVV    0xfc00707f
++#define MATCH_VFDOTVV   0xe4001057
++#define MASK_VFDOTVV    0xfc00707f
++/* END RVV */
++
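
All of the MATCH/MASK pairs above are consumed the same way: the mask clears the operand fields, and what remains must equal the match value. A minimal sketch, assuming the defines above are in scope (is_vadd_vv is a hypothetical helper, not part of the patch):

    #include <stdint.h>

    static int
    is_vadd_vv (uint32_t insn)
    {
      return (insn & MASK_VADDVV) == MATCH_VADDVV;
    }
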
+ #define MATCH_CUSTOM0 0xb
+ #define MASK_CUSTOM0  0x707f
+ #define MATCH_CUSTOM0_RS1 0x200b
+@@ -830,6 +2896,13 @@
+ #define CSR_FFLAGS 0x1
+ #define CSR_FRM 0x2
+ #define CSR_FCSR 0x3
++#define CSR_VSTART 0x008
++#define CSR_VXSAT 0x009
++#define CSR_VXRM 0x00a
++#define CSR_VCSR 0x00f
++#define CSR_VL 0xc20
++#define CSR_VTYPE 0xc21
++#define CSR_VLENB 0xc22
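
These CSR numbers land in the I-type immediate of the csr* instructions. A minimal sketch (not part of the patch) encoding "csrr t0, vlenb", i.e. csrrs t0, vlenb, x0:

    #include <assert.h>
    #include <stdint.h>

    int
    main (void)
    {
      uint32_t insn = (0xc22u << 20)  /* csr = CSR_VLENB      */
                    | (0u << 15)      /* rs1 = x0             */
                    | (0x2u << 12)    /* funct3 = CSRRS       */
                    | (5u << 7)       /* rd = x5 (t0)         */
                    | 0x73u;          /* SYSTEM major opcode  */
      assert (insn == 0xc22022f3);
      return 0;
    }
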
+ #define CSR_DCSR 0x7b0
+ #define CSR_DPC 0x7b1
+ #define CSR_DSCRATCH0 0x7b2
+@@ -912,6 +2985,7 @@ DECLARE_INSN(sb, MATCH_SB, MASK_SB)
+ DECLARE_INSN(sh, MATCH_SH, MASK_SH)
+ DECLARE_INSN(sw, MATCH_SW, MASK_SW)
+ DECLARE_INSN(sd, MATCH_SD, MASK_SD)
++DECLARE_INSN(pause, MATCH_PAUSE, MASK_PAUSE)
+ DECLARE_INSN(fence, MATCH_FENCE, MASK_FENCE)
+ DECLARE_INSN(fence_i, MATCH_FENCE_I, MASK_FENCE_I)
+ DECLARE_INSN(mul, MATCH_MUL, MASK_MUL)
+@@ -959,6 +3033,12 @@ DECLARE_INSN(dret, MATCH_DRET, MASK_DRET)
+ DECLARE_INSN(sfence_vm, MATCH_SFENCE_VM, MASK_SFENCE_VM)
+ DECLARE_INSN(sfence_vma, MATCH_SFENCE_VMA, MASK_SFENCE_VMA)
+ DECLARE_INSN(wfi, MATCH_WFI, MASK_WFI)
++/* Custom CSR instructions.  */
++DECLARE_INSN(cflush_d_l1, MATCH_CFLUSH_D_L1, MASK_CFLUSH_D_L1)
++DECLARE_INSN(cdiscard_d_l1, MATCH_CDISCARD_D_L1, MASK_CDISCARD_D_L1)
++DECLARE_INSN(cflush_d_l2, MATCH_CFLUSH_D_L2, MASK_CFLUSH_D_L2)
++DECLARE_INSN(cdiscard_d_l2, MATCH_CDISCARD_D_L2, MASK_CDISCARD_D_L2)
++/* end */
+ DECLARE_INSN(csrrw, MATCH_CSRRW, MASK_CSRRW)
+ DECLARE_INSN(csrrs, MATCH_CSRRS, MASK_CSRRS)
+ DECLARE_INSN(csrrc, MATCH_CSRRC, MASK_CSRRC)
+@@ -1043,6 +3123,109 @@ DECLARE_INSN(fcvt_q_wu, MATCH_FCVT_Q_WU, MASK_FCVT_Q_WU)
+ DECLARE_INSN(fcvt_q_l, MATCH_FCVT_Q_L, MASK_FCVT_Q_L)
+ DECLARE_INSN(fcvt_q_lu, MATCH_FCVT_Q_LU, MASK_FCVT_Q_LU)
+ DECLARE_INSN(fmv_q_x, MATCH_FMV_Q_X, MASK_FMV_Q_X)
++DECLARE_INSN(clz, MATCH_CLZ, MASK_CLZ)
++DECLARE_INSN(ctz, MATCH_CTZ, MASK_CTZ)
++DECLARE_INSN(cpop, MATCH_CPOP, MASK_CPOP)
++DECLARE_INSN(min, MATCH_MIN, MASK_MIN)
++DECLARE_INSN(minu, MATCH_MINU, MASK_MINU)
++DECLARE_INSN(max, MATCH_MAX, MASK_MAX)
++DECLARE_INSN(maxu, MATCH_MAXU, MASK_MAXU)
++DECLARE_INSN(sext_b, MATCH_SEXT_B, MASK_SEXT_B)
++DECLARE_INSN(sext_h, MATCH_SEXT_H, MASK_SEXT_H)
++DECLARE_INSN(andn, MATCH_ANDN, MASK_ANDN)
++DECLARE_INSN(orn, MATCH_ORN, MASK_ORN)
++DECLARE_INSN(xnor, MATCH_XNOR, MASK_XNOR)
++DECLARE_INSN(rol, MATCH_ROL, MASK_ROL)
++DECLARE_INSN(ror, MATCH_ROR, MASK_ROR)
++DECLARE_INSN(rori, MATCH_RORI, MASK_RORI)
++DECLARE_INSN(clzw, MATCH_CLZW, MASK_CLZW)
++DECLARE_INSN(ctzw, MATCH_CTZW, MASK_CTZW)
++DECLARE_INSN(cpopw, MATCH_CPOPW, MASK_CPOPW)
++DECLARE_INSN(rolw, MATCH_ROLW, MASK_ROLW)
++DECLARE_INSN(rorw, MATCH_RORW, MASK_RORW)
++DECLARE_INSN(roriw, MATCH_RORIW, MASK_RORIW)
++DECLARE_INSN(sh1add, MATCH_SH1ADD, MASK_SH1ADD)
++DECLARE_INSN(sh2add, MATCH_SH2ADD, MASK_SH2ADD)
++DECLARE_INSN(sh3add, MATCH_SH3ADD, MASK_SH3ADD)
++DECLARE_INSN(sh1add_uw, MATCH_SH1ADD_UW, MASK_SH1ADD_UW)
++DECLARE_INSN(sh2add_uw, MATCH_SH2ADD_UW, MASK_SH2ADD_UW)
++DECLARE_INSN(sh3add_uw, MATCH_SH3ADD_UW, MASK_SH3ADD_UW)
++DECLARE_INSN(add_uw, MATCH_ADD_UW, MASK_ADD_UW)
++DECLARE_INSN(slli_uw, MATCH_SLLI_UW, MASK_SLLI_UW)
++DECLARE_INSN(clmul, MATCH_CLMUL, MASK_CLMUL)
++DECLARE_INSN(clmulh, MATCH_CLMULH, MASK_CLMULH)
++DECLARE_INSN(clmulr, MATCH_CLMULR, MASK_CLMULR)
++DECLARE_INSN(pack, MATCH_PACK, MASK_PACK)
++DECLARE_INSN(packu, MATCH_PACKU, MASK_PACKU)
++DECLARE_INSN(packh, MATCH_PACKH, MASK_PACKH)
++DECLARE_INSN(packw, MATCH_PACKW, MASK_PACKW)
++DECLARE_INSN(packuw, MATCH_PACKUW, MASK_PACKUW)
++DECLARE_INSN(grev, MATCH_GREV, MASK_GREV)
++DECLARE_INSN(grevi, MATCH_GREVI, MASK_GREVI)
++DECLARE_INSN(grevw, MATCH_GREVW, MASK_GREVW)
++DECLARE_INSN(greviw, MATCH_GREVIW, MASK_GREVIW)
++DECLARE_INSN(gorc, MATCH_GORC, MASK_GORC)
++DECLARE_INSN(gorci, MATCH_GORCI, MASK_GORCI)
++DECLARE_INSN(gorcw, MATCH_GORCW, MASK_GORCW)
++DECLARE_INSN(gorciw, MATCH_GORCIW, MASK_GORCIW)
++DECLARE_INSN(shfl, MATCH_SHFL, MASK_SHFL)
++DECLARE_INSN(shfli, MATCH_SHFLI, MASK_SHFLI)
++DECLARE_INSN(shflw, MATCH_SHFLW, MASK_SHFLW)
++DECLARE_INSN(unshfl, MATCH_UNSHFL, MASK_UNSHFL)
++DECLARE_INSN(unshfli, MATCH_UNSHFLI, MASK_UNSHFLI)
++DECLARE_INSN(unshflw, MATCH_UNSHFLW, MASK_UNSHFLW)
++DECLARE_INSN(xperm_n, MATCH_XPERM_N, MASK_XPERM_N)
++DECLARE_INSN(xperm_b, MATCH_XPERM_B, MASK_XPERM_B)
++DECLARE_INSN(xperm_h, MATCH_XPERM_H, MASK_XPERM_H)
++DECLARE_INSN(xperm_w, MATCH_XPERM_W, MASK_XPERM_W)
++DECLARE_INSN(bset, MATCH_BSET, MASK_BSET)
++DECLARE_INSN(bclr, MATCH_BCLR, MASK_BCLR)
++DECLARE_INSN(binv, MATCH_BINV, MASK_BINV)
++DECLARE_INSN(bext, MATCH_BEXT, MASK_BEXT)
++DECLARE_INSN(bcompress, MATCH_BCOMPRESS, MASK_BCOMPRESS)
++DECLARE_INSN(bdecompress, MATCH_BDECOMPRESS, MASK_BDECOMPRESS)
++DECLARE_INSN(bseti, MATCH_BSETI, MASK_BSETI)
++DECLARE_INSN(bclri, MATCH_BCLRI, MASK_BCLRI)
++DECLARE_INSN(binvi, MATCH_BINVI, MASK_BINVI)
++DECLARE_INSN(bexti, MATCH_BEXTI, MASK_BEXTI)
++DECLARE_INSN(bsetw, MATCH_BSETW, MASK_BSETW)
++DECLARE_INSN(bclrw, MATCH_BCLRW, MASK_BCLRW)
++DECLARE_INSN(binvw, MATCH_BINVW, MASK_BINVW)
++DECLARE_INSN(bextw, MATCH_BEXTW, MASK_BEXTW)
++DECLARE_INSN(bcompressw, MATCH_BCOMPRESSW, MASK_BCOMPRESSW)
++DECLARE_INSN(bdecompressw, MATCH_BDECOMPRESSW, MASK_BDECOMPRESSW)
++DECLARE_INSN(bsetiw, MATCH_BSETIW, MASK_BSETIW)
++DECLARE_INSN(bclriw, MATCH_BCLRIW, MASK_BCLRIW)
++DECLARE_INSN(binviw, MATCH_BINVIW, MASK_BINVIW)
++DECLARE_INSN(slo, MATCH_SLO, MASK_SLO)
++DECLARE_INSN(sro, MATCH_SRO, MASK_SRO)
++DECLARE_INSN(sloi, MATCH_SLOI, MASK_SLOI)
++DECLARE_INSN(sroi, MATCH_SROI, MASK_SROI)
++DECLARE_INSN(slow, MATCH_SLOW, MASK_SLOW)
++DECLARE_INSN(srow, MATCH_SROW, MASK_SROW)
++DECLARE_INSN(sloiw, MATCH_SLOIW, MASK_SLOIW)
++DECLARE_INSN(sroiw, MATCH_SROIW, MASK_SROIW)
++DECLARE_INSN(bfp, MATCH_BFP, MASK_BFP)
++DECLARE_INSN(bfpw, MATCH_BFPW, MASK_BFPW)
++DECLARE_INSN(bmator, MATCH_BMATOR, MASK_BMATOR)
++DECLARE_INSN(bmatxor, MATCH_BMATXOR, MASK_BMATXOR)
++DECLARE_INSN(bmatflip, MATCH_BMATFLIP, MASK_BMATFLIP)
++DECLARE_INSN(crc32_b, MATCH_CRC32_B, MASK_CRC32_B)
++DECLARE_INSN(crc32_h, MATCH_CRC32_H, MASK_CRC32_H)
++DECLARE_INSN(crc32_w, MATCH_CRC32_W, MASK_CRC32_W)
++DECLARE_INSN(crc32c_b, MATCH_CRC32C_B, MASK_CRC32C_B)
++DECLARE_INSN(crc32c_h, MATCH_CRC32C_H, MASK_CRC32C_H)
++DECLARE_INSN(crc32c_w, MATCH_CRC32C_W, MASK_CRC32C_W)
++DECLARE_INSN(crc32_d, MATCH_CRC32_D, MASK_CRC32_D)
++DECLARE_INSN(crc32c_d, MATCH_CRC32C_D, MASK_CRC32C_D)
++DECLARE_INSN(cmix, MATCH_CMIX, MASK_CMIX)
++DECLARE_INSN(cmov, MATCH_CMOV, MASK_CMOV)
++DECLARE_INSN(fsl, MATCH_FSL, MASK_FSL)
++DECLARE_INSN(fsr, MATCH_FSR, MASK_FSR)
++DECLARE_INSN(fsri, MATCH_FSRI, MASK_FSRI)
++DECLARE_INSN(fslw, MATCH_FSLW, MASK_FSLW)
++DECLARE_INSN(fsrw, MATCH_FSRW, MASK_FSRW)
++DECLARE_INSN(fsriw, MATCH_FSRIW, MASK_FSRIW)
+ DECLARE_INSN(flw, MATCH_FLW, MASK_FLW)
+ DECLARE_INSN(fld, MATCH_FLD, MASK_FLD)
+ DECLARE_INSN(flq, MATCH_FLQ, MASK_FLQ)
+@@ -1365,6 +3548,13 @@ DECLARE_CSR(mhcounteren, CSR_MHCOUNTEREN, CSR_CLASS_I, PRIV_SPEC_CLASS_1P9P1, PR
+ DECLARE_CSR(fflags, CSR_FFLAGS, CSR_CLASS_F, PRIV_SPEC_CLASS_NONE, PRIV_SPEC_CLASS_NONE)
+ DECLARE_CSR(frm, CSR_FRM, CSR_CLASS_F, PRIV_SPEC_CLASS_NONE, PRIV_SPEC_CLASS_NONE)
+ DECLARE_CSR(fcsr, CSR_FCSR, CSR_CLASS_F, PRIV_SPEC_CLASS_NONE, PRIV_SPEC_CLASS_NONE)
++DECLARE_CSR(vstart, CSR_VSTART, CSR_CLASS_V, PRIV_SPEC_CLASS_NONE, PRIV_SPEC_CLASS_NONE)
++DECLARE_CSR(vxsat, CSR_VXSAT, CSR_CLASS_V, PRIV_SPEC_CLASS_NONE, PRIV_SPEC_CLASS_NONE)
++DECLARE_CSR(vxrm, CSR_VXRM, CSR_CLASS_V, PRIV_SPEC_CLASS_NONE, PRIV_SPEC_CLASS_NONE)
++DECLARE_CSR(vcsr, CSR_VCSR, CSR_CLASS_V, PRIV_SPEC_CLASS_NONE, PRIV_SPEC_CLASS_NONE)
++DECLARE_CSR(vl, CSR_VL, CSR_CLASS_V, PRIV_SPEC_CLASS_NONE, PRIV_SPEC_CLASS_NONE)
++DECLARE_CSR(vtype, CSR_VTYPE, CSR_CLASS_V, PRIV_SPEC_CLASS_NONE, PRIV_SPEC_CLASS_NONE)
++DECLARE_CSR(vlenb, CSR_VLENB, CSR_CLASS_V, PRIV_SPEC_CLASS_NONE, PRIV_SPEC_CLASS_NONE)
+ DECLARE_CSR(dcsr, CSR_DCSR, CSR_CLASS_DEBUG, PRIV_SPEC_CLASS_NONE, PRIV_SPEC_CLASS_NONE)
+ DECLARE_CSR(dpc, CSR_DPC, CSR_CLASS_DEBUG, PRIV_SPEC_CLASS_NONE, PRIV_SPEC_CLASS_NONE)
+ DECLARE_CSR(dscratch0, CSR_DSCRATCH0, CSR_CLASS_DEBUG, PRIV_SPEC_CLASS_NONE, PRIV_SPEC_CLASS_NONE)
+@@ -1391,4 +3581,4 @@ DECLARE_CSR_ALIAS(itrigger, CSR_TDATA1, CSR_CLASS_DEBUG, PRIV_SPEC_CLASS_NONE, P
+ DECLARE_CSR_ALIAS(etrigger, CSR_TDATA1, CSR_CLASS_DEBUG, PRIV_SPEC_CLASS_NONE, PRIV_SPEC_CLASS_NONE)
+ DECLARE_CSR_ALIAS(textra32, CSR_TDATA3, CSR_CLASS_DEBUG, PRIV_SPEC_CLASS_NONE, PRIV_SPEC_CLASS_NONE)
+ DECLARE_CSR_ALIAS(textra64, CSR_TDATA3, CSR_CLASS_DEBUG, PRIV_SPEC_CLASS_NONE, PRIV_SPEC_CLASS_NONE)
+-#endif /* DECLARE_CSR_ALIAS.  */
++#endif /* DECLARE_CSR_ALIAS.  */
+\ No newline at end of file
+diff --git a/include/opcode/riscv.h b/include/opcode/riscv.h
+index 2f1bc793e5..c90fc16f34 100644
+--- a/include/opcode/riscv.h
++++ b/include/opcode/riscv.h
+@@ -52,6 +52,28 @@ static const char * const riscv_pred_succ[16] =
+   "i", "iw", "ir", "irw", "io", "iow", "ior", "iorw"
+ };
+ 
++/* List of vsetvli vsew constants.  */
++static const char * const riscv_vsew[8] =
++{
++  "e8", "e16", "e32", "e64", "e128", "e256", "e512", "e1024"
++};
++
++/* List of vsetvli vlmul constants.  */
++static const char * const riscv_vlmul[8] =
++{
++  "m1", "m2", "m4", "m8", 0, "mf8", "mf4", "mf2"
++};
++
++static const char * const riscv_vta[2] =
++{
++  "tu", "ta"
++};
++
++static const char * const riscv_vma[2] =
++{
++  "mu", "ma"
++};
++
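
These tables are indexed with the vtype fields laid out by the OP_SH_VSEW/OP_SH_VLMUL/OP_SH_VTA/OP_SH_VMA defines added further down. A minimal decode sketch (hypothetical function, not part of the patch; it assumes the arrays above are in scope, and note that slot 4 of riscv_vlmul is a reserved NULL entry):

    #include <stdio.h>
    #include <stdint.h>

    static void
    print_vtype (uint32_t imm)
    {
      const char *lmul = riscv_vlmul[imm & 0x7];        /* bits 2-0  */
      const char *sew  = riscv_vsew[(imm >> 3) & 0x7];  /* bits 5-3  */
      const char *ta   = riscv_vta[(imm >> 6) & 0x1];   /* bit 6     */
      const char *ma   = riscv_vma[(imm >> 7) & 0x1];   /* bit 7     */

      if (lmul)  /* skip the reserved vlmul encoding  */
        printf ("%s,%s,%s,%s\n", sew, lmul, ta, ma);
      else
        printf ("%u\n", imm);
    }

    /* print_vtype (0xc1) prints "e8,m2,ta,ma".  */
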
+ #define RVC_JUMP_BITS 11
+ #define RVC_JUMP_REACH ((1ULL << RVC_JUMP_BITS) * RISCV_JUMP_ALIGN)
+ 
+@@ -100,6 +122,17 @@ static const char * const riscv_pred_succ[16] =
+ #define EXTRACT_RVC_J_IMM(x) \
+   ((RV_X(x, 3, 3) << 1) | (RV_X(x, 11, 1) << 4) | (RV_X(x, 2, 1) << 5) | (RV_X(x, 7, 1) << 6) | (RV_X(x, 6, 1) << 7) | (RV_X(x, 9, 2) << 8) | (RV_X(x, 8, 1) << 10) | (-RV_X(x, 12, 1) << 11))
+ 
++#define EXTRACT_RVV_VI_IMM(x) \
++  (RV_X(x, 15, 5) | (-RV_X(x, 19, 1) << 5))
++#define EXTRACT_RVV_VI_UIMM(x) \
++  (RV_X(x, 15, 5))
++#define EXTRACT_RVV_OFFSET(x) \
++  (RV_X(x, 29, 3))
++#define EXTRACT_RVV_VB_IMM(x) \
++  (RV_X(x, 20, 10))
++#define EXTRACT_RVV_VC_IMM(x) \
++  (RV_X(x, 20, 11))
++
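
EXTRACT_RVV_VI_IMM sign-extends the 5-bit simm held in bits 19:15: RV_X(x, s, n) pulls n bits starting at bit s, so the -RV_X(x, 19, 1) << 5 term smears the sign bit upward. A self-contained sketch, assuming the usual RV_X bit-field helper used by the EXTRACT_RVC_* macros above:

    #include <assert.h>

    #define RV_X(x, s, n)  (((x) >> (s)) & ((1 << (n)) - 1))
    #define EXTRACT_RVV_VI_IMM(x)  (RV_X(x, 15, 5) | (-RV_X(x, 19, 1) << 5))

    int
    main (void)
    {
      assert (EXTRACT_RVV_VI_IMM (0x1fu << 15) == -1);  /* 0b11111 -> -1  */
      assert (EXTRACT_RVV_VI_IMM (0x0fu << 15) == 15);  /* 0b01111 -> 15  */
      return 0;
    }
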
+ #define ENCODE_ITYPE_IMM(x) \
+   (RV_X(x, 0, 12) << 20)
+ #define ENCODE_STYPE_IMM(x) \
+@@ -234,6 +267,11 @@ static const char * const riscv_pred_succ[16] =
+ #define OP_SH_FUNCT7           25
+ #define OP_MASK_FUNCT2         0x3
+ #define OP_SH_FUNCT2           25
++#define OP_MASK_PREF_TYPE      0x0f
++#define OP_SH_PREF_TYPE        20
++#define OP_MASK_PREF_OFFSET    0xff
++#define OP_SH_PREF_OFFSET      24
++
+ 
+ /* RVC fields.  */
+ 
+@@ -256,6 +294,35 @@ static const char * const riscv_pred_succ[16] =
+ #define OP_MASK_CFUNCT2                0x3
+ #define OP_SH_CFUNCT2          5
+ 
++/* RVV fields.  */
++
++#define OP_MASK_VD		0x1f
++#define OP_SH_VD		7
++#define OP_MASK_VS1		0x1f
++#define OP_SH_VS1		15
++#define OP_MASK_VS2		0x1f
++#define OP_SH_VS2		20
++#define OP_MASK_VIMM		0x1f
++#define OP_SH_VIMM		15
++#define OP_MASK_VMASK		0x1
++#define OP_SH_VMASK		25
++#define OP_MASK_VFUNCT6		0x3f
++#define OP_SH_VFUNCT6		26
++
++#define OP_MASK_VLMUL		0x7
++#define OP_SH_VLMUL		0
++#define OP_MASK_VSEW		0x7
++#define OP_SH_VSEW		3
++#define OP_MASK_VTA		0x1
++#define OP_SH_VTA		6
++#define OP_MASK_VMA		0x1
++#define OP_SH_VMA		7
++#define OP_MASK_VTYPE_RES	0x1
++#define OP_SH_VTYPE_RES 	10
++
++#define OP_MASK_VWD		0x1
++#define OP_SH_VWD		26
++
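
The disassembler hunk below reads these fields with EXTRACT_OPERAND (FIELD, insn), which boils down to shifting by OP_SH_<FIELD> and masking with OP_MASK_<FIELD>. A sketch of that expansion (not the header's literal definition):

    unsigned int vd = (insn >> OP_SH_VD) & OP_MASK_VD;        /* vd, bits 11-7       */
    unsigned int vm = (insn >> OP_SH_VMASK) & OP_MASK_VMASK;  /* bit 25; 0 = masked  */
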
+ /* ABI names for selected x-registers.  */
+ 
+ #define X_RA 1
+@@ -269,6 +336,8 @@ static const char * const riscv_pred_succ[16] =
+ 
+ #define NGPR 32
+ #define NFPR 32
++#define NVECR 32
++#define NVECM 1
+ 
+ /* These fake label defines are use by both the assembler, and
+    libopcodes.  The assembler uses this when it needs to generate a fake
+@@ -309,6 +378,14 @@ enum riscv_insn_class
+    INSN_CLASS_D_AND_C,
+    INSN_CLASS_F_AND_C,
+    INSN_CLASS_Q,
++   INSN_CLASS_ZBA,
++   INSN_CLASS_ZBB,
++   INSN_CLASS_ZBC,
++   INSN_CLASS_ZBS,
++   INSN_CLASS_V,
++   INSN_CLASS_V_AND_F,
++   INSN_CLASS_V_OR_ZVAMO,
++   INSN_CLASS_V_OR_ZVLSSEG,
+   };
+ 
+ /* This structure holds information for a particular instruction.  */
+@@ -351,7 +428,8 @@ enum riscv_isa_spec_class
+ 
+   ISA_SPEC_CLASS_2P2,
+   ISA_SPEC_CLASS_20190608,
+-  ISA_SPEC_CLASS_20191213
++  ISA_SPEC_CLASS_20191213,
++  ISA_SPEC_CLASS_DRAFT
+ };
+ 
+ /* This structure holds version information for specific ISA.  */
+@@ -476,6 +554,12 @@ enum
+   M_CALL,
+   M_J,
+   M_LI,
++  M_ZEXTH,
++  M_ZEXTW,
++  M_SEXTB,
++  M_SEXTH,
++  M_VMSGE,
++  M_VMSGEU,
+   M_NUM_MACROS
+ };
+ 
+@@ -484,6 +568,8 @@ extern const char * const riscv_gpr_names_numeric[NGPR];
+ extern const char * const riscv_gpr_names_abi[NGPR];
+ extern const char * const riscv_fpr_names_numeric[NFPR];
+ extern const char * const riscv_fpr_names_abi[NFPR];
++extern const char * const riscv_vecr_names_numeric[NVECR];
++extern const char * const riscv_vecm_names_numeric[NVECM];
+ 
+ extern const struct riscv_opcode riscv_opcodes[];
+ extern const struct riscv_opcode riscv_insn_types[];
+diff --git a/opcodes/riscv-dis.c b/opcodes/riscv-dis.c
+index 655ce4ad0b..e5989566f2 100644
+--- a/opcodes/riscv-dis.c
++++ b/opcodes/riscv-dis.c
+@@ -43,6 +43,7 @@ struct riscv_private_data
+ 
+ static const char * const *riscv_gpr_names;
+ static const char * const *riscv_fpr_names;
++static const char * const *riscv_vecr_names;
+ 
+ /* Other options.  */
+ static int no_aliases;	/* If set disassemble as most general inst.  */
+@@ -52,6 +53,7 @@ set_default_riscv_dis_options (void)
+ {
+   riscv_gpr_names = riscv_gpr_names_abi;
+   riscv_fpr_names = riscv_fpr_names_abi;
++  riscv_vecr_names = riscv_vecr_names_numeric;
+   no_aliases = 0;
+ }
+ 
+@@ -344,6 +346,14 @@ print_insn_args (const char *d, insn_t l, bfd_vma pc, disassemble_info *info)
+ 	  print (info->stream, "0x%x", (int)EXTRACT_OPERAND (SHAMTW, l));
+ 	  break;
+ 
++	case '#':
++	  print (info->stream, "0x%x", (int)EXTRACT_OPERAND (PREF_TYPE, l));
++	  break;
++
++	case '+':
++	  print (info->stream, "0x%x", (int)EXTRACT_OPERAND (PREF_OFFSET, l));
++	  break;
++
+ 	case 'S':
+ 	case 'U':
+ 	  print (info->stream, "%s", riscv_fpr_names[rs1]);
+@@ -401,6 +411,88 @@ print_insn_args (const char *d, insn_t l, bfd_vma pc, disassemble_info *info)
+ 	  print (info->stream, "%d", rs1);
+ 	  break;
+ 
++	case 'V': /* RVV */
++	  switch (*++d)
++	    {
++	    case 'd':
++	    case 'f':
++	      print (info->stream, "%s",
++		      riscv_vecr_names[EXTRACT_OPERAND (VD, l)]);
++	      break;
++
++	    case 'e':
++	      if (!EXTRACT_OPERAND (VWD, l))
++		print (info->stream, "%s", riscv_gpr_names[0]);
++	      else
++		print (info->stream, "%s",
++		       riscv_vecr_names[EXTRACT_OPERAND (VD, l)]);
++	      break;
++
++	    case 's':
++	      print (info->stream, "%s",
++		      riscv_vecr_names[EXTRACT_OPERAND (VS1, l)]);
++	      break;
++
++	    case 't':
++	    case 'u': /* VS1 == VS2 already verified at this point.  */
++	    case 'v': /* VD == VS1 == VS2 already verified at this point.  */
++	      print (info->stream, "%s",
++		      riscv_vecr_names[EXTRACT_OPERAND (VS2, l)]);
++	      break;
++
++	    case '0':
++	      print (info->stream, "%s", riscv_vecr_names[0]);
++	      break;
++
++	    case 'b':
++	    case 'c':
++	      {
++		int imm = (*d == 'b') ? EXTRACT_RVV_VB_IMM (l)
++				      : EXTRACT_RVV_VC_IMM (l);
++		unsigned int imm_vlmul = EXTRACT_OPERAND (VLMUL, imm);
++		unsigned int imm_vsew = EXTRACT_OPERAND (VSEW, imm);
++		unsigned int imm_vta = EXTRACT_OPERAND (VTA, imm);
++		unsigned int imm_vma = EXTRACT_OPERAND (VMA, imm);
++		unsigned int imm_vtype_res = EXTRACT_OPERAND (VTYPE_RES, imm);
++
++		if (imm_vsew < ARRAY_SIZE (riscv_vsew)
++		    && imm_vlmul < ARRAY_SIZE (riscv_vlmul)
++		    && imm_vta < ARRAY_SIZE (riscv_vta)
++		    && imm_vma < ARRAY_SIZE (riscv_vma)
++		    && ! imm_vtype_res)
++		  print (info->stream, "%s,%s,%s,%s", riscv_vsew[imm_vsew],
++			 riscv_vlmul[imm_vlmul], riscv_vta[imm_vta],
++			 riscv_vma[imm_vma]);
++		else
++		  print (info->stream, "%d", imm);
++	      }
++	      break;
++
++	    case 'i':
++	      print (info->stream, "%d", (int)EXTRACT_RVV_VI_IMM (l));
++	      break;
++
++	    case 'j':
++	      print (info->stream, "%d", (int)EXTRACT_RVV_VI_UIMM (l));
++	      break;
++
++	    case 'k':
++	      print (info->stream, "%d", (int)EXTRACT_RVV_OFFSET (l));
++	      break;
++
++	    case 'm':
++	      if (! EXTRACT_OPERAND (VMASK, l))
++		print (info->stream, ",%s", riscv_vecm_names_numeric[0]);
++	      break;
++
++	    default:
++	      /* xgettext:c-format */
++	      print (info->stream, _("# internal error, undefined modifier (V%c)"),
++		     *d);
++	      return;
++	    }
++	  break;
++
+ 	default:
+ 	  /* xgettext:c-format */
+ 	  print (info->stream, _("# internal error, undefined modifier (%c)"),
+diff --git a/opcodes/riscv-opc.c b/opcodes/riscv-opc.c
+index 03e3bd7c05..ffa817cc65 100644
+--- a/opcodes/riscv-opc.c
++++ b/opcodes/riscv-opc.c
+@@ -24,6 +24,130 @@
+ #include "opcode/riscv.h"
+ #include <stdio.h>
+ 
++#define MASK_SHAMT (OP_MASK_SHAMT << OP_SH_SHAMT)
++#define MATCH_SHAMT_REV_32     (0b11111 << OP_SH_SHAMT)
++#define MATCH_SHAMT_REV_64     (0b111111 << OP_SH_SHAMT)
++#define MATCH_SHAMT_REV_P_32   (0b00001 << OP_SH_SHAMT)
++#define MATCH_SHAMT_REV_P_64   (0b000001 << OP_SH_SHAMT)
++#define MATCH_SHAMT_REV_N_32   (0b00011 << OP_SH_SHAMT)
++#define MATCH_SHAMT_REV_N_64   (0b000011 << OP_SH_SHAMT)
++#define MATCH_SHAMT_REV_B_32   (0b00111 << OP_SH_SHAMT)
++#define MATCH_SHAMT_REV_B_64   (0b000111 << OP_SH_SHAMT)
++#define MATCH_SHAMT_REV_H_32   (0b01111 << OP_SH_SHAMT)
++#define MATCH_SHAMT_REV_H_64   (0b001111 << OP_SH_SHAMT)
++#define MATCH_SHAMT_REV_W_64   (0b011111 << OP_SH_SHAMT)
++#define MATCH_SHAMT_REV2_32    (0b11110 << OP_SH_SHAMT)
++#define MATCH_SHAMT_REV2_64    (0b111110 << OP_SH_SHAMT)
++#define MATCH_SHAMT_REV2_N_32  (0b00010 << OP_SH_SHAMT)
++#define MATCH_SHAMT_REV2_N_64  (0b000010 << OP_SH_SHAMT)
++#define MATCH_SHAMT_REV2_B_32  (0b00110 << OP_SH_SHAMT)
++#define MATCH_SHAMT_REV2_B_64  (0b000110 << OP_SH_SHAMT)
++#define MATCH_SHAMT_REV2_H_32  (0b01110 << OP_SH_SHAMT)
++#define MATCH_SHAMT_REV2_H_64  (0b001110 << OP_SH_SHAMT)
++#define MATCH_SHAMT_REV2_W_64  (0b011110 << OP_SH_SHAMT)
++#define MATCH_SHAMT_REV4_32    (0b11100 << OP_SH_SHAMT)
++#define MATCH_SHAMT_REV4_64    (0b111100 << OP_SH_SHAMT)
++#define MATCH_SHAMT_REV4_B_32  (0b00100 << OP_SH_SHAMT)
++#define MATCH_SHAMT_REV4_B_64  (0b000100 << OP_SH_SHAMT)
++#define MATCH_SHAMT_REV4_H_32  (0b01100 << OP_SH_SHAMT)
++#define MATCH_SHAMT_REV4_H_64  (0b001100 << OP_SH_SHAMT)
++#define MATCH_SHAMT_REV4_W_64  (0b011100 << OP_SH_SHAMT)
++#define MATCH_SHAMT_REV8_32    (0b11000 << OP_SH_SHAMT)
++#define MATCH_SHAMT_REV8_64    (0b111000 << OP_SH_SHAMT)
++#define MATCH_SHAMT_REV8_H_32  (0b01000 << OP_SH_SHAMT)
++#define MATCH_SHAMT_REV8_H_64  (0b001000 << OP_SH_SHAMT)
++#define MATCH_SHAMT_REV8_W_64  (0b011000 << OP_SH_SHAMT)
++#define MATCH_SHAMT_REV16_32   (0b10000 << OP_SH_SHAMT)
++#define MATCH_SHAMT_REV16_64   (0b110000 << OP_SH_SHAMT)
++#define MATCH_SHAMT_REV16_W_64 (0b010000 << OP_SH_SHAMT)
++#define MATCH_SHAMT_REV32_64   (0b100000 << OP_SH_SHAMT)
++#define MATCH_SHAMT_ZIP_32     (0b1111 << OP_SH_SHAMT)
++#define MATCH_SHAMT_ZIP_64     (0b11111 << OP_SH_SHAMT)
++#define MATCH_SHAMT_ZIP_N_32   (0b0001 << OP_SH_SHAMT)
++#define MATCH_SHAMT_ZIP_N_64   (0b00001 << OP_SH_SHAMT)
++#define MATCH_SHAMT_ZIP_B_32   (0b0011 << OP_SH_SHAMT)
++#define MATCH_SHAMT_ZIP_B_64   (0b00011 << OP_SH_SHAMT)
++#define MATCH_SHAMT_ZIP_H_32   (0b0111 << OP_SH_SHAMT)
++#define MATCH_SHAMT_ZIP_H_64   (0b00111 << OP_SH_SHAMT)
++#define MATCH_SHAMT_ZIP_W_64   (0b01111 << OP_SH_SHAMT)
++#define MATCH_SHAMT_ZIP2_32    (0b1110 << OP_SH_SHAMT)
++#define MATCH_SHAMT_ZIP2_64    (0b11110 << OP_SH_SHAMT)
++#define MATCH_SHAMT_ZIP2_B_32  (0b0010 << OP_SH_SHAMT)
++#define MATCH_SHAMT_ZIP2_B_64  (0b00010 << OP_SH_SHAMT)
++#define MATCH_SHAMT_ZIP2_H_32  (0b0110 << OP_SH_SHAMT)
++#define MATCH_SHAMT_ZIP2_H_64  (0b00110 << OP_SH_SHAMT)
++#define MATCH_SHAMT_ZIP2_W_64  (0b01110 << OP_SH_SHAMT)
++#define MATCH_SHAMT_ZIP4_32    (0b1100 << OP_SH_SHAMT)
++#define MATCH_SHAMT_ZIP4_64    (0b11100 << OP_SH_SHAMT)
++#define MATCH_SHAMT_ZIP4_H_32  (0b0100 << OP_SH_SHAMT)
++#define MATCH_SHAMT_ZIP4_H_64  (0b00100 << OP_SH_SHAMT)
++#define MATCH_SHAMT_ZIP4_W_64  (0b01100 << OP_SH_SHAMT)
++#define MATCH_SHAMT_ZIP8_32    (0b1000 << OP_SH_SHAMT)
++#define MATCH_SHAMT_ZIP8_64    (0b11000 << OP_SH_SHAMT)
++#define MATCH_SHAMT_ZIP8_W_64  (0b01000 << OP_SH_SHAMT)
++#define MATCH_SHAMT_ZIP16_64   (0b10000 << OP_SH_SHAMT)
++#define MATCH_SHAMT_UNZIP_32     (0b1111 << OP_SH_SHAMT)
++#define MATCH_SHAMT_UNZIP_64     (0b11111 << OP_SH_SHAMT)
++#define MATCH_SHAMT_UNZIP_N_32   (0b0001 << OP_SH_SHAMT)
++#define MATCH_SHAMT_UNZIP_N_64   (0b00001 << OP_SH_SHAMT)
++#define MATCH_SHAMT_UNZIP_B_32   (0b0011 << OP_SH_SHAMT)
++#define MATCH_SHAMT_UNZIP_B_64   (0b00011 << OP_SH_SHAMT)
++#define MATCH_SHAMT_UNZIP_H_32   (0b0111 << OP_SH_SHAMT)
++#define MATCH_SHAMT_UNZIP_H_64   (0b00111 << OP_SH_SHAMT)
++#define MATCH_SHAMT_UNZIP_W_64   (0b01111 << OP_SH_SHAMT)
++#define MATCH_SHAMT_UNZIP2_32    (0b1110 << OP_SH_SHAMT)
++#define MATCH_SHAMT_UNZIP2_64    (0b11110 << OP_SH_SHAMT)
++#define MATCH_SHAMT_UNZIP2_B_32  (0b0010 << OP_SH_SHAMT)
++#define MATCH_SHAMT_UNZIP2_B_64  (0b00010 << OP_SH_SHAMT)
++#define MATCH_SHAMT_UNZIP2_H_32  (0b0110 << OP_SH_SHAMT)
++#define MATCH_SHAMT_UNZIP2_H_64  (0b00110 << OP_SH_SHAMT)
++#define MATCH_SHAMT_UNZIP2_W_64  (0b01110 << OP_SH_SHAMT)
++#define MATCH_SHAMT_UNZIP4_32    (0b1100 << OP_SH_SHAMT)
++#define MATCH_SHAMT_UNZIP4_64    (0b11100 << OP_SH_SHAMT)
++#define MATCH_SHAMT_UNZIP4_H_32  (0b0100 << OP_SH_SHAMT)
++#define MATCH_SHAMT_UNZIP4_H_64  (0b00100 << OP_SH_SHAMT)
++#define MATCH_SHAMT_UNZIP4_W_64  (0b01100 << OP_SH_SHAMT)
++#define MATCH_SHAMT_UNZIP8_32    (0b1000 << OP_SH_SHAMT)
++#define MATCH_SHAMT_UNZIP8_64    (0b11000 << OP_SH_SHAMT)
++#define MATCH_SHAMT_UNZIP8_W_64  (0b01000 << OP_SH_SHAMT)
++#define MATCH_SHAMT_UNZIP16_64   (0b10000 << OP_SH_SHAMT)
++#define MATCH_SHAMT_ORC_32     (0b11111 << OP_SH_SHAMT)
++#define MATCH_SHAMT_ORC_64     (0b111111 << OP_SH_SHAMT)
++#define MATCH_SHAMT_ORC_P_32   (0b00001 << OP_SH_SHAMT)
++#define MATCH_SHAMT_ORC_P_64   (0b000001 << OP_SH_SHAMT)
++#define MATCH_SHAMT_ORC_N_32   (0b00011 << OP_SH_SHAMT)
++#define MATCH_SHAMT_ORC_N_64   (0b000011 << OP_SH_SHAMT)
++#define MATCH_SHAMT_ORC_B_32   (0b00111 << OP_SH_SHAMT)
++#define MATCH_SHAMT_ORC_B_64   (0b000111 << OP_SH_SHAMT)
++#define MATCH_SHAMT_ORC_H_32   (0b01111 << OP_SH_SHAMT)
++#define MATCH_SHAMT_ORC_H_64   (0b001111 << OP_SH_SHAMT)
++#define MATCH_SHAMT_ORC_W_64   (0b011111 << OP_SH_SHAMT)
++#define MATCH_SHAMT_ORC2_32    (0b11110 << OP_SH_SHAMT)
++#define MATCH_SHAMT_ORC2_64    (0b111110 << OP_SH_SHAMT)
++#define MATCH_SHAMT_ORC2_N_32  (0b00010 << OP_SH_SHAMT)
++#define MATCH_SHAMT_ORC2_N_64  (0b000010 << OP_SH_SHAMT)
++#define MATCH_SHAMT_ORC2_B_32  (0b00110 << OP_SH_SHAMT)
++#define MATCH_SHAMT_ORC2_B_64  (0b000110 << OP_SH_SHAMT)
++#define MATCH_SHAMT_ORC2_H_32  (0b01110 << OP_SH_SHAMT)
++#define MATCH_SHAMT_ORC2_H_64  (0b001110 << OP_SH_SHAMT)
++#define MATCH_SHAMT_ORC2_W_64  (0b011110 << OP_SH_SHAMT)
++#define MATCH_SHAMT_ORC4_32    (0b11100 << OP_SH_SHAMT)
++#define MATCH_SHAMT_ORC4_64    (0b111100 << OP_SH_SHAMT)
++#define MATCH_SHAMT_ORC4_B_32  (0b00100 << OP_SH_SHAMT)
++#define MATCH_SHAMT_ORC4_B_64  (0b000100 << OP_SH_SHAMT)
++#define MATCH_SHAMT_ORC4_H_32  (0b01100 << OP_SH_SHAMT)
++#define MATCH_SHAMT_ORC4_H_64  (0b001100 << OP_SH_SHAMT)
++#define MATCH_SHAMT_ORC4_W_64  (0b011100 << OP_SH_SHAMT)
++#define MATCH_SHAMT_ORC8_32    (0b11000 << OP_SH_SHAMT)
++#define MATCH_SHAMT_ORC8_64    (0b111000 << OP_SH_SHAMT)
++#define MATCH_SHAMT_ORC8_H_32  (0b01000 << OP_SH_SHAMT)
++#define MATCH_SHAMT_ORC8_H_64  (0b001000 << OP_SH_SHAMT)
++#define MATCH_SHAMT_ORC8_W_64  (0b011000 << OP_SH_SHAMT)
++#define MATCH_SHAMT_ORC16_32   (0b10000 << OP_SH_SHAMT)
++#define MATCH_SHAMT_ORC16_64   (0b110000 << OP_SH_SHAMT)
++#define MATCH_SHAMT_ORC16_W_64 (0b010000 << OP_SH_SHAMT)
++#define MATCH_SHAMT_ORC32_64   (0b100000 << OP_SH_SHAMT)
++
+ /* Register names used by gas and objdump.  */
+ 
+ const char * const riscv_gpr_names_numeric[NGPR] =
+@@ -56,6 +180,20 @@ const char * const riscv_fpr_names_abi[NFPR] = {
+   "fs8", "fs9", "fs10", "fs11", "ft8", "ft9", "ft10", "ft11"
+ };
+ 
++const char * const riscv_vecr_names_numeric[NVECR] =
++{
++  "v0",   "v1",   "v2",   "v3",   "v4",   "v5",   "v6",   "v7",
++  "v8",   "v9",   "v10",  "v11",  "v12",  "v13",  "v14",  "v15",
++  "v16",  "v17",  "v18",  "v19",  "v20",  "v21",  "v22",  "v23",
++  "v24",  "v25",  "v26",  "v27",  "v28",  "v29",  "v30",  "v31"
++};
++
++const char * const riscv_vecm_names_numeric[NVECM] =
++{
++  "v0.t"
++};
++
+ /* The order of overloaded instructions matters.  Label arguments and
+    register arguments look the same. Instructions that can have either
+    for arguments must appear in the correct order in this table for the
+@@ -79,6 +217,15 @@ const char * const riscv_fpr_names_abi[NFPR] = {
+ #define MASK_AQ (OP_MASK_AQ << OP_SH_AQ)
+ #define MASK_RL (OP_MASK_RL << OP_SH_RL)
+ #define MASK_AQRL (MASK_AQ | MASK_RL)
++#define MATCH_SHAMT_ORC_B (0b00111 << OP_SH_SHAMT)
++#define MASK_VD  (OP_MASK_VD << OP_SH_VD)
++#define MASK_VS1 (OP_MASK_VS1 << OP_SH_VS1)
++#define MASK_VS2 (OP_MASK_VS2 << OP_SH_VS2)
++#define MASK_VMASK (OP_MASK_VMASK << OP_SH_VMASK)
+ 
+ static int
+ match_opcode (const struct riscv_opcode *op, insn_t insn)
+@@ -101,6 +248,47 @@ match_rs1_eq_rs2 (const struct riscv_opcode *op, insn_t insn)
+   return match_opcode (op, insn) && rs1 == rs2;
+ }
+ 
++static int
++match_vs1_eq_vs2 (const struct riscv_opcode *op,
++		  insn_t insn)
++{
++  int vs1 = (insn & MASK_VS1) >> OP_SH_VS1;
++  int vs2 = (insn & MASK_VS2) >> OP_SH_VS2;
++
++  return match_opcode (op, insn) && vs1 == vs2;
++}
++
++static int
++match_vs1_eq_vs2_neq_vm (const struct riscv_opcode *op,
++			 insn_t insn)
++{
++  int vd = (insn & MASK_VD) >> OP_SH_VD;
++  int vs1 = (insn & MASK_VS1) >> OP_SH_VS1;
++  int vs2 = (insn & MASK_VS2) >> OP_SH_VS2;
++  int vm = (insn & MASK_VMASK) >> OP_SH_VMASK;
++
++  /* A masked op (vm == 0) must not use v0 as the destination, since vd
++     would overlap the v0 mask register.  */
++  if (!vm && vm == vd)
++    return 0;
++
++  return match_opcode (op, insn) && vs1 == vs2;
++}
++
++static int
++match_vd_eq_vs1_eq_vs2 (const struct riscv_opcode *op,
++			insn_t insn)
++{
++  int vd =  (insn & MASK_VD) >> OP_SH_VD;
++  int vs1 = (insn & MASK_VS1) >> OP_SH_VS1;
++  int vs2 = (insn & MASK_VS2) >> OP_SH_VS2;
++
++  return match_opcode (op, insn) && vd == vs1 && vs1 == vs2;
++}
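++
++/* A note on intent (inferred from the matcher names): entries registered
++   with matchers like match_vd_eq_vs1_eq_vs2 claim an encoding only when
++   the named operand fields coincide, which is how aliases of the form
++   "op vd,vd,vd" (e.g. RVV mask aliases such as vmset.m for
++   vmxnor.mm vd,vd,vd) can be given their own table entries.  */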
++
+ static int
+ match_rd_nonzero (const struct riscv_opcode *op, insn_t insn)
+ {
+@@ -195,6 +383,249 @@ match_srxi_as_c_srxi (const struct riscv_opcode *op, insn_t insn)
+   return match_opcode (op, insn) && EXTRACT_RVC_IMM (insn) != 0;
+ }
+ 
++
++/* Matchers used to check the RVV register-overlap and alignment
++   constraints.  */
++
++static int
++match_widen_vd_neq_vs1_neq_vs2_neq_vm (const struct riscv_opcode *op,
++				       insn_t insn)
++{
++  int vd = (insn & MASK_VD) >> OP_SH_VD;
++  int vs1 = (insn & MASK_VS1) >> OP_SH_VS1;
++  int vs2 = (insn & MASK_VS2) >> OP_SH_VS2;
++  int vm = (insn & MASK_VMASK) >> OP_SH_VMASK;
++
++  /* The widened vd occupies the register pair vd/vd+1, so vd must be
++     even, and neither source nor the v0 mask may overlap that pair.  */
++  if ((vd % 2) != 0)
++    return 0;
++  else if (vs1 >= vd && vs1 <= (vd + 1))
++    return 0;
++  else if (vs2 >= vd && vs2 <= (vd + 1))
++    return 0;
++  else if (!vm && vm >= vd && vm <= (vd + 1))
++    return 0;
++
++  return match_opcode (op, insn);
++}
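++
++/* For example, under the checks above a widening op with vd == v2
++   matches with vs1 == v4 and vs2 == v6, but not with vs1 == v3, since
++   v3 falls inside the vd/vd+1 destination pair v2/v3.  */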
++
++static int
++match_widen_vd_neq_vs1_neq_vm (const struct riscv_opcode *op,
++			       insn_t insn)
++{
++  int vd = (insn & MASK_VD) >> OP_SH_VD;
++  int vs1 = (insn & MASK_VS1) >> OP_SH_VS1;
++  int vs2 = (insn & MASK_VS2) >> OP_SH_VS2;
++  int vm = (insn & MASK_VMASK) >> OP_SH_VMASK;
++
++  /* vd and the widened vs2 each occupy a register pair, so both must be
++     even; vs1 and the v0 mask may not overlap the vd/vd+1 pair.  */
++  if ((vd % 2) != 0)
++    return 0;
++  else if ((vs2 % 2) != 0)
++    return 0;
++  else if (vs1 >= vd && vs1 <= (vd + 1))
++    return 0;
++  else if (!vm && vm >= vd && vm <= (vd + 1))
++    return 0;
++
++  return match_opcode (op, insn);
++}
++
++static int
++match_widen_vd_neq_vs2_neq_vm (const struct riscv_opcode *op,
++			       insn_t insn)
++{
++  int vd = (insn & MASK_VD) >> OP_SH_VD;
++  int vs2 = (insn & MASK_VS2) >> OP_SH_VS2;
++  int vm = (insn & MASK_VMASK) >> OP_SH_VMASK;
++
++  /* vd occupies the register pair vd/vd+1, so vd must be even and
++     neither vs2 nor the v0 mask may overlap that pair.  */
++  if ((vd % 2) != 0)
++    return 0;
++  else if (vs2 >= vd && vs2 <= (vd + 1))
++    return 0;
++  else if (!vm && vm >= vd && vm <= (vd + 1))
++    return 0;
++
++  return match_opcode (op, insn);
++}
++
++static int
++match_widen_vd_neq_vm (const struct riscv_opcode *op,
++		       insn_t insn)
++{
++  int vd = (insn & MASK_VD) >> OP_SH_VD;
++  int vs2 = (insn & MASK_VS2) >> OP_SH_VS2;
++  int vm = (insn & MASK_VMASK) >> OP_SH_VMASK;
++
++  /* Both vd and vs2 are register pairs here, so both must be even; the
++     v0 mask may not overlap the vd/vd+1 pair.  */
++  if ((vd % 2) != 0)
++    return 0;
++  else if ((vs2 % 2) != 0)
++    return 0;
++  else if (!vm && vm >= vd && vm <= (vd + 1))
++    return 0;
++
++  return match_opcode (op, insn);
++}
++
++static int
++match_narrow_vd_neq_vs2_neq_vm (const struct riscv_opcode *op,
++				insn_t insn)
++{
++  int vd = (insn & MASK_VD) >> OP_SH_VD;
++  int vs2 = (insn & MASK_VS2) >> OP_SH_VS2;
++  int vm = (insn & MASK_VMASK) >> OP_SH_VMASK;
++
++  /* The wide source vs2 occupies the register pair vs2/vs2+1, so vs2
++     must be even and vd may not fall in that pair; a masked op
++     (vm == 0) additionally keeps vd out of v0/v1.  */
++  if ((vs2 % 2) != 0)
++    return 0;
++  else if (vd >= vs2 && vd <= (vs2 + 1))
++    return 0;
++  else if (!vm && vd >= vm && vd <= (vm + 1))
++    return 0;
++
++  return match_opcode (op, insn);
++}
++
++static int
++match_vd_neq_vs1_neq_vs2 (const struct riscv_opcode *op,
++			  insn_t insn)
++{
++  int vd = (insn & MASK_VD) >> OP_SH_VD;
++  int vs1 = (insn & MASK_VS1) >> OP_SH_VS1;
++  int vs2 = (insn & MASK_VS2) >> OP_SH_VS2;
++
++  /* vd may not overlap either source register.  */
++  if (vs1 == vd)
++    return 0;
++  else if (vs2 == vd)
++    return 0;
++
++  return match_opcode (op, insn);
++}
++
++static int
++match_vd_neq_vs1_neq_vs2_neq_vm (const struct riscv_opcode *op,
++				 insn_t insn)
++{
++  int vd = (insn & MASK_VD) >> OP_SH_VD;
++  int vs1 = (insn & MASK_VS1) >> OP_SH_VS1;
++  int vs2 = (insn & MASK_VS2) >> OP_SH_VS2;
++  int vm = (insn & MASK_VMASK) >> OP_SH_VMASK;
++
++  /* vd may not overlap the sources, nor the v0 mask register when the
++     op is masked (vm == 0).  */
++  if (vs1 == vd)
++    return 0;
++  else if (vs2 == vd)
++    return 0;
++  else if (!vm && vm == vd)
++    return 0;
++
++  return match_opcode (op, insn);
++}
++
++static int
++match_vd_neq_vs2_neq_vm (const struct riscv_opcode *op,
++			 insn_t insn)
++{
++  int vd = (insn & MASK_VD) >> OP_SH_VD;
++  int vs2 = (insn & MASK_VS2) >> OP_SH_VS2;
++  int vm = (insn & MASK_VMASK) >> OP_SH_VMASK;
++
++  /* vd may not overlap vs2, nor the v0 mask register when the op is
++     masked (vm == 0).  */
++  if (vs2 == vd)
++    return 0;
++  else if (!vm && vm == vd)
++    return 0;
++
++  return match_opcode (op, insn);
++}
++
++/* v[m]adc and v[m]sbc use the vm field to encode the implicit v0
++   carry-in register.  That carry-in v0 register may not overlap vd
++   either, so we reuse match_vd_neq_vm to check the constraint.  */
++
++static int
++match_vd_neq_vm (const struct riscv_opcode *op,
++		 insn_t insn)
++{
++  int vd = (insn & MASK_VD) >> OP_SH_VD;
++  int vm = (insn & MASK_VMASK) >> OP_SH_VMASK;
++
++  /* A masked op (vm == 0) must not write its result to the v0 mask
++     register.  */
++  if (!vm && vm == vd)
++    return 0;
++
++  return match_opcode (op, insn);
++}
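++
++/* For example, a vadc.vvm encoding with vd == v0 is rejected above,
++   since vm == 0 means the v0 carry-in would overlap the destination,
++   while the same encoding with vd == v1 matches.  */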
++
++static int
++match_vls_nf_rv (const struct riscv_opcode *op,
++		 insn_t insn)
++{
++  int vd = (insn & MASK_VD) >> OP_SH_VD;
++  /* nf is encoded minus one in bits 31:29.  */
++  int nf = ((insn & (0x7 << 29)) >> 29) + 1;
++
++  /* vd must be aligned to the group size nf.  */
++  if ((vd % nf) != 0)
++    return 0;
++
++  return match_opcode (op, insn);
++}
++
++static int
++match_vmv_nf_rv (const struct riscv_opcode *op,
++		 insn_t insn)
++{
++  int vd = (insn & MASK_VD) >> OP_SH_VD;
++  int vs2 = (insn & MASK_VS2) >> OP_SH_VS2;
++  /* nf is encoded minus one in bits 17:15.  */
++  int nf = ((insn & (0x7 << 15)) >> 15) + 1;
++
++  /* Both vd and vs2 must be aligned to the group size nf.  */
++  if ((vd % nf) != 0)
++    return 0;
++  else if ((vs2 % nf) != 0)
++    return 0;
++
++  return match_opcode (op, insn);
++}
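++
++/* In both nf-based matchers above nf is stored minus one, so e.g. a
++   two-register form (nf == 2) is only matched when vd (and vs2, where
++   checked) is even, keeping each operand aligned to its register
++   group.  */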
++
+ const struct riscv_opcode riscv_opcodes[] =
+ {
+ /* name,     xlen, isa,   operands, match, mask, match_func, pinfo.  */
+@@ -749,6 +1180,10 @@ const struct riscv_opcode riscv_opcodes[] =
+ {"c.fsw",     32, INSN_CLASS_F_AND_C, "CD,Ck(Cs)",  MATCH_C_FSW, MASK_C_FSW, match_opcode, INSN_DREF|INSN_4_BYTE },
+ 
+ /* Supervisor instructions */
++{"cflush.d.l1",   0, INSN_CLASS_I, "s", 	MATCH_CFLUSH_D_L1, MASK_CFLUSH_D_L1, match_opcode, 0 },
++{"cdiscard.d.l1", 0, INSN_CLASS_I, "s",   MATCH_CDISCARD_D_L1, MASK_CDISCARD_D_L1, match_opcode, 0 },
++{"cflush.d.l2",   0, INSN_CLASS_I, "s",   MATCH_CFLUSH_D_L2, MASK_CFLUSH_D_L2, match_opcode, 0 },
++{"cdiscard.d.l2", 0, INSN_CLASS_I, "s",   MATCH_CDISCARD_D_L2, MASK_CDISCARD_D_L2, match_opcode, 0 },
+ {"csrr",       0, INSN_CLASS_I,   "d,E",  MATCH_CSRRS, MASK_CSRRS | MASK_RS1, match_opcode, INSN_ALIAS },
+ {"csrwi",      0, INSN_CLASS_I,   "E,Z",  MATCH_CSRRWI, MASK_CSRRWI | MASK_RD, match_opcode, INSN_ALIAS },
+ {"csrsi",      0, INSN_CLASS_I,   "E,Z",  MATCH_CSRRSI, MASK_CSRRSI | MASK_RD, match_opcode, INSN_ALIAS },
+@@ -780,6 +1215,879 @@ const struct riscv_opcode riscv_opcodes[] =
+ {"sfence.vma", 0, INSN_CLASS_I,   "s,t",  MATCH_SFENCE_VMA, MASK_SFENCE_VMA, match_opcode, 0 },
+ {"wfi",        0, INSN_CLASS_I,   "",     MATCH_WFI, MASK_WFI, match_opcode, 0 },
+ 
++{"clz",        0, INSN_CLASS_ZBB,  "d,s",   MATCH_CLZ, MASK_CLZ, match_opcode, 0 },
++{"ctz",        0, INSN_CLASS_ZBB,  "d,s",   MATCH_CTZ, MASK_CTZ, match_opcode, 0 },
++{"cpop",       0, INSN_CLASS_ZBB,  "d,s",   MATCH_CPOP, MASK_CPOP, match_opcode, 0 },
++{"min",        0, INSN_CLASS_ZBB,  "d,s,t", MATCH_MIN, MASK_MIN, match_opcode, 0 },
++{"max",        0, INSN_CLASS_ZBB,  "d,s,t", MATCH_MAX, MASK_MAX, match_opcode, 0 },
++{"minu",       0, INSN_CLASS_ZBB,  "d,s,t", MATCH_MINU, MASK_MINU, match_opcode, 0 },
++{"maxu",       0, INSN_CLASS_ZBB,  "d,s,t", MATCH_MAXU, MASK_MAXU, match_opcode, 0 },
++{"sext.b",     0, INSN_CLASS_ZBB,  "d,s",   MATCH_SEXT_B, MASK_SEXT_B, match_opcode, 0 },
++{"sext.b",     0, INSN_CLASS_I,    "d,s",   0, (int) M_SEXTB, match_never, INSN_MACRO },
++{"sext.h",     0, INSN_CLASS_ZBB,  "d,s",   MATCH_SEXT_H, MASK_SEXT_H, match_opcode, 0 },
++{"sext.h",     0, INSN_CLASS_I,    "d,s",   0, (int) M_SEXTH, match_never, INSN_MACRO },
++{"zext.h",    32, INSN_CLASS_ZBB,  "d,s",   MATCH_PACK, MASK_PACK | MASK_RS2, match_opcode, 0 },
++{"zext.h",    64, INSN_CLASS_ZBB,  "d,s",   MATCH_PACKW, MASK_PACKW | MASK_RS2, match_opcode, 0 },
++{"zext.h",     0, INSN_CLASS_I,    "d,s",   0, (int) M_ZEXTH, match_never, INSN_MACRO },
++{"andn",       0, INSN_CLASS_ZBB,  "d,s,t", MATCH_ANDN, MASK_ANDN, match_opcode, 0 },
++{"orn",        0, INSN_CLASS_ZBB,  "d,s,t", MATCH_ORN, MASK_ORN, match_opcode, 0 },
++{"xnor",       0, INSN_CLASS_ZBB,  "d,s,t", MATCH_XNOR, MASK_XNOR, match_opcode, 0 },
++{"rol",        0, INSN_CLASS_ZBB,  "d,s,t", MATCH_ROL, MASK_ROL, match_opcode, 0 },
++{"rori",       0, INSN_CLASS_ZBB,  "d,s,>", MATCH_RORI, MASK_RORI, match_opcode, 0 },
++{"ror",        0, INSN_CLASS_ZBB,  "d,s,t", MATCH_ROR, MASK_ROR, match_opcode, 0 },
++{"ror",        0, INSN_CLASS_ZBB,  "d,s,>", MATCH_RORI, MASK_RORI, match_opcode, INSN_ALIAS },
++{"rev8",      32, INSN_CLASS_ZBB,  "d,s",   MATCH_GREVI | MATCH_SHAMT_REV8_32 , MASK_GREVI | MASK_SHAMT, match_opcode, 0 },
++{"rev8",      64, INSN_CLASS_ZBB,  "d,s",   MATCH_GREVI | MATCH_SHAMT_REV8_64 , MASK_GREVI | MASK_SHAMT, match_opcode, 0 },
++{"orc.b",      0, INSN_CLASS_ZBB,  "d,s",   MATCH_GORCI | MATCH_SHAMT_ORC_B, MASK_GORCI | MASK_SHAMT, match_opcode, 0 },
++{"clzw",      64, INSN_CLASS_ZBB,  "d,s",   MATCH_CLZW, MASK_CLZW, match_opcode, 0 },
++{"ctzw",      64, INSN_CLASS_ZBB,  "d,s",   MATCH_CTZW, MASK_CTZW, match_opcode, 0 },
++{"cpopw",     64, INSN_CLASS_ZBB,  "d,s",   MATCH_CPOPW, MASK_CPOPW, match_opcode, 0 },
++{"rolw",      64, INSN_CLASS_ZBB,  "d,s,t", MATCH_ROLW, MASK_ROLW, match_opcode, 0 },
++{"roriw",     64, INSN_CLASS_ZBB,  "d,s,<", MATCH_RORIW, MASK_RORIW, match_opcode, 0 },
++{"rorw",      64, INSN_CLASS_ZBB,  "d,s,t", MATCH_RORW, MASK_RORW, match_opcode, 0 },
++{"rorw",      64, INSN_CLASS_ZBB,  "d,s,<", MATCH_RORIW, MASK_RORIW, match_opcode, 0 },
++{"sh1add",     0, INSN_CLASS_ZBA,  "d,s,t", MATCH_SH1ADD, MASK_SH1ADD, match_opcode, 0 },
++{"sh2add",     0, INSN_CLASS_ZBA,  "d,s,t", MATCH_SH2ADD, MASK_SH2ADD, match_opcode, 0 },
++{"sh3add",     0, INSN_CLASS_ZBA,  "d,s,t", MATCH_SH3ADD, MASK_SH3ADD, match_opcode, 0 },
++{"sh1add.uw", 64, INSN_CLASS_ZBA,  "d,s,t", MATCH_SH1ADD_UW, MASK_SH1ADD_UW, match_opcode, 0 },
++{"sh2add.uw", 64, INSN_CLASS_ZBA,  "d,s,t", MATCH_SH2ADD_UW, MASK_SH2ADD_UW, match_opcode, 0 },
++{"sh3add.uw", 64, INSN_CLASS_ZBA,  "d,s,t", MATCH_SH3ADD_UW, MASK_SH3ADD_UW, match_opcode, 0 },
++{"zext.w",    64, INSN_CLASS_ZBA,  "d,s",   MATCH_ADD_UW, MASK_ADD_UW | MASK_RS2, match_opcode, INSN_ALIAS },
++{"zext.w",    64, INSN_CLASS_I,     "d,s",       0, (int) M_ZEXTW, match_never, INSN_MACRO },
++{"add.uw",    64, INSN_CLASS_ZBA,  "d,s,t", MATCH_ADD_UW, MASK_ADD_UW, match_opcode, 0 },
++{"slli.uw",   64, INSN_CLASS_ZBA,  "d,s,>", MATCH_SLLI_UW, MASK_SLLI_UW, match_opcode, 0 },
++{"clmul",      0, INSN_CLASS_ZBC,  "d,s,t", MATCH_CLMUL, MASK_CLMUL, match_opcode, 0 },
++{"clmulh",     0, INSN_CLASS_ZBC,  "d,s,t", MATCH_CLMULH, MASK_CLMULH, match_opcode, 0 },
++{"clmulr",     0, INSN_CLASS_ZBC,  "d,s,t", MATCH_CLMULR, MASK_CLMULR, match_opcode, 0 },
++
++/* ZBS instructions */
++{"bclr",       0, INSN_CLASS_ZBS,  "d,s,t",    MATCH_BCLR, MASK_BCLR, match_opcode, 0 },
++{"bclri",      0, INSN_CLASS_ZBS,  "d,s,>",    MATCH_BCLRI, MASK_BCLRI, match_opcode, 0 },
++{"bext",       0, INSN_CLASS_ZBS,  "d,s,t",    MATCH_BEXT, MASK_BEXT, match_opcode, 0 },
++{"bexti",      0, INSN_CLASS_ZBS,  "d,s,>",    MATCH_BEXTI, MASK_BEXTI, match_opcode, 0 },
++{"binv",       0, INSN_CLASS_ZBS,  "d,s,t",    MATCH_BINV, MASK_BINV, match_opcode, 0 },
++{"binvi",      0, INSN_CLASS_ZBS,  "d,s,>",    MATCH_BINVI, MASK_BINVI, match_opcode, 0 },
++{"bset",       0, INSN_CLASS_ZBS,  "d,s,t",    MATCH_BSET, MASK_BSET, match_opcode, 0 },
++{"bseti",      0, INSN_CLASS_ZBS,  "d,s,>",    MATCH_BSETI, MASK_BSETI, match_opcode, 0 },
++
++/* B instructions excluded from the 1.0 spec */
++{"bsetw",     64, INSN_CLASS_ZBS,  "d,s,t",    MATCH_BSETW, MASK_BSETW, match_opcode, 0 },
++{"bclrw",     64, INSN_CLASS_ZBS,  "d,s,t",    MATCH_BCLRW, MASK_BCLRW, match_opcode, 0 },
++{"binvw",     64, INSN_CLASS_ZBS,  "d,s,t",    MATCH_BINVW, MASK_BINVW, match_opcode, 0 },
++{"bextw",     64, INSN_CLASS_ZBS,  "d,s,t",    MATCH_BEXTW, MASK_BEXTW, match_opcode, 0 },
++{"bsetiw",    64, INSN_CLASS_ZBS,  "d,s,<",    MATCH_BSETIW, MASK_BSETIW, match_opcode, 0 },
++{"bclriw",    64, INSN_CLASS_ZBS,  "d,s,<",    MATCH_BCLRIW, MASK_BCLRIW, match_opcode, 0 },
++{"binviw",    64, INSN_CLASS_ZBS,  "d,s,<",    MATCH_BINVIW, MASK_BINVIW, match_opcode, 0 },
++{"slo",        0, INSN_CLASS_ZBS,  "d,s,t",    MATCH_SLO, MASK_SLO, match_opcode, 0 },
++{"sro",        0, INSN_CLASS_ZBS,  "d,s,t",    MATCH_SRO, MASK_SRO, match_opcode, 0 },
++{"sloi",       0, INSN_CLASS_ZBS,  "d,s,>",    MATCH_SLOI, MASK_SLOI, match_opcode, 0 },
++{"sroi",       0, INSN_CLASS_ZBS,  "d,s,>",    MATCH_SROI, MASK_SROI, match_opcode, 0 },
++{"slow",      64, INSN_CLASS_ZBS,  "d,s,<",    MATCH_SLOW, MASK_SLOW, match_opcode, 0 },
++{"srow",      64, INSN_CLASS_ZBS,  "d,s,<",    MATCH_SROW, MASK_SROW, match_opcode, 0 },
++{"sloiw",     64, INSN_CLASS_ZBS,  "d,s,<",    MATCH_SLOIW, MASK_SLOIW, match_opcode, 0 },
++{"sroiw",     64, INSN_CLASS_ZBS,  "d,s,<",    MATCH_SROIW, MASK_SROIW, match_opcode, 0 },
++
++/* RVV */
++{"vsetvl",     0, INSN_CLASS_V,  "d,s,t",  MATCH_VSETVL, MASK_VSETVL, match_opcode, 0},
++{"vsetvli",    0, INSN_CLASS_V,  "d,s,Vc", MATCH_VSETVLI, MASK_VSETVLI, match_opcode, 0},
++{"vsetivli",   0, INSN_CLASS_V,  "d,Z,Vb", MATCH_VSETIVLI, MASK_VSETIVLI, match_opcode, 0},
++
++{"vle1.v",     0, INSN_CLASS_V,  "Vd,0(s)", MATCH_VLE1V, MASK_VLE1V, match_opcode, INSN_DREF },
++{"vse1.v",     0, INSN_CLASS_V,  "Vd,0(s)", MATCH_VSE1V, MASK_VSE1V, match_opcode, INSN_DREF },
++
++{"vle8.v",     0, INSN_CLASS_V,  "Vd,0(s)Vm", MATCH_VLE8V, MASK_VLE8V, match_vd_neq_vm, INSN_DREF },
++{"vle16.v",    0, INSN_CLASS_V,  "Vd,0(s)Vm", MATCH_VLE16V, MASK_VLE16V, match_vd_neq_vm, INSN_DREF },
++{"vle32.v",    0, INSN_CLASS_V,  "Vd,0(s)Vm", MATCH_VLE32V, MASK_VLE32V, match_vd_neq_vm, INSN_DREF },
++{"vle64.v",    0, INSN_CLASS_V,  "Vd,0(s)Vm", MATCH_VLE64V, MASK_VLE64V, match_vd_neq_vm, INSN_DREF },
++
++{"vse8.v",     0, INSN_CLASS_V,  "Vd,0(s)Vm", MATCH_VSE8V, MASK_VSE8V, match_vd_neq_vm, INSN_DREF },
++{"vse16.v",    0, INSN_CLASS_V,  "Vd,0(s)Vm", MATCH_VSE16V, MASK_VSE16V, match_vd_neq_vm, INSN_DREF },
++{"vse32.v",    0, INSN_CLASS_V,  "Vd,0(s)Vm", MATCH_VSE32V, MASK_VSE32V, match_vd_neq_vm, INSN_DREF },
++{"vse64.v",    0, INSN_CLASS_V,  "Vd,0(s)Vm", MATCH_VSE64V, MASK_VSE64V, match_vd_neq_vm, INSN_DREF },
++
++{"vlse8.v",    0, INSN_CLASS_V,  "Vd,0(s),tVm", MATCH_VLSE8V, MASK_VLSE8V, match_vd_neq_vm, INSN_DREF },
++{"vlse16.v",   0, INSN_CLASS_V,  "Vd,0(s),tVm", MATCH_VLSE16V, MASK_VLSE16V, match_vd_neq_vm, INSN_DREF },
++{"vlse32.v",   0, INSN_CLASS_V,  "Vd,0(s),tVm", MATCH_VLSE32V, MASK_VLSE32V, match_vd_neq_vm, INSN_DREF },
++{"vlse64.v",   0, INSN_CLASS_V,  "Vd,0(s),tVm", MATCH_VLSE64V, MASK_VLSE64V, match_vd_neq_vm, INSN_DREF },
++
++{"vsse8.v",    0, INSN_CLASS_V,  "Vd,0(s),tVm", MATCH_VSSE8V, MASK_VSSE8V, match_vd_neq_vm, INSN_DREF },
++{"vsse16.v",   0, INSN_CLASS_V,  "Vd,0(s),tVm", MATCH_VSSE16V, MASK_VSSE16V, match_vd_neq_vm, INSN_DREF },
++{"vsse32.v",   0, INSN_CLASS_V,  "Vd,0(s),tVm", MATCH_VSSE32V, MASK_VSSE32V, match_vd_neq_vm, INSN_DREF },
++{"vsse64.v",   0, INSN_CLASS_V,  "Vd,0(s),tVm", MATCH_VSSE64V, MASK_VSSE64V, match_vd_neq_vm, INSN_DREF },
++
++{"vloxei8.v",   0, INSN_CLASS_V,  "Vd,0(s),VtVm", MATCH_VLOXEI8V, MASK_VLOXEI8V, match_vd_neq_vm, INSN_DREF },
++{"vloxei16.v",  0, INSN_CLASS_V,  "Vd,0(s),VtVm", MATCH_VLOXEI16V, MASK_VLOXEI16V, match_vd_neq_vm, INSN_DREF },
++{"vloxei32.v",  0, INSN_CLASS_V,  "Vd,0(s),VtVm", MATCH_VLOXEI32V, MASK_VLOXEI32V, match_vd_neq_vm, INSN_DREF },
++{"vloxei64.v",  0, INSN_CLASS_V,  "Vd,0(s),VtVm", MATCH_VLOXEI64V, MASK_VLOXEI64V, match_vd_neq_vm, INSN_DREF },
++
++{"vsoxei8.v",   0, INSN_CLASS_V,  "Vd,0(s),VtVm", MATCH_VSOXEI8V, MASK_VSOXEI8V, match_vd_neq_vm, INSN_DREF },
++{"vsoxei16.v",  0, INSN_CLASS_V,  "Vd,0(s),VtVm", MATCH_VSOXEI16V, MASK_VSOXEI16V, match_vd_neq_vm, INSN_DREF },
++{"vsoxei32.v",  0, INSN_CLASS_V,  "Vd,0(s),VtVm", MATCH_VSOXEI32V, MASK_VSOXEI32V, match_vd_neq_vm, INSN_DREF },
++{"vsoxei64.v",  0, INSN_CLASS_V,  "Vd,0(s),VtVm", MATCH_VSOXEI64V, MASK_VSOXEI64V, match_vd_neq_vm, INSN_DREF },
++
++{"vluxei8.v",   0, INSN_CLASS_V,  "Vd,0(s),VtVm", MATCH_VLUXEI8V, MASK_VLUXEI8V, match_vd_neq_vm, INSN_DREF },
++{"vluxei16.v",  0, INSN_CLASS_V,  "Vd,0(s),VtVm", MATCH_VLUXEI16V, MASK_VLUXEI16V, match_vd_neq_vm, INSN_DREF },
++{"vluxei32.v",  0, INSN_CLASS_V,  "Vd,0(s),VtVm", MATCH_VLUXEI32V, MASK_VLUXEI32V, match_vd_neq_vm, INSN_DREF },
++{"vluxei64.v",  0, INSN_CLASS_V,  "Vd,0(s),VtVm", MATCH_VLUXEI64V, MASK_VLUXEI64V, match_vd_neq_vm, INSN_DREF },
++
++{"vsuxei8.v",   0, INSN_CLASS_V,  "Vd,0(s),VtVm", MATCH_VSUXEI8V, MASK_VSUXEI8V, match_vd_neq_vm, INSN_DREF },
++{"vsuxei16.v",  0, INSN_CLASS_V,  "Vd,0(s),VtVm", MATCH_VSUXEI16V, MASK_VSUXEI16V, match_vd_neq_vm, INSN_DREF },
++{"vsuxei32.v",  0, INSN_CLASS_V,  "Vd,0(s),VtVm", MATCH_VSUXEI32V, MASK_VSUXEI32V, match_vd_neq_vm, INSN_DREF },
++{"vsuxei64.v",  0, INSN_CLASS_V,  "Vd,0(s),VtVm", MATCH_VSUXEI64V, MASK_VSUXEI64V, match_vd_neq_vm, INSN_DREF },
++
++{"vle8ff.v",    0, INSN_CLASS_V,  "Vd,0(s)Vm", MATCH_VLE8FFV, MASK_VLE8FFV, match_vd_neq_vm, INSN_DREF },
++{"vle16ff.v",   0, INSN_CLASS_V,  "Vd,0(s)Vm", MATCH_VLE16FFV, MASK_VLE16FFV, match_vd_neq_vm, INSN_DREF },
++{"vle32ff.v",   0, INSN_CLASS_V,  "Vd,0(s)Vm", MATCH_VLE32FFV, MASK_VLE32FFV, match_vd_neq_vm, INSN_DREF },
++{"vle64ff.v",   0, INSN_CLASS_V,  "Vd,0(s)Vm", MATCH_VLE64FFV, MASK_VLE64FFV, match_vd_neq_vm, INSN_DREF },
++
++{"vlseg2e8.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VLSEG2E8V, MASK_VLSEG2E8V, match_vd_neq_vm, INSN_DREF },
++{"vsseg2e8.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VSSEG2E8V, MASK_VSSEG2E8V, match_vd_neq_vm, INSN_DREF },
++{"vlseg3e8.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VLSEG3E8V, MASK_VLSEG3E8V, match_vd_neq_vm, INSN_DREF },
++{"vsseg3e8.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VSSEG3E8V, MASK_VSSEG3E8V, match_vd_neq_vm, INSN_DREF },
++{"vlseg4e8.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VLSEG4E8V, MASK_VLSEG4E8V, match_vd_neq_vm, INSN_DREF },
++{"vsseg4e8.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VSSEG4E8V, MASK_VSSEG4E8V, match_vd_neq_vm, INSN_DREF },
++{"vlseg5e8.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VLSEG5E8V, MASK_VLSEG5E8V, match_vd_neq_vm, INSN_DREF },
++{"vsseg5e8.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VSSEG5E8V, MASK_VSSEG5E8V, match_vd_neq_vm, INSN_DREF },
++{"vlseg6e8.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VLSEG6E8V, MASK_VLSEG6E8V, match_vd_neq_vm, INSN_DREF },
++{"vsseg6e8.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VSSEG6E8V, MASK_VSSEG6E8V, match_vd_neq_vm, INSN_DREF },
++{"vlseg7e8.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VLSEG7E8V, MASK_VLSEG7E8V, match_vd_neq_vm, INSN_DREF },
++{"vsseg7e8.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VSSEG7E8V, MASK_VSSEG7E8V, match_vd_neq_vm, INSN_DREF },
++{"vlseg8e8.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VLSEG8E8V, MASK_VLSEG8E8V, match_vd_neq_vm, INSN_DREF },
++{"vsseg8e8.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VSSEG8E8V, MASK_VSSEG8E8V, match_vd_neq_vm, INSN_DREF },
++
++{"vlseg2e16.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VLSEG2E16V, MASK_VLSEG2E16V, match_vd_neq_vm, INSN_DREF },
++{"vsseg2e16.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VSSEG2E16V, MASK_VSSEG2E16V, match_vd_neq_vm, INSN_DREF },
++{"vlseg3e16.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VLSEG3E16V, MASK_VLSEG3E16V, match_vd_neq_vm, INSN_DREF },
++{"vsseg3e16.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VSSEG3E16V, MASK_VSSEG3E16V, match_vd_neq_vm, INSN_DREF },
++{"vlseg4e16.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VLSEG4E16V, MASK_VLSEG4E16V, match_vd_neq_vm, INSN_DREF },
++{"vsseg4e16.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VSSEG4E16V, MASK_VSSEG4E16V, match_vd_neq_vm, INSN_DREF },
++{"vlseg5e16.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VLSEG5E16V, MASK_VLSEG5E16V, match_vd_neq_vm, INSN_DREF },
++{"vsseg5e16.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VSSEG5E16V, MASK_VSSEG5E16V, match_vd_neq_vm, INSN_DREF },
++{"vlseg6e16.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VLSEG6E16V, MASK_VLSEG6E16V, match_vd_neq_vm, INSN_DREF },
++{"vsseg6e16.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VSSEG6E16V, MASK_VSSEG6E16V, match_vd_neq_vm, INSN_DREF },
++{"vlseg7e16.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VLSEG7E16V, MASK_VLSEG7E16V, match_vd_neq_vm, INSN_DREF },
++{"vsseg7e16.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VSSEG7E16V, MASK_VSSEG7E16V, match_vd_neq_vm, INSN_DREF },
++{"vlseg8e16.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VLSEG8E16V, MASK_VLSEG8E16V, match_vd_neq_vm, INSN_DREF },
++{"vsseg8e16.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VSSEG8E16V, MASK_VSSEG8E16V, match_vd_neq_vm, INSN_DREF },
++
++{"vlseg2e32.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VLSEG2E32V, MASK_VLSEG2E32V, match_vd_neq_vm, INSN_DREF },
++{"vsseg2e32.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VSSEG2E32V, MASK_VSSEG2E32V, match_vd_neq_vm, INSN_DREF },
++{"vlseg3e32.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VLSEG3E32V, MASK_VLSEG3E32V, match_vd_neq_vm, INSN_DREF },
++{"vsseg3e32.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VSSEG3E32V, MASK_VSSEG3E32V, match_vd_neq_vm, INSN_DREF },
++{"vlseg4e32.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VLSEG4E32V, MASK_VLSEG4E32V, match_vd_neq_vm, INSN_DREF },
++{"vsseg4e32.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VSSEG4E32V, MASK_VSSEG4E32V, match_vd_neq_vm, INSN_DREF },
++{"vlseg5e32.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VLSEG5E32V, MASK_VLSEG5E32V, match_vd_neq_vm, INSN_DREF },
++{"vsseg5e32.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VSSEG5E32V, MASK_VSSEG5E32V, match_vd_neq_vm, INSN_DREF },
++{"vlseg6e32.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VLSEG6E32V, MASK_VLSEG6E32V, match_vd_neq_vm, INSN_DREF },
++{"vsseg6e32.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VSSEG6E32V, MASK_VSSEG6E32V, match_vd_neq_vm, INSN_DREF },
++{"vlseg7e32.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VLSEG7E32V, MASK_VLSEG7E32V, match_vd_neq_vm, INSN_DREF },
++{"vsseg7e32.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VSSEG7E32V, MASK_VSSEG7E32V, match_vd_neq_vm, INSN_DREF },
++{"vlseg8e32.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VLSEG8E32V, MASK_VLSEG8E32V, match_vd_neq_vm, INSN_DREF },
++{"vsseg8e32.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VSSEG8E32V, MASK_VSSEG8E32V, match_vd_neq_vm, INSN_DREF },
++
++{"vlseg2e64.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VLSEG2E64V, MASK_VLSEG2E64V, match_vd_neq_vm, INSN_DREF },
++{"vsseg2e64.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VSSEG2E64V, MASK_VSSEG2E64V, match_vd_neq_vm, INSN_DREF },
++{"vlseg3e64.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VLSEG3E64V, MASK_VLSEG3E64V, match_vd_neq_vm, INSN_DREF },
++{"vsseg3e64.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VSSEG3E64V, MASK_VSSEG3E64V, match_vd_neq_vm, INSN_DREF },
++{"vlseg4e64.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VLSEG4E64V, MASK_VLSEG4E64V, match_vd_neq_vm, INSN_DREF },
++{"vsseg4e64.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VSSEG4E64V, MASK_VSSEG4E64V, match_vd_neq_vm, INSN_DREF },
++{"vlseg5e64.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VLSEG5E64V, MASK_VLSEG5E64V, match_vd_neq_vm, INSN_DREF },
++{"vsseg5e64.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VSSEG5E64V, MASK_VSSEG5E64V, match_vd_neq_vm, INSN_DREF },
++{"vlseg6e64.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VLSEG6E64V, MASK_VLSEG6E64V, match_vd_neq_vm, INSN_DREF },
++{"vsseg6e64.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VSSEG6E64V, MASK_VSSEG6E64V, match_vd_neq_vm, INSN_DREF },
++{"vlseg7e64.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VLSEG7E64V, MASK_VLSEG7E64V, match_vd_neq_vm, INSN_DREF },
++{"vsseg7e64.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VSSEG7E64V, MASK_VSSEG7E64V, match_vd_neq_vm, INSN_DREF },
++{"vlseg8e64.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VLSEG8E64V, MASK_VLSEG8E64V, match_vd_neq_vm, INSN_DREF },
++{"vsseg8e64.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VSSEG8E64V, MASK_VSSEG8E64V, match_vd_neq_vm, INSN_DREF },
++
++{"vlsseg2e8.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),tVm", MATCH_VLSSEG2E8V, MASK_VLSSEG2E8V, match_vd_neq_vm, INSN_DREF },
++{"vssseg2e8.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),tVm", MATCH_VSSSEG2E8V, MASK_VSSSEG2E8V, match_vd_neq_vm, INSN_DREF },
++{"vlsseg3e8.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),tVm", MATCH_VLSSEG3E8V, MASK_VLSSEG3E8V, match_vd_neq_vm, INSN_DREF },
++{"vssseg3e8.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),tVm", MATCH_VSSSEG3E8V, MASK_VSSSEG3E8V, match_vd_neq_vm, INSN_DREF },
++{"vlsseg4e8.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),tVm", MATCH_VLSSEG4E8V, MASK_VLSSEG4E8V, match_vd_neq_vm, INSN_DREF },
++{"vssseg4e8.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),tVm", MATCH_VSSSEG4E8V, MASK_VSSSEG4E8V, match_vd_neq_vm, INSN_DREF },
++{"vlsseg5e8.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),tVm", MATCH_VLSSEG5E8V, MASK_VLSSEG5E8V, match_vd_neq_vm, INSN_DREF },
++{"vssseg5e8.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),tVm", MATCH_VSSSEG5E8V, MASK_VSSSEG5E8V, match_vd_neq_vm, INSN_DREF },
++{"vlsseg6e8.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),tVm", MATCH_VLSSEG6E8V, MASK_VLSSEG6E8V, match_vd_neq_vm, INSN_DREF },
++{"vssseg6e8.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),tVm", MATCH_VSSSEG6E8V, MASK_VSSSEG6E8V, match_vd_neq_vm, INSN_DREF },
++{"vlsseg7e8.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),tVm", MATCH_VLSSEG7E8V, MASK_VLSSEG7E8V, match_vd_neq_vm, INSN_DREF },
++{"vssseg7e8.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),tVm", MATCH_VSSSEG7E8V, MASK_VSSSEG7E8V, match_vd_neq_vm, INSN_DREF },
++{"vlsseg8e8.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),tVm", MATCH_VLSSEG8E8V, MASK_VLSSEG8E8V, match_vd_neq_vm, INSN_DREF },
++{"vssseg8e8.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),tVm", MATCH_VSSSEG8E8V, MASK_VSSSEG8E8V, match_vd_neq_vm, INSN_DREF },
++
++{"vlsseg2e16.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),tVm", MATCH_VLSSEG2E16V, MASK_VLSSEG2E16V, match_vd_neq_vm, INSN_DREF },
++{"vssseg2e16.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),tVm", MATCH_VSSSEG2E16V, MASK_VSSSEG2E16V, match_vd_neq_vm, INSN_DREF },
++{"vlsseg3e16.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),tVm", MATCH_VLSSEG3E16V, MASK_VLSSEG3E16V, match_vd_neq_vm, INSN_DREF },
++{"vssseg3e16.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),tVm", MATCH_VSSSEG3E16V, MASK_VSSSEG3E16V, match_vd_neq_vm, INSN_DREF },
++{"vlsseg4e16.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),tVm", MATCH_VLSSEG4E16V, MASK_VLSSEG4E16V, match_vd_neq_vm, INSN_DREF },
++{"vssseg4e16.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),tVm", MATCH_VSSSEG4E16V, MASK_VSSSEG4E16V, match_vd_neq_vm, INSN_DREF },
++{"vlsseg5e16.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),tVm", MATCH_VLSSEG5E16V, MASK_VLSSEG5E16V, match_vd_neq_vm, INSN_DREF },
++{"vssseg5e16.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),tVm", MATCH_VSSSEG5E16V, MASK_VSSSEG5E16V, match_vd_neq_vm, INSN_DREF },
++{"vlsseg6e16.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),tVm", MATCH_VLSSEG6E16V, MASK_VLSSEG6E16V, match_vd_neq_vm, INSN_DREF },
++{"vssseg6e16.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),tVm", MATCH_VSSSEG6E16V, MASK_VSSSEG6E16V, match_vd_neq_vm, INSN_DREF },
++{"vlsseg7e16.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),tVm", MATCH_VLSSEG7E16V, MASK_VLSSEG7E16V, match_vd_neq_vm, INSN_DREF },
++{"vssseg7e16.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),tVm", MATCH_VSSSEG7E16V, MASK_VSSSEG7E16V, match_vd_neq_vm, INSN_DREF },
++{"vlsseg8e16.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),tVm", MATCH_VLSSEG8E16V, MASK_VLSSEG8E16V, match_vd_neq_vm, INSN_DREF },
++{"vssseg8e16.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),tVm", MATCH_VSSSEG8E16V, MASK_VSSSEG8E16V, match_vd_neq_vm, INSN_DREF },
++
++{"vlsseg2e32.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),tVm", MATCH_VLSSEG2E32V, MASK_VLSSEG2E32V, match_vd_neq_vm, INSN_DREF },
++{"vssseg2e32.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),tVm", MATCH_VSSSEG2E32V, MASK_VSSSEG2E32V, match_vd_neq_vm, INSN_DREF },
++{"vlsseg3e32.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),tVm", MATCH_VLSSEG3E32V, MASK_VLSSEG3E32V, match_vd_neq_vm, INSN_DREF },
++{"vssseg3e32.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),tVm", MATCH_VSSSEG3E32V, MASK_VSSSEG3E32V, match_vd_neq_vm, INSN_DREF },
++{"vlsseg4e32.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),tVm", MATCH_VLSSEG4E32V, MASK_VLSSEG4E32V, match_vd_neq_vm, INSN_DREF },
++{"vssseg4e32.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),tVm", MATCH_VSSSEG4E32V, MASK_VSSSEG4E32V, match_vd_neq_vm, INSN_DREF },
++{"vlsseg5e32.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),tVm", MATCH_VLSSEG5E32V, MASK_VLSSEG5E32V, match_vd_neq_vm, INSN_DREF },
++{"vssseg5e32.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),tVm", MATCH_VSSSEG5E32V, MASK_VSSSEG5E32V, match_vd_neq_vm, INSN_DREF },
++{"vlsseg6e32.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),tVm", MATCH_VLSSEG6E32V, MASK_VLSSEG6E32V, match_vd_neq_vm, INSN_DREF },
++{"vssseg6e32.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),tVm", MATCH_VSSSEG6E32V, MASK_VSSSEG6E32V, match_vd_neq_vm, INSN_DREF },
++{"vlsseg7e32.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),tVm", MATCH_VLSSEG7E32V, MASK_VLSSEG7E32V, match_vd_neq_vm, INSN_DREF },
++{"vssseg7e32.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),tVm", MATCH_VSSSEG7E32V, MASK_VSSSEG7E32V, match_vd_neq_vm, INSN_DREF },
++{"vlsseg8e32.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),tVm", MATCH_VLSSEG8E32V, MASK_VLSSEG8E32V, match_vd_neq_vm, INSN_DREF },
++{"vssseg8e32.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),tVm", MATCH_VSSSEG8E32V, MASK_VSSSEG8E32V, match_vd_neq_vm, INSN_DREF },
++
++{"vlsseg2e64.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),tVm", MATCH_VLSSEG2E64V, MASK_VLSSEG2E64V, match_vd_neq_vm, INSN_DREF },
++{"vssseg2e64.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),tVm", MATCH_VSSSEG2E64V, MASK_VSSSEG2E64V, match_vd_neq_vm, INSN_DREF },
++{"vlsseg3e64.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),tVm", MATCH_VLSSEG3E64V, MASK_VLSSEG3E64V, match_vd_neq_vm, INSN_DREF },
++{"vssseg3e64.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),tVm", MATCH_VSSSEG3E64V, MASK_VSSSEG3E64V, match_vd_neq_vm, INSN_DREF },
++{"vlsseg4e64.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),tVm", MATCH_VLSSEG4E64V, MASK_VLSSEG4E64V, match_vd_neq_vm, INSN_DREF },
++{"vssseg4e64.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),tVm", MATCH_VSSSEG4E64V, MASK_VSSSEG4E64V, match_vd_neq_vm, INSN_DREF },
++{"vlsseg5e64.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),tVm", MATCH_VLSSEG5E64V, MASK_VLSSEG5E64V, match_vd_neq_vm, INSN_DREF },
++{"vssseg5e64.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),tVm", MATCH_VSSSEG5E64V, MASK_VSSSEG5E64V, match_vd_neq_vm, INSN_DREF },
++{"vlsseg6e64.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),tVm", MATCH_VLSSEG6E64V, MASK_VLSSEG6E64V, match_vd_neq_vm, INSN_DREF },
++{"vssseg6e64.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),tVm", MATCH_VSSSEG6E64V, MASK_VSSSEG6E64V, match_vd_neq_vm, INSN_DREF },
++{"vlsseg7e64.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),tVm", MATCH_VLSSEG7E64V, MASK_VLSSEG7E64V, match_vd_neq_vm, INSN_DREF },
++{"vssseg7e64.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),tVm", MATCH_VSSSEG7E64V, MASK_VSSSEG7E64V, match_vd_neq_vm, INSN_DREF },
++{"vlsseg8e64.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),tVm", MATCH_VLSSEG8E64V, MASK_VLSSEG8E64V, match_vd_neq_vm, INSN_DREF },
++{"vssseg8e64.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),tVm", MATCH_VSSSEG8E64V, MASK_VSSSEG8E64V, match_vd_neq_vm, INSN_DREF },
++
++{"vloxseg2ei8.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VLOXSEG2EI8V, MASK_VLOXSEG2EI8V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vsoxseg2ei8.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VSOXSEG2EI8V, MASK_VSOXSEG2EI8V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vloxseg3ei8.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VLOXSEG3EI8V, MASK_VLOXSEG3EI8V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vsoxseg3ei8.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VSOXSEG3EI8V, MASK_VSOXSEG3EI8V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vloxseg4ei8.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VLOXSEG4EI8V, MASK_VLOXSEG4EI8V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vsoxseg4ei8.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VSOXSEG4EI8V, MASK_VSOXSEG4EI8V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vloxseg5ei8.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VLOXSEG5EI8V, MASK_VLOXSEG5EI8V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vsoxseg5ei8.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VSOXSEG5EI8V, MASK_VSOXSEG5EI8V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vloxseg6ei8.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VLOXSEG6EI8V, MASK_VLOXSEG6EI8V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vsoxseg6ei8.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VSOXSEG6EI8V, MASK_VSOXSEG6EI8V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vloxseg7ei8.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VLOXSEG7EI8V, MASK_VLOXSEG7EI8V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vsoxseg7ei8.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VSOXSEG7EI8V, MASK_VSOXSEG7EI8V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vloxseg8ei8.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VLOXSEG8EI8V, MASK_VLOXSEG8EI8V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vsoxseg8ei8.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VSOXSEG8EI8V, MASK_VSOXSEG8EI8V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++
++{"vloxseg2ei16.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VLOXSEG2EI16V, MASK_VLOXSEG2EI16V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vsoxseg2ei16.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VSOXSEG2EI16V, MASK_VSOXSEG2EI16V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vloxseg3ei16.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VLOXSEG3EI16V, MASK_VLOXSEG3EI16V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vsoxseg3ei16.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VSOXSEG3EI16V, MASK_VSOXSEG3EI16V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vloxseg4ei16.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VLOXSEG4EI16V, MASK_VLOXSEG4EI16V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vsoxseg4ei16.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VSOXSEG4EI16V, MASK_VSOXSEG4EI16V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vloxseg5ei16.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VLOXSEG5EI16V, MASK_VLOXSEG5EI16V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vsoxseg5ei16.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VSOXSEG5EI16V, MASK_VSOXSEG5EI16V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vloxseg6ei16.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VLOXSEG6EI16V, MASK_VLOXSEG6EI16V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vsoxseg6ei16.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VSOXSEG6EI16V, MASK_VSOXSEG6EI16V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vloxseg7ei16.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VLOXSEG7EI16V, MASK_VLOXSEG7EI16V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vsoxseg7ei16.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VSOXSEG7EI16V, MASK_VSOXSEG7EI16V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vloxseg8ei16.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VLOXSEG8EI16V, MASK_VLOXSEG8EI16V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vsoxseg8ei16.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VSOXSEG8EI16V, MASK_VSOXSEG8EI16V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++
++{"vloxseg2ei32.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VLOXSEG2EI32V, MASK_VLOXSEG2EI32V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vsoxseg2ei32.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VSOXSEG2EI32V, MASK_VSOXSEG2EI32V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vloxseg3ei32.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VLOXSEG3EI32V, MASK_VLOXSEG3EI32V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vsoxseg3ei32.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VSOXSEG3EI32V, MASK_VSOXSEG3EI32V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vloxseg4ei32.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VLOXSEG4EI32V, MASK_VLOXSEG4EI32V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vsoxseg4ei32.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VSOXSEG4EI32V, MASK_VSOXSEG4EI32V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vloxseg5ei32.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VLOXSEG5EI32V, MASK_VLOXSEG5EI32V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vsoxseg5ei32.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VSOXSEG5EI32V, MASK_VSOXSEG5EI32V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vloxseg6ei32.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VLOXSEG6EI32V, MASK_VLOXSEG6EI32V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vsoxseg6ei32.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VSOXSEG6EI32V, MASK_VSOXSEG6EI32V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vloxseg7ei32.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VLOXSEG7EI32V, MASK_VLOXSEG7EI32V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vsoxseg7ei32.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VSOXSEG7EI32V, MASK_VSOXSEG7EI32V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vloxseg8ei32.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VLOXSEG8EI32V, MASK_VLOXSEG8EI32V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vsoxseg8ei32.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VSOXSEG8EI32V, MASK_VSOXSEG8EI32V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++
++{"vloxseg2ei64.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VLOXSEG2EI64V, MASK_VLOXSEG2EI64V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vsoxseg2ei64.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VSOXSEG2EI64V, MASK_VSOXSEG2EI64V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vloxseg3ei64.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VLOXSEG3EI64V, MASK_VLOXSEG3EI64V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vsoxseg3ei64.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VSOXSEG3EI64V, MASK_VSOXSEG3EI64V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vloxseg4ei64.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VLOXSEG4EI64V, MASK_VLOXSEG4EI64V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vsoxseg4ei64.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VSOXSEG4EI64V, MASK_VSOXSEG4EI64V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vloxseg5ei64.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VLOXSEG5EI64V, MASK_VLOXSEG5EI64V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vsoxseg5ei64.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VSOXSEG5EI64V, MASK_VSOXSEG5EI64V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vloxseg6ei64.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VLOXSEG6EI64V, MASK_VLOXSEG6EI64V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vsoxseg6ei64.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VSOXSEG6EI64V, MASK_VSOXSEG6EI64V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vloxseg7ei64.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VLOXSEG7EI64V, MASK_VLOXSEG7EI64V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vsoxseg7ei64.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VSOXSEG7EI64V, MASK_VSOXSEG7EI64V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vloxseg8ei64.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VLOXSEG8EI64V, MASK_VLOXSEG8EI64V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vsoxseg8ei64.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VSOXSEG8EI64V, MASK_VSOXSEG8EI64V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++
++{"vluxseg2ei8.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VLUXSEG2EI8V, MASK_VLUXSEG2EI8V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vsuxseg2ei8.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VSUXSEG2EI8V, MASK_VSUXSEG2EI8V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vluxseg3ei8.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VLUXSEG3EI8V, MASK_VLUXSEG3EI8V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vsuxseg3ei8.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VSUXSEG3EI8V, MASK_VSUXSEG3EI8V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vluxseg4ei8.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VLUXSEG4EI8V, MASK_VLUXSEG4EI8V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vsuxseg4ei8.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VSUXSEG4EI8V, MASK_VSUXSEG4EI8V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vluxseg5ei8.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VLUXSEG5EI8V, MASK_VLUXSEG5EI8V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vsuxseg5ei8.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VSUXSEG5EI8V, MASK_VSUXSEG5EI8V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vluxseg6ei8.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VLUXSEG6EI8V, MASK_VLUXSEG6EI8V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vsuxseg6ei8.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VSUXSEG6EI8V, MASK_VSUXSEG6EI8V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vluxseg7ei8.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VLUXSEG7EI8V, MASK_VLUXSEG7EI8V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vsuxseg7ei8.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VSUXSEG7EI8V, MASK_VSUXSEG7EI8V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vluxseg8ei8.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VLUXSEG8EI8V, MASK_VLUXSEG8EI8V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vsuxseg8ei8.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VSUXSEG8EI8V, MASK_VSUXSEG8EI8V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++
++{"vluxseg2ei16.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VLUXSEG2EI16V, MASK_VLUXSEG2EI16V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vsuxseg2ei16.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VSUXSEG2EI16V, MASK_VSUXSEG2EI16V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vluxseg3ei16.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VLUXSEG3EI16V, MASK_VLUXSEG3EI16V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vsuxseg3ei16.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VSUXSEG3EI16V, MASK_VSUXSEG3EI16V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vluxseg4ei16.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VLUXSEG4EI16V, MASK_VLUXSEG4EI16V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vsuxseg4ei16.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VSUXSEG4EI16V, MASK_VSUXSEG4EI16V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vluxseg5ei16.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VLUXSEG5EI16V, MASK_VLUXSEG5EI16V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vsuxseg5ei16.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VSUXSEG5EI16V, MASK_VSUXSEG5EI16V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vluxseg6ei16.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VLUXSEG6EI16V, MASK_VLUXSEG6EI16V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vsuxseg6ei16.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VSUXSEG6EI16V, MASK_VSUXSEG6EI16V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vluxseg7ei16.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VLUXSEG7EI16V, MASK_VLUXSEG7EI16V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vsuxseg7ei16.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VSUXSEG7EI16V, MASK_VSUXSEG7EI16V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vluxseg8ei16.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VLUXSEG8EI16V, MASK_VLUXSEG8EI16V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vsuxseg8ei16.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VSUXSEG8EI16V, MASK_VSUXSEG8EI16V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++
++{"vluxseg2ei32.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VLUXSEG2EI32V, MASK_VLUXSEG2EI32V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vsuxseg2ei32.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VSUXSEG2EI32V, MASK_VSUXSEG2EI32V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vluxseg3ei32.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VLUXSEG3EI32V, MASK_VLUXSEG3EI32V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vsuxseg3ei32.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VSUXSEG3EI32V, MASK_VSUXSEG3EI32V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vluxseg4ei32.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VLUXSEG4EI32V, MASK_VLUXSEG4EI32V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vsuxseg4ei32.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VSUXSEG4EI32V, MASK_VSUXSEG4EI32V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vluxseg5ei32.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VLUXSEG5EI32V, MASK_VLUXSEG5EI32V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vsuxseg5ei32.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VSUXSEG5EI32V, MASK_VSUXSEG5EI32V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vluxseg6ei32.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VLUXSEG6EI32V, MASK_VLUXSEG6EI32V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vsuxseg6ei32.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VSUXSEG6EI32V, MASK_VSUXSEG6EI32V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vluxseg7ei32.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VLUXSEG7EI32V, MASK_VLUXSEG7EI32V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vsuxseg7ei32.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VSUXSEG7EI32V, MASK_VSUXSEG7EI32V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vluxseg8ei32.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VLUXSEG8EI32V, MASK_VLUXSEG8EI32V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vsuxseg8ei32.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VSUXSEG8EI32V, MASK_VSUXSEG8EI32V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++
++{"vluxseg2ei64.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VLUXSEG2EI64V, MASK_VLUXSEG2EI64V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vsuxseg2ei64.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VSUXSEG2EI64V, MASK_VSUXSEG2EI64V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vluxseg3ei64.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VLUXSEG3EI64V, MASK_VLUXSEG3EI64V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vsuxseg3ei64.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VSUXSEG3EI64V, MASK_VSUXSEG3EI64V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vluxseg4ei64.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VLUXSEG4EI64V, MASK_VLUXSEG4EI64V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vsuxseg4ei64.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VSUXSEG4EI64V, MASK_VSUXSEG4EI64V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vluxseg5ei64.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VLUXSEG5EI64V, MASK_VLUXSEG5EI64V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vsuxseg5ei64.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VSUXSEG5EI64V, MASK_VSUXSEG5EI64V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vluxseg6ei64.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VLUXSEG6EI64V, MASK_VLUXSEG6EI64V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vsuxseg6ei64.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VSUXSEG6EI64V, MASK_VSUXSEG6EI64V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vluxseg7ei64.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VLUXSEG7EI64V, MASK_VLUXSEG7EI64V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vsuxseg7ei64.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VSUXSEG7EI64V, MASK_VSUXSEG7EI64V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vluxseg8ei64.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VLUXSEG8EI64V, MASK_VLUXSEG8EI64V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++{"vsuxseg8ei64.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s),VtVm", MATCH_VSUXSEG8EI64V, MASK_VSUXSEG8EI64V, match_vd_neq_vs2_neq_vm, INSN_DREF },
++
++{"vlseg2e8ff.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VLSEG2E8FFV, MASK_VLSEG2E8FFV, match_vd_neq_vm, INSN_DREF },
++{"vlseg3e8ff.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VLSEG3E8FFV, MASK_VLSEG3E8FFV, match_vd_neq_vm, INSN_DREF },
++{"vlseg4e8ff.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VLSEG4E8FFV, MASK_VLSEG4E8FFV, match_vd_neq_vm, INSN_DREF },
++{"vlseg5e8ff.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VLSEG5E8FFV, MASK_VLSEG5E8FFV, match_vd_neq_vm, INSN_DREF },
++{"vlseg6e8ff.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VLSEG6E8FFV, MASK_VLSEG6E8FFV, match_vd_neq_vm, INSN_DREF },
++{"vlseg7e8ff.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VLSEG7E8FFV, MASK_VLSEG7E8FFV, match_vd_neq_vm, INSN_DREF },
++{"vlseg8e8ff.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VLSEG8E8FFV, MASK_VLSEG8E8FFV, match_vd_neq_vm, INSN_DREF },
++
++{"vlseg2e16ff.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VLSEG2E16FFV, MASK_VLSEG2E16FFV, match_vd_neq_vm, INSN_DREF },
++{"vlseg3e16ff.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VLSEG3E16FFV, MASK_VLSEG3E16FFV, match_vd_neq_vm, INSN_DREF },
++{"vlseg4e16ff.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VLSEG4E16FFV, MASK_VLSEG4E16FFV, match_vd_neq_vm, INSN_DREF },
++{"vlseg5e16ff.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VLSEG5E16FFV, MASK_VLSEG5E16FFV, match_vd_neq_vm, INSN_DREF },
++{"vlseg6e16ff.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VLSEG6E16FFV, MASK_VLSEG6E16FFV, match_vd_neq_vm, INSN_DREF },
++{"vlseg7e16ff.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VLSEG7E16FFV, MASK_VLSEG7E16FFV, match_vd_neq_vm, INSN_DREF },
++{"vlseg8e16ff.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VLSEG8E16FFV, MASK_VLSEG8E16FFV, match_vd_neq_vm, INSN_DREF },
++
++{"vlseg2e32ff.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VLSEG2E32FFV, MASK_VLSEG2E32FFV, match_vd_neq_vm, INSN_DREF },
++{"vlseg3e32ff.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VLSEG3E32FFV, MASK_VLSEG3E32FFV, match_vd_neq_vm, INSN_DREF },
++{"vlseg4e32ff.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VLSEG4E32FFV, MASK_VLSEG4E32FFV, match_vd_neq_vm, INSN_DREF },
++{"vlseg5e32ff.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VLSEG5E32FFV, MASK_VLSEG5E32FFV, match_vd_neq_vm, INSN_DREF },
++{"vlseg6e32ff.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VLSEG6E32FFV, MASK_VLSEG6E32FFV, match_vd_neq_vm, INSN_DREF },
++{"vlseg7e32ff.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VLSEG7E32FFV, MASK_VLSEG7E32FFV, match_vd_neq_vm, INSN_DREF },
++{"vlseg8e32ff.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VLSEG8E32FFV, MASK_VLSEG8E32FFV, match_vd_neq_vm, INSN_DREF },
++
++{"vlseg2e64ff.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VLSEG2E64FFV, MASK_VLSEG2E64FFV, match_vd_neq_vm, INSN_DREF },
++{"vlseg3e64ff.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VLSEG3E64FFV, MASK_VLSEG3E64FFV, match_vd_neq_vm, INSN_DREF },
++{"vlseg4e64ff.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VLSEG4E64FFV, MASK_VLSEG4E64FFV, match_vd_neq_vm, INSN_DREF },
++{"vlseg5e64ff.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VLSEG5E64FFV, MASK_VLSEG5E64FFV, match_vd_neq_vm, INSN_DREF },
++{"vlseg6e64ff.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VLSEG6E64FFV, MASK_VLSEG6E64FFV, match_vd_neq_vm, INSN_DREF },
++{"vlseg7e64ff.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VLSEG7E64FFV, MASK_VLSEG7E64FFV, match_vd_neq_vm, INSN_DREF },
++{"vlseg8e64ff.v",  0, INSN_CLASS_V_OR_ZVLSSEG,  "Vd,0(s)Vm", MATCH_VLSEG8E64FFV, MASK_VLSEG8E64FFV, match_vd_neq_vm, INSN_DREF },
++
++{"vl1r.v",      0, INSN_CLASS_V,  "Vd,0(s)", MATCH_VL1RE8V, MASK_VL1RE8V, match_vls_nf_rv, INSN_DREF|INSN_ALIAS },
++{"vl1re8.v",    0, INSN_CLASS_V,  "Vd,0(s)", MATCH_VL1RE8V, MASK_VL1RE8V, match_vls_nf_rv, INSN_DREF },
++{"vl1re16.v",   0, INSN_CLASS_V,  "Vd,0(s)", MATCH_VL1RE16V, MASK_VL1RE16V, match_vls_nf_rv, INSN_DREF },
++{"vl1re32.v",   0, INSN_CLASS_V,  "Vd,0(s)", MATCH_VL1RE32V, MASK_VL1RE32V, match_vls_nf_rv, INSN_DREF },
++{"vl1re64.v",   0, INSN_CLASS_V,  "Vd,0(s)", MATCH_VL1RE64V, MASK_VL1RE64V, match_vls_nf_rv, INSN_DREF },
++
++{"vl2r.v",      0, INSN_CLASS_V,  "Vd,0(s)", MATCH_VL2RE8V, MASK_VL2RE8V, match_vls_nf_rv, INSN_DREF|INSN_ALIAS },
++{"vl2re8.v",    0, INSN_CLASS_V,  "Vd,0(s)", MATCH_VL2RE8V, MASK_VL2RE8V, match_vls_nf_rv, INSN_DREF },
++{"vl2re16.v",   0, INSN_CLASS_V,  "Vd,0(s)", MATCH_VL2RE16V, MASK_VL2RE16V, match_vls_nf_rv, INSN_DREF },
++{"vl2re32.v",   0, INSN_CLASS_V,  "Vd,0(s)", MATCH_VL2RE32V, MASK_VL2RE32V, match_vls_nf_rv, INSN_DREF },
++{"vl2re64.v",   0, INSN_CLASS_V,  "Vd,0(s)", MATCH_VL2RE64V, MASK_VL2RE64V, match_vls_nf_rv, INSN_DREF },
++
++{"vl4r.v",      0, INSN_CLASS_V,  "Vd,0(s)", MATCH_VL4RE8V, MASK_VL4RE8V, match_vls_nf_rv, INSN_DREF|INSN_ALIAS },
++{"vl4re8.v",    0, INSN_CLASS_V,  "Vd,0(s)", MATCH_VL4RE8V, MASK_VL4RE8V, match_vls_nf_rv, INSN_DREF },
++{"vl4re16.v",   0, INSN_CLASS_V,  "Vd,0(s)", MATCH_VL4RE16V, MASK_VL4RE16V, match_vls_nf_rv, INSN_DREF },
++{"vl4re32.v",   0, INSN_CLASS_V,  "Vd,0(s)", MATCH_VL4RE32V, MASK_VL4RE32V, match_vls_nf_rv, INSN_DREF },
++{"vl4re64.v",   0, INSN_CLASS_V,  "Vd,0(s)", MATCH_VL4RE64V, MASK_VL4RE64V, match_vls_nf_rv, INSN_DREF },
++
++{"vl8r.v",      0, INSN_CLASS_V,  "Vd,0(s)", MATCH_VL8RE8V, MASK_VL8RE8V, match_vls_nf_rv, INSN_DREF|INSN_ALIAS },
++{"vl8re8.v",    0, INSN_CLASS_V,  "Vd,0(s)", MATCH_VL8RE8V, MASK_VL8RE8V, match_vls_nf_rv, INSN_DREF },
++{"vl8re16.v",   0, INSN_CLASS_V,  "Vd,0(s)", MATCH_VL8RE16V, MASK_VL8RE16V, match_vls_nf_rv, INSN_DREF },
++{"vl8re32.v",   0, INSN_CLASS_V,  "Vd,0(s)", MATCH_VL8RE32V, MASK_VL8RE32V, match_vls_nf_rv, INSN_DREF },
++{"vl8re64.v",   0, INSN_CLASS_V,  "Vd,0(s)", MATCH_VL8RE64V, MASK_VL8RE64V, match_vls_nf_rv, INSN_DREF },
++
++{"vs1r.v",  0, INSN_CLASS_V,  "Vd,0(s)", MATCH_VS1RV, MASK_VS1RV, match_vls_nf_rv, INSN_DREF },
++{"vs2r.v",  0, INSN_CLASS_V,  "Vd,0(s)", MATCH_VS2RV, MASK_VS2RV, match_vls_nf_rv, INSN_DREF },
++{"vs4r.v",  0, INSN_CLASS_V,  "Vd,0(s)", MATCH_VS4RV, MASK_VS4RV, match_vls_nf_rv, INSN_DREF },
++{"vs8r.v",  0, INSN_CLASS_V,  "Vd,0(s)", MATCH_VS8RV, MASK_VS8RV, match_vls_nf_rv, INSN_DREF },
++
++{"vamoaddei8.v",   0, INSN_CLASS_V_OR_ZVAMO,  "Ve,0(s),Vt,VfVm", MATCH_VAMOADDEI8V, MASK_VAMOADDEI8V, match_vd_neq_vm, INSN_DREF},
++{"vamoswapei8.v",  0, INSN_CLASS_V_OR_ZVAMO,  "Ve,0(s),Vt,VfVm", MATCH_VAMOSWAPEI8V, MASK_VAMOSWAPEI8V, match_vd_neq_vm, INSN_DREF},
++{"vamoxorei8.v",   0, INSN_CLASS_V_OR_ZVAMO,  "Ve,0(s),Vt,VfVm", MATCH_VAMOXOREI8V, MASK_VAMOXOREI8V, match_vd_neq_vm, INSN_DREF},
++{"vamoandei8.v",   0, INSN_CLASS_V_OR_ZVAMO,  "Ve,0(s),Vt,VfVm", MATCH_VAMOANDEI8V, MASK_VAMOANDEI8V, match_vd_neq_vm, INSN_DREF},
++{"vamoorei8.v",    0, INSN_CLASS_V_OR_ZVAMO,  "Ve,0(s),Vt,VfVm", MATCH_VAMOOREI8V, MASK_VAMOOREI8V, match_vd_neq_vm, INSN_DREF},
++{"vamominei8.v",   0, INSN_CLASS_V_OR_ZVAMO,  "Ve,0(s),Vt,VfVm", MATCH_VAMOMINEI8V, MASK_VAMOMINEI8V, match_vd_neq_vm, INSN_DREF},
++{"vamomaxei8.v",   0, INSN_CLASS_V_OR_ZVAMO,  "Ve,0(s),Vt,VfVm", MATCH_VAMOMAXEI8V, MASK_VAMOMAXEI8V, match_vd_neq_vm, INSN_DREF},
++{"vamominuei8.v",  0, INSN_CLASS_V_OR_ZVAMO,  "Ve,0(s),Vt,VfVm", MATCH_VAMOMINUEI8V, MASK_VAMOMINUEI8V, match_vd_neq_vm, INSN_DREF},
++{"vamomaxuei8.v",  0, INSN_CLASS_V_OR_ZVAMO,  "Ve,0(s),Vt,VfVm", MATCH_VAMOMAXUEI8V, MASK_VAMOMAXUEI8V, match_vd_neq_vm, INSN_DREF},
++
++{"vamoaddei16.v",   0, INSN_CLASS_V_OR_ZVAMO,  "Ve,0(s),Vt,VfVm", MATCH_VAMOADDEI16V, MASK_VAMOADDEI16V, match_vd_neq_vm, INSN_DREF},
++{"vamoswapei16.v",  0, INSN_CLASS_V_OR_ZVAMO,  "Ve,0(s),Vt,VfVm", MATCH_VAMOSWAPEI16V, MASK_VAMOSWAPEI16V, match_vd_neq_vm, INSN_DREF},
++{"vamoxorei16.v",   0, INSN_CLASS_V_OR_ZVAMO,  "Ve,0(s),Vt,VfVm", MATCH_VAMOXOREI16V, MASK_VAMOXOREI16V, match_vd_neq_vm, INSN_DREF},
++{"vamoandei16.v",   0, INSN_CLASS_V_OR_ZVAMO,  "Ve,0(s),Vt,VfVm", MATCH_VAMOANDEI16V, MASK_VAMOANDEI16V, match_vd_neq_vm, INSN_DREF},
++{"vamoorei16.v",    0, INSN_CLASS_V_OR_ZVAMO,  "Ve,0(s),Vt,VfVm", MATCH_VAMOOREI16V, MASK_VAMOOREI16V, match_vd_neq_vm, INSN_DREF},
++{"vamominei16.v",   0, INSN_CLASS_V_OR_ZVAMO,  "Ve,0(s),Vt,VfVm", MATCH_VAMOMINEI16V, MASK_VAMOMINEI16V, match_vd_neq_vm, INSN_DREF},
++{"vamomaxei16.v",   0, INSN_CLASS_V_OR_ZVAMO,  "Ve,0(s),Vt,VfVm", MATCH_VAMOMAXEI16V, MASK_VAMOMAXEI16V, match_vd_neq_vm, INSN_DREF},
++{"vamominuei16.v",  0, INSN_CLASS_V_OR_ZVAMO,  "Ve,0(s),Vt,VfVm", MATCH_VAMOMINUEI16V, MASK_VAMOMINUEI16V, match_vd_neq_vm, INSN_DREF},
++{"vamomaxuei16.v",  0, INSN_CLASS_V_OR_ZVAMO,  "Ve,0(s),Vt,VfVm", MATCH_VAMOMAXUEI16V, MASK_VAMOMAXUEI16V, match_vd_neq_vm, INSN_DREF},
++
++{"vamoaddei32.v",   0, INSN_CLASS_V_OR_ZVAMO,  "Ve,0(s),Vt,VfVm", MATCH_VAMOADDEI32V, MASK_VAMOADDEI32V, match_vd_neq_vm, INSN_DREF},
++{"vamoswapei32.v",  0, INSN_CLASS_V_OR_ZVAMO,  "Ve,0(s),Vt,VfVm", MATCH_VAMOSWAPEI32V, MASK_VAMOSWAPEI32V, match_vd_neq_vm, INSN_DREF},
++{"vamoxorei32.v",   0, INSN_CLASS_V_OR_ZVAMO,  "Ve,0(s),Vt,VfVm", MATCH_VAMOXOREI32V, MASK_VAMOXOREI32V, match_vd_neq_vm, INSN_DREF},
++{"vamoandei32.v",   0, INSN_CLASS_V_OR_ZVAMO,  "Ve,0(s),Vt,VfVm", MATCH_VAMOANDEI32V, MASK_VAMOANDEI32V, match_vd_neq_vm, INSN_DREF},
++{"vamoorei32.v",    0, INSN_CLASS_V_OR_ZVAMO,  "Ve,0(s),Vt,VfVm", MATCH_VAMOOREI32V, MASK_VAMOOREI32V, match_vd_neq_vm, INSN_DREF},
++{"vamominei32.v",   0, INSN_CLASS_V_OR_ZVAMO,  "Ve,0(s),Vt,VfVm", MATCH_VAMOMINEI32V, MASK_VAMOMINEI32V, match_vd_neq_vm, INSN_DREF},
++{"vamomaxei32.v",   0, INSN_CLASS_V_OR_ZVAMO,  "Ve,0(s),Vt,VfVm", MATCH_VAMOMAXEI32V, MASK_VAMOMAXEI32V, match_vd_neq_vm, INSN_DREF},
++{"vamominuei32.v",  0, INSN_CLASS_V_OR_ZVAMO,  "Ve,0(s),Vt,VfVm", MATCH_VAMOMINUEI32V, MASK_VAMOMINUEI32V, match_vd_neq_vm, INSN_DREF},
++{"vamomaxuei32.v",  0, INSN_CLASS_V_OR_ZVAMO,  "Ve,0(s),Vt,VfVm", MATCH_VAMOMAXUEI32V, MASK_VAMOMAXUEI32V, match_vd_neq_vm, INSN_DREF},
++
++{"vamoaddei64.v",   0, INSN_CLASS_V_OR_ZVAMO,  "Ve,0(s),Vt,VfVm", MATCH_VAMOADDEI64V, MASK_VAMOADDEI64V, match_vd_neq_vm, INSN_DREF},
++{"vamoswapei64.v",  0, INSN_CLASS_V_OR_ZVAMO,  "Ve,0(s),Vt,VfVm", MATCH_VAMOSWAPEI64V, MASK_VAMOSWAPEI64V, match_vd_neq_vm, INSN_DREF},
++{"vamoxorei64.v",   0, INSN_CLASS_V_OR_ZVAMO,  "Ve,0(s),Vt,VfVm", MATCH_VAMOXOREI64V, MASK_VAMOXOREI64V, match_vd_neq_vm, INSN_DREF},
++{"vamoandei64.v",   0, INSN_CLASS_V_OR_ZVAMO,  "Ve,0(s),Vt,VfVm", MATCH_VAMOANDEI64V, MASK_VAMOANDEI64V, match_vd_neq_vm, INSN_DREF},
++{"vamoorei64.v",    0, INSN_CLASS_V_OR_ZVAMO,  "Ve,0(s),Vt,VfVm", MATCH_VAMOOREI64V, MASK_VAMOOREI64V, match_vd_neq_vm, INSN_DREF},
++{"vamominei64.v",   0, INSN_CLASS_V_OR_ZVAMO,  "Ve,0(s),Vt,VfVm", MATCH_VAMOMINEI64V, MASK_VAMOMINEI64V, match_vd_neq_vm, INSN_DREF},
++{"vamomaxei64.v",   0, INSN_CLASS_V_OR_ZVAMO,  "Ve,0(s),Vt,VfVm", MATCH_VAMOMAXEI64V, MASK_VAMOMAXEI64V, match_vd_neq_vm, INSN_DREF},
++{"vamominuei64.v",  0, INSN_CLASS_V_OR_ZVAMO,  "Ve,0(s),Vt,VfVm", MATCH_VAMOMINUEI64V, MASK_VAMOMINUEI64V, match_vd_neq_vm, INSN_DREF},
++{"vamomaxuei64.v",  0, INSN_CLASS_V_OR_ZVAMO,  "Ve,0(s),Vt,VfVm", MATCH_VAMOMAXUEI64V, MASK_VAMOMAXUEI64V, match_vd_neq_vm, INSN_DREF},
++
++{"vneg.v",     0, INSN_CLASS_V,  "Vd,VtVm",  MATCH_VRSUBVX, MASK_VRSUBVX | MASK_RS1, match_vd_neq_vm, INSN_ALIAS },
++
++{"vadd.vv",    0, INSN_CLASS_V,  "Vd,Vt,VsVm", MATCH_VADDVV, MASK_VADDVV, match_vd_neq_vm, 0 },
++{"vadd.vx",    0, INSN_CLASS_V,  "Vd,Vt,sVm", MATCH_VADDVX, MASK_VADDVX, match_vd_neq_vm, 0 },
++{"vadd.vi",    0, INSN_CLASS_V,  "Vd,Vt,ViVm", MATCH_VADDVI, MASK_VADDVI, match_vd_neq_vm, 0 },
++{"vsub.vv",    0, INSN_CLASS_V,  "Vd,Vt,VsVm", MATCH_VSUBVV, MASK_VSUBVV, match_vd_neq_vm, 0 },
++{"vsub.vx",    0, INSN_CLASS_V,  "Vd,Vt,sVm", MATCH_VSUBVX, MASK_VSUBVX, match_vd_neq_vm, 0 },
++{"vrsub.vx",   0, INSN_CLASS_V,  "Vd,Vt,sVm", MATCH_VRSUBVX, MASK_VRSUBVX, match_vd_neq_vm, 0 },
++{"vrsub.vi",   0, INSN_CLASS_V,  "Vd,Vt,ViVm", MATCH_VRSUBVI, MASK_VRSUBVI, match_vd_neq_vm, 0 },
++
++{"vwcvt.x.x.v",  0, INSN_CLASS_V,  "Vd,VtVm", MATCH_VWCVTXXV, MASK_VWCVTXXV, match_widen_vd_neq_vs2_neq_vm, INSN_ALIAS },
++{"vwcvtu.x.x.v", 0, INSN_CLASS_V,  "Vd,VtVm", MATCH_VWCVTUXXV, MASK_VWCVTUXXV, match_widen_vd_neq_vs2_neq_vm, INSN_ALIAS },
++
++{"vwaddu.vv",  0, INSN_CLASS_V,  "Vd,Vt,VsVm", MATCH_VWADDUVV, MASK_VWADDUVV, match_widen_vd_neq_vs1_neq_vs2_neq_vm, 0 },
++{"vwaddu.vx",  0, INSN_CLASS_V,  "Vd,Vt,sVm", MATCH_VWADDUVX, MASK_VWADDUVX, match_widen_vd_neq_vs2_neq_vm, 0 },
++{"vwsubu.vv",  0, INSN_CLASS_V,  "Vd,Vt,VsVm", MATCH_VWSUBUVV, MASK_VWSUBUVV, match_widen_vd_neq_vs1_neq_vs2_neq_vm, 0 },
++{"vwsubu.vx",  0, INSN_CLASS_V,  "Vd,Vt,sVm", MATCH_VWSUBUVX, MASK_VWSUBUVX, match_widen_vd_neq_vs2_neq_vm, 0 },
++{"vwadd.vv",   0, INSN_CLASS_V,  "Vd,Vt,VsVm", MATCH_VWADDVV, MASK_VWADDVV, match_widen_vd_neq_vs1_neq_vs2_neq_vm, 0 },
++{"vwadd.vx",   0, INSN_CLASS_V,  "Vd,Vt,sVm", MATCH_VWADDVX, MASK_VWADDVX, match_widen_vd_neq_vs2_neq_vm, 0 },
++{"vwsub.vv",   0, INSN_CLASS_V,  "Vd,Vt,VsVm", MATCH_VWSUBVV, MASK_VWSUBVV, match_widen_vd_neq_vs1_neq_vs2_neq_vm, 0 },
++{"vwsub.vx",   0, INSN_CLASS_V,  "Vd,Vt,sVm", MATCH_VWSUBVX, MASK_VWSUBVX, match_widen_vd_neq_vs2_neq_vm, 0 },
++{"vwaddu.wv",  0, INSN_CLASS_V,  "Vd,Vt,VsVm", MATCH_VWADDUWV, MASK_VWADDUWV, match_widen_vd_neq_vs1_neq_vm, 0 },
++{"vwaddu.wx",  0, INSN_CLASS_V,  "Vd,Vt,sVm", MATCH_VWADDUWX, MASK_VWADDUWX, match_widen_vd_neq_vm, 0 },
++{"vwsubu.wv",  0, INSN_CLASS_V,  "Vd,Vt,VsVm", MATCH_VWSUBUWV, MASK_VWSUBUWV, match_widen_vd_neq_vs1_neq_vm, 0 },
++{"vwsubu.wx",  0, INSN_CLASS_V,  "Vd,Vt,sVm", MATCH_VWSUBUWX, MASK_VWSUBUWX, match_widen_vd_neq_vm, 0 },
++{"vwadd.wv",   0, INSN_CLASS_V,  "Vd,Vt,VsVm", MATCH_VWADDWV, MASK_VWADDWV, match_widen_vd_neq_vs1_neq_vm, 0 },
++{"vwadd.wx",   0, INSN_CLASS_V,  "Vd,Vt,sVm", MATCH_VWADDWX, MASK_VWADDWX, match_widen_vd_neq_vm, 0 },
++{"vwsub.wv",   0, INSN_CLASS_V,  "Vd,Vt,VsVm", MATCH_VWSUBWV, MASK_VWSUBWV, match_widen_vd_neq_vs1_neq_vm, 0 },
++{"vwsub.wx",   0, INSN_CLASS_V,  "Vd,Vt,sVm", MATCH_VWSUBWX, MASK_VWSUBWX, match_widen_vd_neq_vm, 0 },
++
++{"vzext.vf2",  0, INSN_CLASS_V,  "Vd,VtVm", MATCH_VZEXT_VF2, MASK_VZEXT_VF2, match_vd_neq_vm, 0 },
++{"vsext.vf2",  0, INSN_CLASS_V,  "Vd,VtVm", MATCH_VSEXT_VF2, MASK_VSEXT_VF2, match_vd_neq_vm, 0 },
++{"vzext.vf4",  0, INSN_CLASS_V,  "Vd,VtVm", MATCH_VZEXT_VF4, MASK_VZEXT_VF4, match_vd_neq_vm, 0 },
++{"vsext.vf4",  0, INSN_CLASS_V,  "Vd,VtVm", MATCH_VSEXT_VF4, MASK_VSEXT_VF4, match_vd_neq_vm, 0 },
++{"vzext.vf8",  0, INSN_CLASS_V,  "Vd,VtVm", MATCH_VZEXT_VF8, MASK_VZEXT_VF8, match_vd_neq_vm, 0 },
++{"vsext.vf8",  0, INSN_CLASS_V,  "Vd,VtVm", MATCH_VSEXT_VF8, MASK_VSEXT_VF8, match_vd_neq_vm, 0 },
++
++{"vadc.vvm",   0, INSN_CLASS_V,  "Vd,Vt,Vs,V0", MATCH_VADCVVM, MASK_VADCVVM, match_vd_neq_vm, 0 },
++{"vadc.vxm",   0, INSN_CLASS_V,  "Vd,Vt,s,V0", MATCH_VADCVXM, MASK_VADCVXM, match_vd_neq_vm, 0 },
++{"vadc.vim",   0, INSN_CLASS_V,  "Vd,Vt,Vi,V0", MATCH_VADCVIM, MASK_VADCVIM, match_vd_neq_vm, 0 },
++{"vmadc.vvm",  0, INSN_CLASS_V,  "Vd,Vt,Vs,V0", MATCH_VMADCVVM, MASK_VMADCVVM, match_opcode, 0 },
++{"vmadc.vxm",  0, INSN_CLASS_V,  "Vd,Vt,s,V0", MATCH_VMADCVXM, MASK_VMADCVXM, match_opcode, 0 },
++{"vmadc.vim",  0, INSN_CLASS_V,  "Vd,Vt,Vi,V0", MATCH_VMADCVIM, MASK_VMADCVIM, match_opcode, 0 },
++{"vmadc.vv",   0, INSN_CLASS_V,  "Vd,Vt,Vs", MATCH_VMADCVV, MASK_VMADCVV, match_opcode, 0 },
++{"vmadc.vx",   0, INSN_CLASS_V,  "Vd,Vt,s", MATCH_VMADCVX, MASK_VMADCVX, match_opcode, 0 },
++{"vmadc.vi",   0, INSN_CLASS_V,  "Vd,Vt,Vi", MATCH_VMADCVI, MASK_VMADCVI, match_opcode, 0 },
++{"vsbc.vvm",   0, INSN_CLASS_V,  "Vd,Vt,Vs,V0", MATCH_VSBCVVM, MASK_VSBCVVM, match_vd_neq_vm, 0 },
++{"vsbc.vxm",   0, INSN_CLASS_V,  "Vd,Vt,s,V0", MATCH_VSBCVXM, MASK_VSBCVXM, match_vd_neq_vm, 0 },
++{"vmsbc.vvm",  0, INSN_CLASS_V,  "Vd,Vt,Vs,V0", MATCH_VMSBCVVM, MASK_VMSBCVVM, match_opcode, 0 },
++{"vmsbc.vxm",  0, INSN_CLASS_V,  "Vd,Vt,s,V0", MATCH_VMSBCVXM, MASK_VMSBCVXM, match_opcode, 0 },
++{"vmsbc.vv",   0, INSN_CLASS_V,  "Vd,Vt,Vs", MATCH_VMSBCVV, MASK_VMSBCVV, match_opcode, 0 },
++{"vmsbc.vx",   0, INSN_CLASS_V,  "Vd,Vt,s", MATCH_VMSBCVX, MASK_VMSBCVX, match_opcode, 0 },
++
++{"vnot.v",     0, INSN_CLASS_V,  "Vd,VtVm", MATCH_VNOTV, MASK_VNOTV, match_vd_neq_vm, INSN_ALIAS },
++
++{"vand.vv",    0, INSN_CLASS_V,  "Vd,Vt,VsVm", MATCH_VANDVV, MASK_VANDVV, match_vd_neq_vm, 0 },
++{"vand.vx",    0, INSN_CLASS_V,  "Vd,Vt,sVm", MATCH_VANDVX, MASK_VANDVX, match_vd_neq_vm, 0 },
++{"vand.vi",    0, INSN_CLASS_V,  "Vd,Vt,ViVm", MATCH_VANDVI, MASK_VANDVI, match_vd_neq_vm, 0 },
++{"vor.vv",     0, INSN_CLASS_V,  "Vd,Vt,VsVm", MATCH_VORVV, MASK_VORVV, match_vd_neq_vm, 0 },
++{"vor.vx",     0, INSN_CLASS_V,  "Vd,Vt,sVm", MATCH_VORVX, MASK_VORVX, match_vd_neq_vm, 0 },
++{"vor.vi",     0, INSN_CLASS_V,  "Vd,Vt,ViVm", MATCH_VORVI, MASK_VORVI, match_vd_neq_vm, 0 },
++{"vxor.vv",    0, INSN_CLASS_V,  "Vd,Vt,VsVm", MATCH_VXORVV, MASK_VXORVV, match_vd_neq_vm, 0 },
++{"vxor.vx",    0, INSN_CLASS_V,  "Vd,Vt,sVm", MATCH_VXORVX, MASK_VXORVX, match_vd_neq_vm, 0 },
++{"vxor.vi",    0, INSN_CLASS_V,  "Vd,Vt,ViVm", MATCH_VXORVI, MASK_VXORVI, match_vd_neq_vm, 0 },
++
++{"vsll.vv",    0, INSN_CLASS_V,  "Vd,Vt,VsVm", MATCH_VSLLVV, MASK_VSLLVV, match_vd_neq_vm, 0 },
++{"vsll.vx",    0, INSN_CLASS_V,  "Vd,Vt,sVm", MATCH_VSLLVX, MASK_VSLLVX, match_vd_neq_vm, 0 },
++{"vsll.vi",    0, INSN_CLASS_V,  "Vd,Vt,VjVm", MATCH_VSLLVI, MASK_VSLLVI, match_vd_neq_vm, 0 },
++{"vsrl.vv",    0, INSN_CLASS_V,  "Vd,Vt,VsVm", MATCH_VSRLVV, MASK_VSRLVV, match_vd_neq_vm, 0 },
++{"vsrl.vx",    0, INSN_CLASS_V,  "Vd,Vt,sVm", MATCH_VSRLVX, MASK_VSRLVX, match_vd_neq_vm, 0 },
++{"vsrl.vi",    0, INSN_CLASS_V,  "Vd,Vt,VjVm", MATCH_VSRLVI, MASK_VSRLVI, match_vd_neq_vm, 0 },
++{"vsra.vv",    0, INSN_CLASS_V,  "Vd,Vt,VsVm", MATCH_VSRAVV, MASK_VSRAVV, match_vd_neq_vm, 0 },
++{"vsra.vx",    0, INSN_CLASS_V,  "Vd,Vt,sVm", MATCH_VSRAVX, MASK_VSRAVX, match_vd_neq_vm, 0 },
++{"vsra.vi",    0, INSN_CLASS_V,  "Vd,Vt,VjVm", MATCH_VSRAVI, MASK_VSRAVI, match_vd_neq_vm, 0 },
++
++{"vncvt.x.x.w",0, INSN_CLASS_V,  "Vd,VtVm", MATCH_VNCVTXXW, MASK_VNCVTXXW, match_narrow_vd_neq_vs2_neq_vm, INSN_ALIAS },
++
++{"vnsrl.wv",   0, INSN_CLASS_V,  "Vd,Vt,VsVm", MATCH_VNSRLWV, MASK_VNSRLWV, match_narrow_vd_neq_vs2_neq_vm, 0 },
++{"vnsrl.wx",   0, INSN_CLASS_V,  "Vd,Vt,sVm", MATCH_VNSRLWX, MASK_VNSRLWX, match_narrow_vd_neq_vs2_neq_vm, 0 },
++{"vnsrl.wi",   0, INSN_CLASS_V,  "Vd,Vt,VjVm", MATCH_VNSRLWI, MASK_VNSRLWI, match_narrow_vd_neq_vs2_neq_vm, 0 },
++{"vnsra.wv",   0, INSN_CLASS_V,  "Vd,Vt,VsVm", MATCH_VNSRAWV, MASK_VNSRAWV, match_narrow_vd_neq_vs2_neq_vm, 0 },
++{"vnsra.wx",   0, INSN_CLASS_V,  "Vd,Vt,sVm", MATCH_VNSRAWX, MASK_VNSRAWX, match_narrow_vd_neq_vs2_neq_vm, 0 },
++{"vnsra.wi",   0, INSN_CLASS_V,  "Vd,Vt,VjVm", MATCH_VNSRAWI, MASK_VNSRAWI, match_narrow_vd_neq_vs2_neq_vm, 0 },
++
++{"vmseq.vv",   0, INSN_CLASS_V,  "Vd,Vt,VsVm", MATCH_VMSEQVV, MASK_VMSEQVV, match_opcode, 0 },
++{"vmseq.vx",   0, INSN_CLASS_V,  "Vd,Vt,sVm", MATCH_VMSEQVX, MASK_VMSEQVX, match_opcode, 0 },
++{"vmseq.vi",   0, INSN_CLASS_V,  "Vd,Vt,ViVm", MATCH_VMSEQVI, MASK_VMSEQVI, match_opcode, 0 },
++{"vmsne.vv",   0, INSN_CLASS_V,  "Vd,Vt,VsVm", MATCH_VMSNEVV, MASK_VMSNEVV, match_opcode, 0 },
++{"vmsne.vx",   0, INSN_CLASS_V,  "Vd,Vt,sVm", MATCH_VMSNEVX, MASK_VMSNEVX, match_opcode, 0 },
++{"vmsne.vi",   0, INSN_CLASS_V,  "Vd,Vt,ViVm", MATCH_VMSNEVI, MASK_VMSNEVI, match_opcode, 0 },
++{"vmsltu.vv",  0, INSN_CLASS_V,  "Vd,Vt,VsVm", MATCH_VMSLTUVV, MASK_VMSLTUVV, match_opcode, 0 },
++{"vmsltu.vx",  0, INSN_CLASS_V,  "Vd,Vt,sVm", MATCH_VMSLTUVX, MASK_VMSLTUVX, match_opcode, 0 },
++{"vmslt.vv",   0, INSN_CLASS_V,  "Vd,Vt,VsVm", MATCH_VMSLTVV, MASK_VMSLTVV, match_opcode, 0 },
++{"vmslt.vx",   0, INSN_CLASS_V,  "Vd,Vt,sVm", MATCH_VMSLTVX, MASK_VMSLTVX, match_opcode, 0 },
++{"vmsleu.vv",  0, INSN_CLASS_V,  "Vd,Vt,VsVm", MATCH_VMSLEUVV, MASK_VMSLEUVV, match_opcode, 0 },
++{"vmsleu.vx",  0, INSN_CLASS_V,  "Vd,Vt,sVm", MATCH_VMSLEUVX, MASK_VMSLEUVX, match_opcode, 0 },
++{"vmsleu.vi",  0, INSN_CLASS_V,  "Vd,Vt,ViVm", MATCH_VMSLEUVI, MASK_VMSLEUVI, match_opcode, 0 },
++{"vmsle.vv",   0, INSN_CLASS_V,  "Vd,Vt,VsVm", MATCH_VMSLEVV, MASK_VMSLEVV, match_opcode, 0 },
++{"vmsle.vx",   0, INSN_CLASS_V,  "Vd,Vt,sVm", MATCH_VMSLEVX, MASK_VMSLEVX, match_opcode, 0 },
++{"vmsle.vi",   0, INSN_CLASS_V,  "Vd,Vt,ViVm", MATCH_VMSLEVI, MASK_VMSLEVI, match_opcode, 0 },
++{"vmsgtu.vx",  0, INSN_CLASS_V,  "Vd,Vt,sVm", MATCH_VMSGTUVX, MASK_VMSGTUVX, match_opcode, 0 },
++{"vmsgtu.vi",  0, INSN_CLASS_V,  "Vd,Vt,ViVm", MATCH_VMSGTUVI, MASK_VMSGTUVI, match_opcode, 0 },
++{"vmsgt.vx",   0, INSN_CLASS_V,  "Vd,Vt,sVm", MATCH_VMSGTVX, MASK_VMSGTVX, match_opcode, 0 },
++{"vmsgt.vi",   0, INSN_CLASS_V,  "Vd,Vt,ViVm", MATCH_VMSGTVI, MASK_VMSGTVI, match_opcode, 0 },
++
++/* These aliases are for assembly but not disassembly.  */
++{"vmsgt.vv",   0, INSN_CLASS_V,  "Vd,Vs,VtVm", MATCH_VMSLTVV, MASK_VMSLTVV, match_opcode, INSN_ALIAS },
++{"vmsgtu.vv",  0, INSN_CLASS_V,  "Vd,Vs,VtVm", MATCH_VMSLTUVV, MASK_VMSLTUVV, match_opcode, INSN_ALIAS },
++{"vmsge.vv",   0, INSN_CLASS_V,  "Vd,Vs,VtVm", MATCH_VMSLEVV, MASK_VMSLEVV, match_opcode, INSN_ALIAS },
++{"vmsgeu.vv",  0, INSN_CLASS_V,  "Vd,Vs,VtVm", MATCH_VMSLEUVV, MASK_VMSLEUVV, match_opcode, INSN_ALIAS },
++{"vmslt.vi",   0, INSN_CLASS_V,  "Vd,Vt,VkVm", MATCH_VMSLEVI, MASK_VMSLEVI, match_opcode, INSN_ALIAS },
++{"vmsltu.vi",  0, INSN_CLASS_V,  "Vd,Vu,0Vm", MATCH_VMSNEVV, MASK_VMSNEVV, match_opcode, INSN_ALIAS },
++{"vmsltu.vi",  0, INSN_CLASS_V,  "Vd,Vt,VkVm", MATCH_VMSLEUVI, MASK_VMSLEUVI, match_opcode, INSN_ALIAS },
++{"vmsge.vi",   0, INSN_CLASS_V,  "Vd,Vt,VkVm", MATCH_VMSGTVI, MASK_VMSGTVI, match_opcode, INSN_ALIAS },
++{"vmsgeu.vi",  0, INSN_CLASS_V,  "Vd,Vu,0Vm", MATCH_VMSEQVV, MASK_VMSEQVV, match_opcode, INSN_ALIAS },
++{"vmsgeu.vi",  0, INSN_CLASS_V,  "Vd,Vt,VkVm", MATCH_VMSGTUVI, MASK_VMSGTUVI, match_opcode, INSN_ALIAS },
++
++{"vmsge.vx",   0, INSN_CLASS_V, "Vd,Vt,sVm", 0, (int) M_VMSGE, match_never, INSN_MACRO },
++{"vmsge.vx",   0, INSN_CLASS_V, "Vd,Vt,s,VM,VT", 0, (int) M_VMSGE, match_never, INSN_MACRO },
++{"vmsgeu.vx",  0, INSN_CLASS_V, "Vd,Vt,sVm", 0, (int) M_VMSGEU, match_never, INSN_MACRO },
++{"vmsgeu.vx",  0, INSN_CLASS_V, "Vd,Vt,s,VM,VT", 0, (int) M_VMSGEU, match_never, INSN_MACRO },
++
++{"vminu.vv",   0, INSN_CLASS_V,  "Vd,Vt,VsVm", MATCH_VMINUVV, MASK_VMINUVV, match_vd_neq_vm, 0},
++{"vminu.vx",   0, INSN_CLASS_V,  "Vd,Vt,sVm", MATCH_VMINUVX, MASK_VMINUVX, match_vd_neq_vm, 0},
++{"vmin.vv",    0, INSN_CLASS_V,  "Vd,Vt,VsVm", MATCH_VMINVV, MASK_VMINVV, match_vd_neq_vm, 0},
++{"vmin.vx",    0, INSN_CLASS_V,  "Vd,Vt,sVm", MATCH_VMINVX, MASK_VMINVX, match_vd_neq_vm, 0},
++{"vmaxu.vv",   0, INSN_CLASS_V,  "Vd,Vt,VsVm", MATCH_VMAXUVV, MASK_VMAXUVV, match_vd_neq_vm, 0},
++{"vmaxu.vx",   0, INSN_CLASS_V,  "Vd,Vt,sVm", MATCH_VMAXUVX, MASK_VMAXUVX, match_vd_neq_vm, 0},
++{"vmax.vv",    0, INSN_CLASS_V,  "Vd,Vt,VsVm", MATCH_VMAXVV, MASK_VMAXVV, match_vd_neq_vm, 0},
++{"vmax.vx",    0, INSN_CLASS_V,  "Vd,Vt,sVm", MATCH_VMAXVX, MASK_VMAXVX, match_vd_neq_vm, 0},
++
++{"vmul.vv",    0, INSN_CLASS_V,  "Vd,Vt,VsVm", MATCH_VMULVV, MASK_VMULVV, match_vd_neq_vm, 0 },
++{"vmul.vx",    0, INSN_CLASS_V,  "Vd,Vt,sVm", MATCH_VMULVX, MASK_VMULVX, match_vd_neq_vm, 0 },
++{"vmulh.vv",   0, INSN_CLASS_V,  "Vd,Vt,VsVm", MATCH_VMULHVV, MASK_VMULHVV, match_vd_neq_vm, 0 },
++{"vmulh.vx",   0, INSN_CLASS_V,  "Vd,Vt,sVm", MATCH_VMULHVX, MASK_VMULHVX, match_vd_neq_vm, 0 },
++{"vmulhu.vv",  0, INSN_CLASS_V,  "Vd,Vt,VsVm", MATCH_VMULHUVV, MASK_VMULHUVV, match_vd_neq_vm, 0 },
++{"vmulhu.vx",  0, INSN_CLASS_V,  "Vd,Vt,sVm", MATCH_VMULHUVX, MASK_VMULHUVX, match_vd_neq_vm, 0 },
++{"vmulhsu.vv", 0, INSN_CLASS_V,  "Vd,Vt,VsVm", MATCH_VMULHSUVV, MASK_VMULHSUVV, match_vd_neq_vm, 0 },
++{"vmulhsu.vx", 0, INSN_CLASS_V,  "Vd,Vt,sVm", MATCH_VMULHSUVX, MASK_VMULHSUVX, match_vd_neq_vm, 0 },
++
++{"vwmul.vv",   0, INSN_CLASS_V,  "Vd,Vt,VsVm", MATCH_VWMULVV, MASK_VWMULVV, match_widen_vd_neq_vs1_neq_vs2_neq_vm, 0 },
++{"vwmul.vx",   0, INSN_CLASS_V,  "Vd,Vt,sVm", MATCH_VWMULVX, MASK_VWMULVX, match_widen_vd_neq_vs2_neq_vm, 0 },
++{"vwmulu.vv",  0, INSN_CLASS_V,  "Vd,Vt,VsVm", MATCH_VWMULUVV, MASK_VWMULUVV, match_widen_vd_neq_vs1_neq_vs2_neq_vm, 0 },
++{"vwmulu.vx",  0, INSN_CLASS_V,  "Vd,Vt,sVm", MATCH_VWMULUVX, MASK_VWMULUVX, match_widen_vd_neq_vs2_neq_vm, 0 },
++{"vwmulsu.vv", 0, INSN_CLASS_V,  "Vd,Vt,VsVm", MATCH_VWMULSUVV, MASK_VWMULSUVV, match_widen_vd_neq_vs1_neq_vs2_neq_vm, 0 },
++{"vwmulsu.vx", 0, INSN_CLASS_V,  "Vd,Vt,sVm", MATCH_VWMULSUVX, MASK_VWMULSUVX, match_widen_vd_neq_vs2_neq_vm, 0 },
++
++{"vmacc.vv",   0, INSN_CLASS_V,  "Vd,Vs,VtVm", MATCH_VMACCVV, MASK_VMACCVV, match_vd_neq_vm, 0},
++{"vmacc.vx",   0, INSN_CLASS_V,  "Vd,s,VtVm", MATCH_VMACCVX, MASK_VMACCVX, match_vd_neq_vm, 0},
++{"vnmsac.vv",  0, INSN_CLASS_V,  "Vd,Vs,VtVm", MATCH_VNMSACVV, MASK_VNMSACVV, match_vd_neq_vm, 0},
++{"vnmsac.vx",  0, INSN_CLASS_V,  "Vd,s,VtVm", MATCH_VNMSACVX, MASK_VNMSACVX, match_vd_neq_vm, 0},
++{"vmadd.vv",   0, INSN_CLASS_V,  "Vd,Vs,VtVm", MATCH_VMADDVV, MASK_VMADDVV, match_vd_neq_vm, 0},
++{"vmadd.vx",   0, INSN_CLASS_V,  "Vd,s,VtVm", MATCH_VMADDVX, MASK_VMADDVX, match_vd_neq_vm, 0},
++{"vnmsub.vv",  0, INSN_CLASS_V,  "Vd,Vs,VtVm", MATCH_VNMSUBVV, MASK_VNMSUBVV, match_vd_neq_vm, 0},
++{"vnmsub.vx",  0, INSN_CLASS_V,  "Vd,s,VtVm", MATCH_VNMSUBVX, MASK_VNMSUBVX, match_vd_neq_vm, 0},
++
++{"vwmaccu.vv",  0, INSN_CLASS_V,  "Vd,Vs,VtVm", MATCH_VWMACCUVV, MASK_VWMACCUVV, match_widen_vd_neq_vs1_neq_vs2_neq_vm, 0},
++{"vwmaccu.vx",  0, INSN_CLASS_V,  "Vd,s,VtVm", MATCH_VWMACCUVX, MASK_VWMACCUVX, match_widen_vd_neq_vs2_neq_vm, 0},
++{"vwmacc.vv",   0, INSN_CLASS_V,  "Vd,Vs,VtVm", MATCH_VWMACCVV, MASK_VWMACCVV, match_widen_vd_neq_vs1_neq_vs2_neq_vm, 0},
++{"vwmacc.vx",   0, INSN_CLASS_V,  "Vd,s,VtVm", MATCH_VWMACCVX, MASK_VWMACCVX, match_widen_vd_neq_vs2_neq_vm, 0},
++{"vwmaccsu.vv", 0, INSN_CLASS_V,  "Vd,Vs,VtVm", MATCH_VWMACCSUVV, MASK_VWMACCSUVV, match_widen_vd_neq_vs1_neq_vs2_neq_vm, 0},
++{"vwmaccsu.vx", 0, INSN_CLASS_V,  "Vd,s,VtVm", MATCH_VWMACCSUVX, MASK_VWMACCSUVX, match_widen_vd_neq_vs2_neq_vm, 0},
++{"vwmaccus.vx", 0, INSN_CLASS_V,  "Vd,s,VtVm", MATCH_VWMACCUSVX, MASK_VWMACCUSVX, match_widen_vd_neq_vs2_neq_vm, 0},
++
++{"vdivu.vv",   0, INSN_CLASS_V,  "Vd,Vt,VsVm", MATCH_VDIVUVV, MASK_VDIVUVV, match_vd_neq_vm, 0 },
++{"vdivu.vx",   0, INSN_CLASS_V,  "Vd,Vt,sVm", MATCH_VDIVUVX, MASK_VDIVUVX, match_vd_neq_vm, 0 },
++{"vdiv.vv",    0, INSN_CLASS_V,  "Vd,Vt,VsVm", MATCH_VDIVVV, MASK_VDIVVV, match_vd_neq_vm, 0 },
++{"vdiv.vx",    0, INSN_CLASS_V,  "Vd,Vt,sVm", MATCH_VDIVVX, MASK_VDIVVX, match_vd_neq_vm, 0 },
++{"vremu.vv",   0, INSN_CLASS_V,  "Vd,Vt,VsVm", MATCH_VREMUVV, MASK_VREMUVV, match_vd_neq_vm, 0 },
++{"vremu.vx",   0, INSN_CLASS_V,  "Vd,Vt,sVm", MATCH_VREMUVX, MASK_VREMUVX, match_vd_neq_vm, 0 },
++{"vrem.vv",    0, INSN_CLASS_V,  "Vd,Vt,VsVm", MATCH_VREMVV, MASK_VREMVV, match_vd_neq_vm, 0 },
++{"vrem.vx",    0, INSN_CLASS_V,  "Vd,Vt,sVm", MATCH_VREMVX, MASK_VREMVX, match_vd_neq_vm, 0 },
++
++{"vmerge.vvm", 0, INSN_CLASS_V,  "Vd,Vt,Vs,V0", MATCH_VMERGEVVM, MASK_VMERGEVVM, match_opcode, 0 },
++{"vmerge.vxm", 0, INSN_CLASS_V,  "Vd,Vt,s,V0", MATCH_VMERGEVXM, MASK_VMERGEVXM, match_opcode, 0 },
++{"vmerge.vim", 0, INSN_CLASS_V,  "Vd,Vt,Vi,V0", MATCH_VMERGEVIM, MASK_VMERGEVIM, match_opcode, 0 },
++
++{"vmv.v.v",    0, INSN_CLASS_V,  "Vd,Vs", MATCH_VMVVV, MASK_VMVVV, match_opcode, 0 },
++{"vmv.v.x",    0, INSN_CLASS_V,  "Vd,s", MATCH_VMVVX, MASK_VMVVX, match_opcode, 0 },
++{"vmv.v.i",    0, INSN_CLASS_V,  "Vd,Vi", MATCH_VMVVI, MASK_VMVVI, match_opcode, 0 },
++
++{"vsaddu.vv",  0, INSN_CLASS_V,  "Vd,Vt,VsVm", MATCH_VSADDUVV, MASK_VSADDUVV, match_vd_neq_vm, 0 },
++{"vsaddu.vx",  0, INSN_CLASS_V,  "Vd,Vt,sVm", MATCH_VSADDUVX, MASK_VSADDUVX, match_vd_neq_vm, 0 },
++{"vsaddu.vi",  0, INSN_CLASS_V,  "Vd,Vt,ViVm", MATCH_VSADDUVI, MASK_VSADDUVI, match_vd_neq_vm, 0 },
++{"vsadd.vv",   0, INSN_CLASS_V,  "Vd,Vt,VsVm", MATCH_VSADDVV, MASK_VSADDVV, match_vd_neq_vm, 0 },
++{"vsadd.vx",   0, INSN_CLASS_V,  "Vd,Vt,sVm", MATCH_VSADDVX, MASK_VSADDVX, match_vd_neq_vm, 0 },
++{"vsadd.vi",   0, INSN_CLASS_V,  "Vd,Vt,ViVm", MATCH_VSADDVI, MASK_VSADDVI, match_vd_neq_vm, 0 },
++{"vssubu.vv",  0, INSN_CLASS_V,  "Vd,Vt,VsVm", MATCH_VSSUBUVV, MASK_VSSUBUVV, match_vd_neq_vm, 0 },
++{"vssubu.vx",  0, INSN_CLASS_V,  "Vd,Vt,sVm", MATCH_VSSUBUVX, MASK_VSSUBUVX, match_vd_neq_vm, 0 },
++{"vssub.vv",   0, INSN_CLASS_V,  "Vd,Vt,VsVm", MATCH_VSSUBVV, MASK_VSSUBVV, match_vd_neq_vm, 0 },
++{"vssub.vx",   0, INSN_CLASS_V,  "Vd,Vt,sVm", MATCH_VSSUBVX, MASK_VSSUBVX, match_vd_neq_vm, 0 },
++
++{"vaaddu.vv",  0, INSN_CLASS_V,  "Vd,Vt,VsVm", MATCH_VAADDUVV, MASK_VAADDUVV, match_vd_neq_vm, 0 },
++{"vaaddu.vx",  0, INSN_CLASS_V,  "Vd,Vt,sVm", MATCH_VAADDUVX, MASK_VAADDUVX, match_vd_neq_vm, 0 },
++{"vaadd.vv",   0, INSN_CLASS_V,  "Vd,Vt,VsVm", MATCH_VAADDVV, MASK_VAADDVV, match_vd_neq_vm, 0 },
++{"vaadd.vx",   0, INSN_CLASS_V,  "Vd,Vt,sVm", MATCH_VAADDVX, MASK_VAADDVX, match_vd_neq_vm, 0 },
++{"vasubu.vv",  0, INSN_CLASS_V,  "Vd,Vt,VsVm", MATCH_VASUBUVV, MASK_VASUBUVV, match_vd_neq_vm, 0 },
++{"vasubu.vx",  0, INSN_CLASS_V,  "Vd,Vt,sVm", MATCH_VASUBUVX, MASK_VASUBUVX, match_vd_neq_vm, 0 },
++{"vasub.vv",   0, INSN_CLASS_V,  "Vd,Vt,VsVm", MATCH_VASUBVV, MASK_VASUBVV, match_vd_neq_vm, 0 },
++{"vasub.vx",   0, INSN_CLASS_V,  "Vd,Vt,sVm", MATCH_VASUBVX, MASK_VASUBVX, match_vd_neq_vm, 0 },
++
++{"vsmul.vv",   0, INSN_CLASS_V,  "Vd,Vt,VsVm", MATCH_VSMULVV, MASK_VSMULVV, match_vd_neq_vm, 0 },
++{"vsmul.vx",   0, INSN_CLASS_V,  "Vd,Vt,sVm", MATCH_VSMULVX, MASK_VSMULVX, match_vd_neq_vm, 0 },
++
++{"vssrl.vv",    0, INSN_CLASS_V,  "Vd,Vt,VsVm", MATCH_VSSRLVV, MASK_VSSRLVV, match_vd_neq_vm, 0 },
++{"vssrl.vx",    0, INSN_CLASS_V,  "Vd,Vt,sVm", MATCH_VSSRLVX, MASK_VSSRLVX, match_vd_neq_vm, 0 },
++{"vssrl.vi",    0, INSN_CLASS_V,  "Vd,Vt,VjVm", MATCH_VSSRLVI, MASK_VSSRLVI, match_vd_neq_vm, 0 },
++{"vssra.vv",    0, INSN_CLASS_V,  "Vd,Vt,VsVm", MATCH_VSSRAVV, MASK_VSSRAVV, match_vd_neq_vm, 0 },
++{"vssra.vx",    0, INSN_CLASS_V,  "Vd,Vt,sVm", MATCH_VSSRAVX, MASK_VSSRAVX, match_vd_neq_vm, 0 },
++{"vssra.vi",    0, INSN_CLASS_V,  "Vd,Vt,VjVm", MATCH_VSSRAVI, MASK_VSSRAVI, match_vd_neq_vm, 0 },
++
++{"vnclipu.wv",   0, INSN_CLASS_V,  "Vd,Vt,VsVm", MATCH_VNCLIPUWV, MASK_VNCLIPUWV, match_narrow_vd_neq_vs2_neq_vm, 0 },
++{"vnclipu.wx",   0, INSN_CLASS_V,  "Vd,Vt,sVm", MATCH_VNCLIPUWX, MASK_VNCLIPUWX, match_narrow_vd_neq_vs2_neq_vm, 0 },
++{"vnclipu.wi",   0, INSN_CLASS_V,  "Vd,Vt,VjVm", MATCH_VNCLIPUWI, MASK_VNCLIPUWI, match_narrow_vd_neq_vs2_neq_vm, 0 },
++{"vnclip.wv",   0, INSN_CLASS_V,  "Vd,Vt,VsVm", MATCH_VNCLIPWV, MASK_VNCLIPWV, match_narrow_vd_neq_vs2_neq_vm, 0 },
++{"vnclip.wx",   0, INSN_CLASS_V,  "Vd,Vt,sVm", MATCH_VNCLIPWX, MASK_VNCLIPWX, match_narrow_vd_neq_vs2_neq_vm, 0 },
++{"vnclip.wi",   0, INSN_CLASS_V,  "Vd,Vt,VjVm", MATCH_VNCLIPWI, MASK_VNCLIPWI, match_narrow_vd_neq_vs2_neq_vm, 0 },
++
++{"vfadd.vv",   0, INSN_CLASS_V_AND_F, "Vd,Vt,VsVm", MATCH_VFADDVV, MASK_VFADDVV, match_vd_neq_vm, 0},
++{"vfadd.vf",   0, INSN_CLASS_V_AND_F, "Vd,Vt,SVm", MATCH_VFADDVF, MASK_VFADDVF, match_vd_neq_vm, 0},
++{"vfsub.vv",   0, INSN_CLASS_V_AND_F, "Vd,Vt,VsVm", MATCH_VFSUBVV, MASK_VFSUBVV, match_vd_neq_vm, 0},
++{"vfsub.vf",   0, INSN_CLASS_V_AND_F, "Vd,Vt,SVm", MATCH_VFSUBVF, MASK_VFSUBVF, match_vd_neq_vm, 0},
++{"vfrsub.vf",  0, INSN_CLASS_V_AND_F, "Vd,Vt,SVm", MATCH_VFRSUBVF, MASK_VFRSUBVF, match_vd_neq_vm, 0},
++
++{"vfwadd.vv",   0, INSN_CLASS_V_AND_F, "Vd,Vt,VsVm", MATCH_VFWADDVV, MASK_VFWADDVV, match_widen_vd_neq_vs1_neq_vs2_neq_vm, 0},
++{"vfwadd.vf",   0, INSN_CLASS_V_AND_F, "Vd,Vt,SVm", MATCH_VFWADDVF, MASK_VFWADDVF, match_widen_vd_neq_vs2_neq_vm, 0},
++{"vfwsub.vv",   0, INSN_CLASS_V_AND_F, "Vd,Vt,VsVm", MATCH_VFWSUBVV, MASK_VFWSUBVV, match_widen_vd_neq_vs1_neq_vs2_neq_vm, 0},
++{"vfwsub.vf",   0, INSN_CLASS_V_AND_F, "Vd,Vt,SVm", MATCH_VFWSUBVF, MASK_VFWSUBVF, match_widen_vd_neq_vs2_neq_vm, 0},
++{"vfwadd.wv",   0, INSN_CLASS_V_AND_F, "Vd,Vt,VsVm", MATCH_VFWADDWV, MASK_VFWADDWV, match_widen_vd_neq_vs1_neq_vm, 0},
++{"vfwadd.wf",   0, INSN_CLASS_V_AND_F, "Vd,Vt,SVm", MATCH_VFWADDWF, MASK_VFWADDWF, match_widen_vd_neq_vm, 0},
++{"vfwsub.wv",   0, INSN_CLASS_V_AND_F, "Vd,Vt,VsVm", MATCH_VFWSUBWV, MASK_VFWSUBWV, match_widen_vd_neq_vs1_neq_vm, 0},
++{"vfwsub.wf",   0, INSN_CLASS_V_AND_F, "Vd,Vt,SVm", MATCH_VFWSUBWF, MASK_VFWSUBWF, match_widen_vd_neq_vm, 0},
++
++{"vfmul.vv",   0, INSN_CLASS_V_AND_F, "Vd,Vt,VsVm", MATCH_VFMULVV, MASK_VFMULVV, match_vd_neq_vm, 0},
++{"vfmul.vf",   0, INSN_CLASS_V_AND_F, "Vd,Vt,SVm", MATCH_VFMULVF, MASK_VFMULVF, match_vd_neq_vm, 0},
++{"vfdiv.vv",   0, INSN_CLASS_V_AND_F, "Vd,Vt,VsVm", MATCH_VFDIVVV, MASK_VFDIVVV, match_vd_neq_vm, 0},
++{"vfdiv.vf",   0, INSN_CLASS_V_AND_F, "Vd,Vt,SVm", MATCH_VFDIVVF, MASK_VFDIVVF, match_vd_neq_vm, 0},
++{"vfrdiv.vf",  0, INSN_CLASS_V_AND_F, "Vd,Vt,SVm", MATCH_VFRDIVVF, MASK_VFRDIVVF, match_vd_neq_vm, 0},
++
++{"vfwmul.vv",  0, INSN_CLASS_V_AND_F, "Vd,Vt,VsVm", MATCH_VFWMULVV, MASK_VFWMULVV, match_widen_vd_neq_vs1_neq_vs2_neq_vm, 0},
++{"vfwmul.vf",  0, INSN_CLASS_V_AND_F, "Vd,Vt,SVm", MATCH_VFWMULVF, MASK_VFWMULVF, match_widen_vd_neq_vs2_neq_vm, 0},
++
++{"vfmadd.vv",  0, INSN_CLASS_V_AND_F, "Vd,Vs,VtVm", MATCH_VFMADDVV, MASK_VFMADDVV, match_vd_neq_vm, 0},
++{"vfmadd.vf",  0, INSN_CLASS_V_AND_F, "Vd,S,VtVm", MATCH_VFMADDVF, MASK_VFMADDVF, match_vd_neq_vm, 0},
++{"vfnmadd.vv", 0, INSN_CLASS_V_AND_F, "Vd,Vs,VtVm", MATCH_VFNMADDVV, MASK_VFNMADDVV, match_vd_neq_vm, 0},
++{"vfnmadd.vf", 0, INSN_CLASS_V_AND_F, "Vd,S,VtVm", MATCH_VFNMADDVF, MASK_VFNMADDVF, match_vd_neq_vm, 0},
++{"vfmsub.vv",  0, INSN_CLASS_V_AND_F, "Vd,Vs,VtVm", MATCH_VFMSUBVV, MASK_VFMSUBVV, match_vd_neq_vm, 0},
++{"vfmsub.vf",  0, INSN_CLASS_V_AND_F, "Vd,S,VtVm", MATCH_VFMSUBVF, MASK_VFMSUBVF, match_vd_neq_vm, 0},
++{"vfnmsub.vv", 0, INSN_CLASS_V_AND_F, "Vd,Vs,VtVm", MATCH_VFNMSUBVV, MASK_VFNMSUBVV, match_vd_neq_vm, 0},
++{"vfnmsub.vf", 0, INSN_CLASS_V_AND_F, "Vd,S,VtVm", MATCH_VFNMSUBVF, MASK_VFNMSUBVF, match_vd_neq_vm, 0},
++{"vfmacc.vv",  0, INSN_CLASS_V_AND_F, "Vd,Vs,VtVm", MATCH_VFMACCVV, MASK_VFMACCVV, match_vd_neq_vm, 0},
++{"vfmacc.vf",  0, INSN_CLASS_V_AND_F, "Vd,S,VtVm", MATCH_VFMACCVF, MASK_VFMACCVF, match_vd_neq_vm, 0},
++{"vfnmacc.vv", 0, INSN_CLASS_V_AND_F, "Vd,Vs,VtVm", MATCH_VFNMACCVV, MASK_VFNMACCVV, match_vd_neq_vm, 0},
++{"vfnmacc.vf", 0, INSN_CLASS_V_AND_F, "Vd,S,VtVm", MATCH_VFNMACCVF, MASK_VFNMACCVF, match_vd_neq_vm, 0},
++{"vfmsac.vv",  0, INSN_CLASS_V_AND_F, "Vd,Vs,VtVm", MATCH_VFMSACVV, MASK_VFMSACVV, match_vd_neq_vm, 0},
++{"vfmsac.vf",  0, INSN_CLASS_V_AND_F, "Vd,S,VtVm", MATCH_VFMSACVF, MASK_VFMSACVF, match_vd_neq_vm, 0},
++{"vfnmsac.vv", 0, INSN_CLASS_V_AND_F, "Vd,Vs,VtVm", MATCH_VFNMSACVV, MASK_VFNMSACVV, match_vd_neq_vm, 0},
++{"vfnmsac.vf", 0, INSN_CLASS_V_AND_F, "Vd,S,VtVm", MATCH_VFNMSACVF, MASK_VFNMSACVF, match_vd_neq_vm, 0},
++
++{"vfwmacc.vv",  0, INSN_CLASS_V_AND_F, "Vd,Vs,VtVm", MATCH_VFWMACCVV, MASK_VFWMACCVV, match_widen_vd_neq_vs1_neq_vs2_neq_vm, 0},
++{"vfwmacc.vf",  0, INSN_CLASS_V_AND_F, "Vd,S,VtVm", MATCH_VFWMACCVF, MASK_VFWMACCVF, match_widen_vd_neq_vs2_neq_vm, 0},
++{"vfwnmacc.vv", 0, INSN_CLASS_V_AND_F, "Vd,Vs,VtVm", MATCH_VFWNMACCVV, MASK_VFWNMACCVV, match_widen_vd_neq_vs1_neq_vs2_neq_vm, 0},
++{"vfwnmacc.vf", 0, INSN_CLASS_V_AND_F, "Vd,S,VtVm", MATCH_VFWNMACCVF, MASK_VFWNMACCVF, match_widen_vd_neq_vs2_neq_vm, 0},
++{"vfwmsac.vv",  0, INSN_CLASS_V_AND_F, "Vd,Vs,VtVm", MATCH_VFWMSACVV, MASK_VFWMSACVV, match_widen_vd_neq_vs1_neq_vs2_neq_vm, 0},
++{"vfwmsac.vf",  0, INSN_CLASS_V_AND_F, "Vd,S,VtVm", MATCH_VFWMSACVF, MASK_VFWMSACVF, match_widen_vd_neq_vs2_neq_vm, 0},
++{"vfwnmsac.vv", 0, INSN_CLASS_V_AND_F, "Vd,Vs,VtVm", MATCH_VFWNMSACVV, MASK_VFWNMSACVV, match_widen_vd_neq_vs1_neq_vs2_neq_vm, 0},
++{"vfwnmsac.vf", 0, INSN_CLASS_V_AND_F, "Vd,S,VtVm", MATCH_VFWNMSACVF, MASK_VFWNMSACVF, match_widen_vd_neq_vs2_neq_vm, 0},
++
++{"vfsqrt.v",   0, INSN_CLASS_V_AND_F, "Vd,VtVm", MATCH_VFSQRTV, MASK_VFSQRTV, match_vd_neq_vm, 0},
++{"vfrsqrt7.v", 0, INSN_CLASS_V_AND_F, "Vd,VtVm", MATCH_VFRSQRT7V, MASK_VFRSQRT7V, match_vd_neq_vm, 0},
++{"vfrsqrte7.v",0, INSN_CLASS_V_AND_F, "Vd,VtVm", MATCH_VFRSQRT7V, MASK_VFRSQRT7V, match_vd_neq_vm, 0},
++{"vfrec7.v",   0, INSN_CLASS_V_AND_F, "Vd,VtVm", MATCH_VFREC7V, MASK_VFREC7V, match_vd_neq_vm, 0},
++{"vfrece7.v",  0, INSN_CLASS_V_AND_F, "Vd,VtVm", MATCH_VFREC7V, MASK_VFREC7V, match_vd_neq_vm, 0},
++{"vfclass.v",  0, INSN_CLASS_V_AND_F, "Vd,VtVm", MATCH_VFCLASSV, MASK_VFCLASSV, match_vd_neq_vm, 0},
++
++{"vfmin.vv",   0, INSN_CLASS_V_AND_F, "Vd,Vt,VsVm", MATCH_VFMINVV, MASK_VFMINVV, match_vd_neq_vm, 0},
++{"vfmin.vf",   0, INSN_CLASS_V_AND_F, "Vd,Vt,SVm", MATCH_VFMINVF, MASK_VFMINVF, match_vd_neq_vm, 0},
++{"vfmax.vv",   0, INSN_CLASS_V_AND_F, "Vd,Vt,VsVm", MATCH_VFMAXVV, MASK_VFMAXVV, match_vd_neq_vm, 0},
++{"vfmax.vf",   0, INSN_CLASS_V_AND_F, "Vd,Vt,SVm", MATCH_VFMAXVF, MASK_VFMAXVF, match_vd_neq_vm, 0},
++
++{"vfneg.v",    0, INSN_CLASS_V_AND_F, "Vd,VuVm", MATCH_VFSGNJNVV, MASK_VFSGNJNVV, match_vs1_eq_vs2_neq_vm, INSN_ALIAS },
++{"vfabs.v",    0, INSN_CLASS_V_AND_F, "Vd,VuVm", MATCH_VFSGNJXVV, MASK_VFSGNJXVV, match_vs1_eq_vs2_neq_vm, INSN_ALIAS },
++
++{"vfsgnj.vv",  0, INSN_CLASS_V_AND_F, "Vd,Vt,VsVm", MATCH_VFSGNJVV, MASK_VFSGNJVV, match_vd_neq_vm, 0},
++{"vfsgnj.vf",  0, INSN_CLASS_V_AND_F, "Vd,Vt,SVm", MATCH_VFSGNJVF, MASK_VFSGNJVF, match_vd_neq_vm, 0},
++{"vfsgnjn.vv", 0, INSN_CLASS_V_AND_F, "Vd,Vt,VsVm", MATCH_VFSGNJNVV, MASK_VFSGNJNVV, match_vd_neq_vm, 0},
++{"vfsgnjn.vf", 0, INSN_CLASS_V_AND_F, "Vd,Vt,SVm", MATCH_VFSGNJNVF, MASK_VFSGNJNVF, match_vd_neq_vm, 0},
++{"vfsgnjx.vv", 0, INSN_CLASS_V_AND_F, "Vd,Vt,VsVm", MATCH_VFSGNJXVV, MASK_VFSGNJXVV, match_vd_neq_vm, 0},
++{"vfsgnjx.vf", 0, INSN_CLASS_V_AND_F, "Vd,Vt,SVm", MATCH_VFSGNJXVF, MASK_VFSGNJXVF, match_vd_neq_vm, 0},
++
++{"vmfeq.vv",   0, INSN_CLASS_V_AND_F, "Vd,Vt,VsVm", MATCH_VMFEQVV, MASK_VMFEQVV, match_opcode, 0},
++{"vmfeq.vf",   0, INSN_CLASS_V_AND_F, "Vd,Vt,SVm", MATCH_VMFEQVF, MASK_VMFEQVF, match_opcode, 0},
++{"vmfne.vv",   0, INSN_CLASS_V_AND_F, "Vd,Vt,VsVm", MATCH_VMFNEVV, MASK_VMFNEVV, match_opcode, 0},
++{"vmfne.vf",   0, INSN_CLASS_V_AND_F, "Vd,Vt,SVm", MATCH_VMFNEVF, MASK_VMFNEVF, match_opcode, 0},
++{"vmflt.vv",   0, INSN_CLASS_V_AND_F, "Vd,Vt,VsVm", MATCH_VMFLTVV, MASK_VMFLTVV, match_opcode, 0},
++{"vmflt.vf",   0, INSN_CLASS_V_AND_F, "Vd,Vt,SVm", MATCH_VMFLTVF, MASK_VMFLTVF, match_opcode, 0},
++{"vmfle.vv",   0, INSN_CLASS_V_AND_F, "Vd,Vt,VsVm", MATCH_VMFLEVV, MASK_VMFLEVV, match_opcode, 0},
++{"vmfle.vf",   0, INSN_CLASS_V_AND_F, "Vd,Vt,SVm", MATCH_VMFLEVF, MASK_VMFLEVF, match_opcode, 0},
++{"vmfgt.vf",   0, INSN_CLASS_V_AND_F, "Vd,Vt,SVm", MATCH_VMFGTVF, MASK_VMFGTVF, match_opcode, 0},
++{"vmfge.vf",   0, INSN_CLASS_V_AND_F, "Vd,Vt,SVm", MATCH_VMFGEVF, MASK_VMFGEVF, match_opcode, 0},
++
++/* These aliases are for assembly but not disassembly.  */
++{"vmfgt.vv",    0, INSN_CLASS_V_AND_F, "Vd,Vs,VtVm", MATCH_VMFLTVV, MASK_VMFLTVV, match_opcode, INSN_ALIAS},
++{"vmfge.vv",   0, INSN_CLASS_V_AND_F, "Vd,Vs,VtVm", MATCH_VMFLEVV, MASK_VMFLEVV, match_opcode, INSN_ALIAS},
++
++{"vfmerge.vfm",0, INSN_CLASS_V_AND_F, "Vd,Vt,S,V0", MATCH_VFMERGEVFM, MASK_VFMERGEVFM, match_opcode, 0},
++{"vfmv.v.f",   0, INSN_CLASS_V_AND_F, "Vd,S", MATCH_VFMVVF, MASK_VFMVVF, match_opcode, 0 },
++
++{"vfcvt.xu.f.v",     0, INSN_CLASS_V_AND_F, "Vd,VtVm", MATCH_VFCVTXUFV, MASK_VFCVTXUFV, match_vd_neq_vm, 0},
++{"vfcvt.x.f.v",      0, INSN_CLASS_V_AND_F, "Vd,VtVm", MATCH_VFCVTXFV, MASK_VFCVTXFV, match_vd_neq_vm, 0},
++{"vfcvt.rtz.xu.f.v", 0, INSN_CLASS_V_AND_F, "Vd,VtVm", MATCH_VFCVTRTZXUFV, MASK_VFCVTRTZXUFV, match_vd_neq_vm, 0},
++{"vfcvt.rtz.x.f.v",  0, INSN_CLASS_V_AND_F, "Vd,VtVm", MATCH_VFCVTRTZXFV, MASK_VFCVTRTZXFV, match_vd_neq_vm, 0},
++{"vfcvt.f.xu.v",     0, INSN_CLASS_V_AND_F, "Vd,VtVm", MATCH_VFCVTFXUV, MASK_VFCVTFXUV, match_vd_neq_vm, 0},
++{"vfcvt.f.x.v",      0, INSN_CLASS_V_AND_F, "Vd,VtVm", MATCH_VFCVTFXV, MASK_VFCVTFXV, match_vd_neq_vm, 0},
++
++{"vfwcvt.xu.f.v",     0, INSN_CLASS_V_AND_F, "Vd,VtVm", MATCH_VFWCVTXUFV, MASK_VFWCVTXUFV, match_widen_vd_neq_vs2_neq_vm, 0},
++{"vfwcvt.x.f.v",      0, INSN_CLASS_V_AND_F, "Vd,VtVm", MATCH_VFWCVTXFV, MASK_VFWCVTXFV, match_widen_vd_neq_vs2_neq_vm, 0},
++{"vfwcvt.rtz.xu.f.v", 0, INSN_CLASS_V_AND_F, "Vd,VtVm", MATCH_VFWCVTRTZXUFV, MASK_VFWCVTRTZXUFV, match_widen_vd_neq_vs2_neq_vm, 0},
++{"vfwcvt.rtz.x.f.v",  0, INSN_CLASS_V_AND_F, "Vd,VtVm", MATCH_VFWCVTRTZXFV, MASK_VFWCVTRTZXFV, match_widen_vd_neq_vs2_neq_vm, 0},
++{"vfwcvt.f.xu.v",     0, INSN_CLASS_V_AND_F, "Vd,VtVm", MATCH_VFWCVTFXUV, MASK_VFWCVTFXUV, match_widen_vd_neq_vs2_neq_vm, 0},
++{"vfwcvt.f.x.v",      0, INSN_CLASS_V_AND_F, "Vd,VtVm", MATCH_VFWCVTFXV, MASK_VFWCVTFXV, match_widen_vd_neq_vs2_neq_vm, 0},
++{"vfwcvt.f.f.v",      0, INSN_CLASS_V_AND_F, "Vd,VtVm", MATCH_VFWCVTFFV, MASK_VFWCVTFFV, match_widen_vd_neq_vs2_neq_vm, 0},
++
++{"vfncvt.xu.f.w",     0, INSN_CLASS_V_AND_F, "Vd,VtVm", MATCH_VFNCVTXUFW, MASK_VFNCVTXUFW, match_narrow_vd_neq_vs2_neq_vm, 0},
++{"vfncvt.x.f.w",      0, INSN_CLASS_V_AND_F, "Vd,VtVm", MATCH_VFNCVTXFW, MASK_VFNCVTXFW, match_narrow_vd_neq_vs2_neq_vm, 0},
++{"vfncvt.rtz.xu.f.w", 0, INSN_CLASS_V_AND_F, "Vd,VtVm", MATCH_VFNCVTRTZXUFW, MASK_VFNCVTRTZXUFW, match_narrow_vd_neq_vs2_neq_vm, 0},
++{"vfncvt.rtz.x.f.w",  0, INSN_CLASS_V_AND_F, "Vd,VtVm", MATCH_VFNCVTRTZXFW, MASK_VFNCVTRTZXFW, match_narrow_vd_neq_vs2_neq_vm, 0},
++{"vfncvt.f.xu.w",     0, INSN_CLASS_V_AND_F, "Vd,VtVm", MATCH_VFNCVTFXUW, MASK_VFNCVTFXUW, match_narrow_vd_neq_vs2_neq_vm, 0},
++{"vfncvt.f.x.w",      0, INSN_CLASS_V_AND_F, "Vd,VtVm", MATCH_VFNCVTFXW, MASK_VFNCVTFXW, match_narrow_vd_neq_vs2_neq_vm, 0},
++{"vfncvt.f.f.w",      0, INSN_CLASS_V_AND_F, "Vd,VtVm", MATCH_VFNCVTFFW, MASK_VFNCVTFFW, match_narrow_vd_neq_vs2_neq_vm, 0},
++{"vfncvt.rod.f.f.w",  0, INSN_CLASS_V_AND_F, "Vd,VtVm", MATCH_VFNCVTRODFFW, MASK_VFNCVTRODFFW, match_narrow_vd_neq_vs2_neq_vm, 0},
++
++{"vredsum.vs", 0, INSN_CLASS_V, "Vd,Vt,VsVm", MATCH_VREDSUMVS, MASK_VREDSUMVS, match_opcode, 0},
++{"vredmaxu.vs",0, INSN_CLASS_V, "Vd,Vt,VsVm", MATCH_VREDMAXUVS, MASK_VREDMAXUVS, match_opcode, 0},
++{"vredmax.vs", 0, INSN_CLASS_V, "Vd,Vt,VsVm", MATCH_VREDMAXVS, MASK_VREDMAXVS, match_opcode, 0},
++{"vredminu.vs",0, INSN_CLASS_V, "Vd,Vt,VsVm", MATCH_VREDMINUVS, MASK_VREDMINUVS, match_opcode, 0},
++{"vredmin.vs", 0, INSN_CLASS_V, "Vd,Vt,VsVm", MATCH_VREDMINVS, MASK_VREDMINVS, match_opcode, 0},
++{"vredand.vs", 0, INSN_CLASS_V, "Vd,Vt,VsVm", MATCH_VREDANDVS, MASK_VREDANDVS, match_opcode, 0},
++{"vredor.vs",  0, INSN_CLASS_V, "Vd,Vt,VsVm", MATCH_VREDORVS, MASK_VREDORVS, match_opcode, 0},
++{"vredxor.vs", 0, INSN_CLASS_V, "Vd,Vt,VsVm", MATCH_VREDXORVS, MASK_VREDXORVS, match_opcode, 0},
++
++{"vwredsumu.vs",0, INSN_CLASS_V, "Vd,Vt,VsVm", MATCH_VWREDSUMUVS, MASK_VWREDSUMUVS, match_opcode, 0},
++{"vwredsum.vs",0, INSN_CLASS_V, "Vd,Vt,VsVm", MATCH_VWREDSUMVS, MASK_VWREDSUMVS, match_opcode, 0},
++
++{"vfredosum.vs",0, INSN_CLASS_V_AND_F, "Vd,Vt,VsVm", MATCH_VFREDOSUMVS, MASK_VFREDOSUMVS, match_opcode, 0},
++{"vfredusum.vs", 0, INSN_CLASS_V_AND_F, "Vd,Vt,VsVm", MATCH_VFREDSUMVS, MASK_VFREDSUMVS, match_opcode, 0},
++{"vfredmax.vs", 0, INSN_CLASS_V_AND_F, "Vd,Vt,VsVm", MATCH_VFREDMAXVS, MASK_VFREDMAXVS, match_opcode, 0},
++{"vfredmin.vs", 0, INSN_CLASS_V_AND_F, "Vd,Vt,VsVm", MATCH_VFREDMINVS, MASK_VFREDMINVS, match_opcode, 0},
++
++{"vfwredosum.vs",0, INSN_CLASS_V_AND_F, "Vd,Vt,VsVm", MATCH_VFWREDOSUMVS, MASK_VFWREDOSUMVS, match_opcode, 0},
++{"vfwredusum.vs", 0, INSN_CLASS_V_AND_F, "Vd,Vt,VsVm", MATCH_VFWREDSUMVS, MASK_VFWREDSUMVS, match_opcode, 0},
++
++{"vfredsum.vs", 0, INSN_CLASS_V_AND_F, "Vd,Vt,VsVm", MATCH_VFREDSUMVS, MASK_VFREDSUMVS, match_opcode, INSN_ALIAS},
++{"vfwredsum.vs", 0, INSN_CLASS_V_AND_F, "Vd,Vt,VsVm", MATCH_VFWREDSUMVS, MASK_VFWREDSUMVS, match_opcode, INSN_ALIAS},
++{"vmmv.m",     0, INSN_CLASS_V, "Vd,Vu", MATCH_VMANDMM, MASK_VMANDMM, match_vs1_eq_vs2, INSN_ALIAS},
++{"vmcpy.m",    0, INSN_CLASS_V, "Vd,Vu", MATCH_VMANDMM, MASK_VMANDMM, match_vs1_eq_vs2, INSN_ALIAS},
++{"vmclr.m",    0, INSN_CLASS_V, "Vv", MATCH_VMXORMM, MASK_VMXORMM, match_vd_eq_vs1_eq_vs2, INSN_ALIAS},
++{"vmset.m",    0, INSN_CLASS_V, "Vv", MATCH_VMXNORMM, MASK_VMXNORMM, match_vd_eq_vs1_eq_vs2, INSN_ALIAS},
++{"vmnot.m",    0, INSN_CLASS_V, "Vd,Vu", MATCH_VMNANDMM, MASK_VMNANDMM, match_vs1_eq_vs2, INSN_ALIAS},
++
++{"vmand.mm",   0, INSN_CLASS_V, "Vd,Vt,Vs", MATCH_VMANDMM, MASK_VMANDMM, match_opcode, 0},
++{"vmnand.mm",  0, INSN_CLASS_V, "Vd,Vt,Vs", MATCH_VMNANDMM, MASK_VMNANDMM, match_opcode, 0},
++{"vmandnot.mm",0, INSN_CLASS_V, "Vd,Vt,Vs", MATCH_VMANDNOTMM, MASK_VMANDNOTMM, match_opcode, 0},
++{"vmxor.mm",   0, INSN_CLASS_V, "Vd,Vt,Vs", MATCH_VMXORMM, MASK_VMXORMM, match_opcode, 0},
++{"vmor.mm",    0, INSN_CLASS_V, "Vd,Vt,Vs", MATCH_VMORMM, MASK_VMORMM, match_opcode, 0},
++{"vmnor.mm",   0, INSN_CLASS_V, "Vd,Vt,Vs", MATCH_VMNORMM, MASK_VMNORMM, match_opcode, 0},
++{"vmornot.mm", 0, INSN_CLASS_V, "Vd,Vt,Vs", MATCH_VMORNOTMM, MASK_VMORNOTMM, match_opcode, 0},
++{"vmxnor.mm",  0, INSN_CLASS_V, "Vd,Vt,Vs", MATCH_VMXNORMM, MASK_VMXNORMM, match_opcode, 0},
++
++{"vpopc.m",    0, INSN_CLASS_V, "d,VtVm", MATCH_VPOPCM, MASK_VPOPCM, match_opcode, 0},
++{"vfirst.m",   0, INSN_CLASS_V, "d,VtVm", MATCH_VFIRSTM, MASK_VFIRSTM, match_opcode, 0},
++{"vmsbf.m",    0, INSN_CLASS_V, "Vd,VtVm", MATCH_VMSBFM, MASK_VMSBFM, match_vd_neq_vs2_neq_vm, 0},
++{"vmsif.m",    0, INSN_CLASS_V, "Vd,VtVm", MATCH_VMSIFM, MASK_VMSIFM, match_vd_neq_vs2_neq_vm, 0},
++{"vmsof.m",    0, INSN_CLASS_V, "Vd,VtVm", MATCH_VMSOFM, MASK_VMSOFM, match_vd_neq_vs2_neq_vm, 0},
++{"viota.m",    0, INSN_CLASS_V, "Vd,VtVm", MATCH_VIOTAM, MASK_VIOTAM, match_vd_neq_vs2_neq_vm, 0},
++{"vid.v",      0, INSN_CLASS_V, "VdVm", MATCH_VIDV, MASK_VIDV, match_vd_neq_vm, 0},
++
++{"vmv.x.s",    0, INSN_CLASS_V, "d,Vt", MATCH_VMVXS, MASK_VMVXS, match_opcode, 0},
++{"vmv.s.x",    0, INSN_CLASS_V, "Vd,s", MATCH_VMVSX, MASK_VMVSX, match_opcode, 0},
++
++{"vfmv.f.s",   0, INSN_CLASS_V_AND_F, "D,Vt", MATCH_VFMVFS, MASK_VFMVFS, match_opcode, 0},
++{"vfmv.s.f",   0, INSN_CLASS_V_AND_F, "Vd,S", MATCH_VFMVSF, MASK_VFMVSF, match_opcode, 0},
++
++{"vslideup.vx",0, INSN_CLASS_V, "Vd,Vt,sVm", MATCH_VSLIDEUPVX, MASK_VSLIDEUPVX, match_vd_neq_vs2_neq_vm, 0},
++{"vslideup.vi",0, INSN_CLASS_V, "Vd,Vt,VjVm", MATCH_VSLIDEUPVI, MASK_VSLIDEUPVI, match_vd_neq_vs2_neq_vm, 0},
++{"vslidedown.vx",0,INSN_CLASS_V, "Vd,Vt,sVm", MATCH_VSLIDEDOWNVX, MASK_VSLIDEDOWNVX, match_vd_neq_vm, 0},
++{"vslidedown.vi",0,INSN_CLASS_V, "Vd,Vt,VjVm", MATCH_VSLIDEDOWNVI, MASK_VSLIDEDOWNVI, match_vd_neq_vm, 0},
++
++{"vslide1up.vx",    0, INSN_CLASS_V, "Vd,Vt,sVm", MATCH_VSLIDE1UPVX, MASK_VSLIDE1UPVX, match_vd_neq_vs2_neq_vm, 0},
++{"vslide1down.vx",  0, INSN_CLASS_V, "Vd,Vt,sVm", MATCH_VSLIDE1DOWNVX, MASK_VSLIDE1DOWNVX, match_vd_neq_vm, 0},
++{"vfslide1up.vf",   0, INSN_CLASS_V_AND_F, "Vd,Vt,SVm", MATCH_VFSLIDE1UPVF, MASK_VFSLIDE1UPVF, match_vd_neq_vs2_neq_vm, 0},
++{"vfslide1down.vf", 0, INSN_CLASS_V_AND_F, "Vd,Vt,SVm", MATCH_VFSLIDE1DOWNVF, MASK_VFSLIDE1DOWNVF, match_vd_neq_vm, 0},
++
++{"vrgather.vv",    0, INSN_CLASS_V, "Vd,Vt,VsVm", MATCH_VRGATHERVV, MASK_VRGATHERVV, match_vd_neq_vs1_neq_vs2_neq_vm, 0},
++{"vrgather.vx",    0, INSN_CLASS_V, "Vd,Vt,sVm", MATCH_VRGATHERVX, MASK_VRGATHERVX, match_vd_neq_vs2_neq_vm, 0},
++{"vrgather.vi",    0, INSN_CLASS_V, "Vd,Vt,VjVm", MATCH_VRGATHERVI, MASK_VRGATHERVI, match_vd_neq_vs2_neq_vm, 0},
++{"vrgatherei16.vv",0, INSN_CLASS_V, "Vd,Vt,VsVm", MATCH_VRGATHEREI16VV, MASK_VRGATHEREI16VV, match_vd_neq_vs1_neq_vs2_neq_vm, 0},
++
++{"vcompress.vm",0, INSN_CLASS_V, "Vd,Vt,Vs", MATCH_VCOMPRESSVM, MASK_VCOMPRESSVM, match_vd_neq_vs1_neq_vs2, 0},
++
++{"vmv1r.v",    0, INSN_CLASS_V, "Vd,Vt", MATCH_VMV1RV, MASK_VMV1RV, match_vmv_nf_rv, 0},
++{"vmv2r.v",    0, INSN_CLASS_V, "Vd,Vt", MATCH_VMV2RV, MASK_VMV2RV, match_vmv_nf_rv, 0},
++{"vmv4r.v",    0, INSN_CLASS_V, "Vd,Vt", MATCH_VMV4RV, MASK_VMV4RV, match_vmv_nf_rv, 0},
++{"vmv8r.v",    0, INSN_CLASS_V, "Vd,Vt", MATCH_VMV8RV, MASK_VMV8RV, match_vmv_nf_rv, 0},
++
++
+ /* Terminate the list.  */
+ {0, 0, INSN_CLASS_NONE, 0, 0, 0, 0, 0}
+ };
+@@ -814,6 +2122,26 @@ const struct riscv_opcode riscv_insn_types[] =
+ {"r",       0, INSN_CLASS_F,  "O4,F3,F2,d,S,T,R",   0,    0,  match_opcode, 0 },
+ {"r",       0, INSN_CLASS_F,  "O4,F3,F2,D,S,T,R",   0,    0,  match_opcode, 0 },
+ 
++{"r",       0, INSN_CLASS_V,        "O4,F3,F7,Vd,s,t",    0,    0,  match_opcode, 0 },
++{"r",       0, INSN_CLASS_V_AND_F,  "O4,F3,F7,Vd,S,t",    0,    0,  match_opcode, 0 },
++{"r",       0, INSN_CLASS_V_AND_F,  "O4,F3,F7,Vd,s,T",    0,    0,  match_opcode, 0 },
++{"r",       0, INSN_CLASS_V_AND_F,  "O4,F3,F7,Vd,S,T",    0,    0,  match_opcode, 0 },
++{"r",       0, INSN_CLASS_V,        "O4,F3,F7,d,Vs,t",    0,    0,  match_opcode, 0 },
++{"r",       0, INSN_CLASS_V_AND_F,  "O4,F3,F7,D,Vs,t",    0,    0,  match_opcode, 0 },
++{"r",       0, INSN_CLASS_V_AND_F,  "O4,F3,F7,d,Vs,T",    0,    0,  match_opcode, 0 },
++{"r",       0, INSN_CLASS_V_AND_F,  "O4,F3,F7,D,Vs,T",    0,    0,  match_opcode, 0 },
++{"r",       0, INSN_CLASS_V,        "O4,F3,F7,d,s,Vt",    0,    0,  match_opcode, 0 },
++{"r",       0, INSN_CLASS_V_AND_F,  "O4,F3,F7,D,s,Vt",    0,    0,  match_opcode, 0 },
++{"r",       0, INSN_CLASS_V_AND_F,  "O4,F3,F7,d,S,Vt",    0,    0,  match_opcode, 0 },
++{"r",       0, INSN_CLASS_V_AND_F,  "O4,F3,F7,D,S,Vt",    0,    0,  match_opcode, 0 },
++{"r",       0, INSN_CLASS_V,        "O4,F3,F7,Vd,Vs,t",   0,    0,  match_opcode, 0 },
++{"r",       0, INSN_CLASS_V_AND_F,  "O4,F3,F7,Vd,Vs,T",   0,    0,  match_opcode, 0 },
++{"r",       0, INSN_CLASS_V,        "O4,F3,F7,Vd,s,Vt",   0,    0,  match_opcode, 0 },
++{"r",       0, INSN_CLASS_V_AND_F,  "O4,F3,F7,Vd,S,Vt",   0,    0,  match_opcode, 0 },
++{"r",       0, INSN_CLASS_V,        "O4,F3,F7,d,Vs,Vt",   0,    0,  match_opcode, 0 },
++{"r",       0, INSN_CLASS_V_AND_F,  "O4,F3,F7,D,Vs,Vt",   0,    0,  match_opcode, 0 },
++{"r",       0, INSN_CLASS_V,        "O4,F3,F7,Vd,Vs,Vt",  0,    0,  match_opcode, 0 },
++
+ {"r4",      0, INSN_CLASS_I,  "O4,F3,F2,d,s,t,r",   0,    0,  match_opcode, 0 },
+ {"r4",      0, INSN_CLASS_F,  "O4,F3,F2,D,s,t,r",   0,    0,  match_opcode, 0 },
+ {"r4",      0, INSN_CLASS_F,  "O4,F3,F2,d,S,t,r",   0,    0,  match_opcode, 0 },
+@@ -935,6 +2263,12 @@ const struct riscv_ext_version riscv_ext_version_table[] =
+ {"zicsr", ISA_SPEC_CLASS_20191213, 2, 0},
+ {"zicsr", ISA_SPEC_CLASS_20190608, 2, 0},
+ 
++{"v", ISA_SPEC_CLASS_DRAFT, 1, 0},
++{"zba", ISA_SPEC_CLASS_DRAFT, 1, 0},
++{"zbb", ISA_SPEC_CLASS_DRAFT, 1, 0},
++{"zbc", ISA_SPEC_CLASS_DRAFT, 1, 0},
++{"zbs", ISA_SPEC_CLASS_DRAFT, 1, 0},
++
+ /* Terminate the list.  */
+ {NULL, 0, 0, 0}
+ };
+-- 
+2.33.1
+

+ 28 - 0
recipes-devtools/gdb/gdb/0001-make-man-install-relative-to-DESTDIR.patch

@@ -0,0 +1,28 @@
+From e5126c7167e26f865990dc5f86344602603aa8c6 Mon Sep 17 00:00:00 2001
+From: Khem Raj <raj.khem@gmail.com>
+Date: Mon, 2 Mar 2015 02:27:55 +0000
+Subject: [PATCH 01/11] make man install relative to DESTDIR
+
+Upstream-Status: Pending
+
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+---
+ sim/common/Makefile.in | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/sim/common/Makefile.in b/sim/common/Makefile.in
+index f713fcaa35b..7c553709d3f 100644
+--- a/sim/common/Makefile.in
++++ b/sim/common/Makefile.in
+@@ -35,7 +35,7 @@ tooldir = $(libdir)/$(target_alias)
+ datarootdir = @datarootdir@
+ datadir = @datadir@
+ mandir = @mandir@
+-man1dir = $(mandir)/man1
++man1dir = $(DESTDIR)$(mandir)/man1
+ infodir = @infodir@
+ includedir = @includedir@
+ 
+-- 
+2.29.2
+

+ 35 - 0
recipes-devtools/gdb/gdb/0002-mips-linux-nat-Define-_ABIO32-if-not-defined.patch

@@ -0,0 +1,35 @@
+From 0680242c9a3a0149a23e63034ecb4404de2293dd Mon Sep 17 00:00:00 2001
+From: Khem Raj <raj.khem@gmail.com>
+Date: Wed, 23 Mar 2016 06:30:09 +0000
+Subject: [PATCH 02/11] mips-linux-nat: Define _ABIO32 if not defined
+
+This helps when building gdb for mips64 on musl: since
+musl does not provide sgidefs.h, this define is
+only available when GCC targets the o32 ABI, in which
+case gcc emits it as a built-in define, so it
+works as expected for mips32.
+
+Upstream-Status: Pending
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+---
+ gdb/mips-linux-nat.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/gdb/mips-linux-nat.c b/gdb/mips-linux-nat.c
+index 38ff461a35b..4337795bac8 100644
+--- a/gdb/mips-linux-nat.c
++++ b/gdb/mips-linux-nat.c
+@@ -41,6 +41,10 @@
+ #ifndef PTRACE_GET_THREAD_AREA
+ #define PTRACE_GET_THREAD_AREA 25
+ #endif
++/* musl does not define _ABIO32; it relies on the compiler's built-in macros instead.  */
++#ifndef _ABIO32
++#define _ABIO32 1
++#endif
+ 
+ class mips_linux_nat_target final : public linux_nat_trad_target
+ {
+-- 
+2.29.2
+

+ 53 - 0
recipes-devtools/gdb/gdb/0003-ppc-ptrace-Define-pt_regs-uapi_pt_regs-on-GLIBC-syst.patch

@@ -0,0 +1,53 @@
+From afbb66c244b1ae0aaaa90d88d3cd484f741c614f Mon Sep 17 00:00:00 2001
+From: Khem Raj <raj.khem@gmail.com>
+Date: Sat, 30 Apr 2016 18:32:14 -0700
+Subject: [PATCH 03/11] ppc/ptrace: Define pt_regs uapi_pt_regs on !GLIBC
+ systems
+
+Upstream-Status: Pending
+
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+---
+ gdb/nat/ppc-linux.h        | 6 ++++++
+ gdbserver/linux-ppc-low.cc | 6 ++++++
+ 2 files changed, 12 insertions(+)
+
+diff --git a/gdb/nat/ppc-linux.h b/gdb/nat/ppc-linux.h
+index d937a65b69c..1fd54b4a0e0 100644
+--- a/gdb/nat/ppc-linux.h
++++ b/gdb/nat/ppc-linux.h
+@@ -18,7 +18,13 @@
+ #ifndef NAT_PPC_LINUX_H
+ #define NAT_PPC_LINUX_H
+ 
++#if !defined(__GLIBC__)
++# define pt_regs uapi_pt_regs
++#endif
+ #include <asm/ptrace.h>
++#if !defined(__GLIBC__)
++# undef pt_regs
++#endif
+ #include <asm/cputable.h>
+ 
+ /* This sometimes isn't defined.  */
+diff --git a/gdbserver/linux-ppc-low.cc b/gdbserver/linux-ppc-low.cc
+index 337d555aee7..5d518f37268 100644
+--- a/gdbserver/linux-ppc-low.cc
++++ b/gdbserver/linux-ppc-low.cc
+@@ -23,7 +23,13 @@
+ #include "elf/common.h"
+ #include <sys/uio.h>
+ #include <elf.h>
++#if !defined(__GLIBC__)
++# define pt_regs uapi_pt_regs
++#endif
+ #include <asm/ptrace.h>
++#if !defined(__GLIBC__)
++# undef pt_regs
++#endif
+ 
+ #include "arch/ppc-linux-common.h"
+ #include "arch/ppc-linux-tdesc.h"
+-- 
+2.29.2
+

+ 913 - 0
recipes-devtools/gdb/gdb/0004-Add-support-for-Renesas-SH-sh4-architecture.patch

@@ -0,0 +1,913 @@
+From 26e406962cf7298837b350b979afff0ac34ecb0b Mon Sep 17 00:00:00 2001
+From: Khem Raj <raj.khem@gmail.com>
+Date: Mon, 2 Mar 2015 02:31:12 +0000
+Subject: [PATCH 04/11] Add support for Renesas SH (sh4) architecture.
+
+gdb (7.4-1~cvs20111117.2) experimental; urgency=low
+ .
+   * Add Renesas SH (sh4) support (Closes: #576242)
+     - Thanks Nobuhiro Iwamatsu, Takashi Yoshii.
+Author: Hector Oron <zumbi@debian.org>
+Bug-Debian: http://bugs.debian.org/576242
+
+Upstream-Status: Pending
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+---
+ gdb/Makefile.in                      |   2 +
+ gdb/configure.host                   |   1 +
+ gdb/sh-linux-tdep.c                  | 519 +++++++++++++++++++++++++++
+ gdb/sh-tdep.c                        |  52 +--
+ gdb/sh-tdep.h                        |  49 +++
+ gdb/testsuite/gdb.asm/asm-source.exp |   5 +
+ gdb/testsuite/gdb.asm/sh.inc         |   3 +-
+ gdb/testsuite/gdb.base/annota1.c     |   3 +
+ gdb/testsuite/gdb.base/annota3.c     |   4 +
+ gdb/testsuite/gdb.base/sigall.c      |   3 +
+ gdb/testsuite/gdb.base/signals.c     |   4 +
+ 11 files changed, 617 insertions(+), 28 deletions(-)
+
+diff --git a/gdb/Makefile.in b/gdb/Makefile.in
+index 4808357e651..a009004ab05 100644
+--- a/gdb/Makefile.in
++++ b/gdb/Makefile.in
+@@ -2273,6 +2273,8 @@ ALLDEPFILES = \
+ 	sh-nbsd-nat.c \
+ 	sh-nbsd-tdep.c \
+ 	sh-tdep.c \
++	sh-linux-tdep.c \
++	sh-linux-nat.c \
+ 	sol2-tdep.c \
+ 	solib-aix.c \
+ 	solib-svr4.c \
+diff --git a/gdb/configure.host b/gdb/configure.host
+index ce528237291..5b5173a71aa 100644
+--- a/gdb/configure.host
++++ b/gdb/configure.host
+@@ -148,6 +148,7 @@ riscv*-*-linux*)	gdb_host=linux ;;
+ 
+ s390*-*-linux*)		gdb_host=linux ;;
+ 
++sh*-*-linux*)		gdb_host=linux ;;
+ sh*-*-netbsdelf* | sh*-*-knetbsd*-gnu)
+ 			gdb_host=nbsd ;;
+ sh*-*-openbsd*)		gdb_host=nbsd ;;
+diff --git a/gdb/sh-linux-tdep.c b/gdb/sh-linux-tdep.c
+index 5d2f38f5801..06a45b74827 100644
+--- a/gdb/sh-linux-tdep.c
++++ b/gdb/sh-linux-tdep.c
+@@ -18,14 +18,37 @@
+    along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
+ 
+ #include "defs.h"
++#include "gdbcore.h"
++#include "frame.h"
++#include "frame-base.h"
++#include "frame-unwind.h"
++#include "dwarf2-frame.h"
++#include "value.h"
++#include "regcache.h"
++#include "inferior.h"
+ #include "osabi.h"
+ 
++#include "reggroups.h"
++#include "arch-utils.h"
++#include "floatformat.h"
+ #include "solib-svr4.h"
+ #include "symtab.h"
++#include "gdb_string.h"
++#include "command.h"
++#include "gdb_assert.h"
+ 
+ #include "trad-frame.h"
+ #include "tramp-frame.h"
+ 
++#include <sys/ptrace.h>
++#include <sys/types.h>
++#include <sys/param.h>
++#include <sys/user.h>
++#include <sys/syscall.h>
++
++#include <asm/ptrace.h>
++
++#include "regset.h"
+ #include "glibc-tdep.h"
+ #include "sh-tdep.h"
+ #include "linux-tdep.h"
+@@ -181,9 +204,505 @@ static struct tramp_frame sh_linux_rt_sigreturn_tramp_frame = {
+   sh_linux_rt_sigreturn_init
+ };
+ 
++/* Recognizing signal handler frames.  */
++
++/* GNU/Linux has two flavors of signals.  Normal signal handlers, and
++   "realtime" (RT) signals.  The RT signals can provide additional
++   information to the signal handler if the SA_SIGINFO flag is set
++   when establishing a signal handler using `sigaction'.  It is not
++   unlikely that future versions of GNU/Linux will support SA_SIGINFO
++   for normal signals too.  */
++
++/* When the SH Linux kernel calls a signal handler and the
++   SA_RESTORER flag isn't set, the return address points to a bit of
++   code on the stack.  This function returns whether the PC appears to
++   be within this bit of code.
++
++   The instruction sequence for normal signals is
++       mov.w  1f,r3
++       trapa  #16
++       or     r0, r0
++       or     r0, r0
++       or     r0, r0
++       or     r0, r0
++       or     r0, r0
++    1: .word  __NR_sigreturn
++   or 0x9305 0xc310 0x200b 0x200b 0x200b 0x200b 0x200b 0x0077.
++
++   Checking for the code sequence should be somewhat reliable, because
++   the effect is to call the system call sigreturn.  This is unlikely
++   to occur anywhere other than a signal trampoline.
++
++   It kind of sucks that we have to read memory from the process in
++   order to identify a signal trampoline, but there doesn't seem to be
++   any other way.  The PC_IN_SIGTRAMP macro in tm-linux.h arranges to
++   only call us if no function name could be identified, which should
++   be the case since the code is on the stack.
++
++   Detection of signal trampolines for handlers that set the
++   SA_RESTORER flag is in general not possible.  Unfortunately this is
++   what the GNU C Library has been doing for quite some time now.
++   However, as of version 2.1.2, the GNU C Library uses signal
++   trampolines (named __restore and __restore_rt) that are identical
++   to the ones used by the kernel.  Therefore, these trampolines are
++   supported too.  */
++
++#define MOVW(n)	 (0x9300|((n)-2))	/* Move mem word at PC+n to R3 */
++#define TRAP16	 0xc310			/* Syscall w/no args (NR in R3) */
++#define OR_R0_R0 0x200b			/* or r0,r0 (insert to avoid hardware bug) */
++
++#define LINUX_SIGTRAMP_INSN0	MOVW(7)		/* Move mem word at PC+7 to R3 */
++#define LINUX_SIGTRAMP_INSN1	TRAP16		/* Syscall w/no args (NR in R3) */
++#define LINUX_SIGTRAMP_INSN2	OR_R0_R0	/* or r0,r0 (insert to avoid hardware bug) */
++
++static const unsigned short linux_sigtramp_code[] =
++{
++  LINUX_SIGTRAMP_INSN0,
++  LINUX_SIGTRAMP_INSN1,
++  LINUX_SIGTRAMP_INSN2,
++  LINUX_SIGTRAMP_INSN2,
++  LINUX_SIGTRAMP_INSN2,
++  LINUX_SIGTRAMP_INSN2,
++  LINUX_SIGTRAMP_INSN2,
++  __NR_sigreturn
++};
++
++#define LINUX_SIGTRAMP_LEN (sizeof linux_sigtramp_code)
++
++/* If PC is in a sigtramp routine, return the address of the start of
++   the routine.  Otherwise, return 0.  */
++
++static CORE_ADDR
++sh_linux_sigtramp_start (struct frame_info *next_frame)
++{
++  CORE_ADDR pc = get_frame_pc (next_frame);
++  gdb_byte buf[LINUX_SIGTRAMP_LEN];
++
++  /* We only recognize a signal trampoline if PC is at the start of
++     one of the three instructions.  We optimize for finding the PC at
++     the start, as will be the case when the trampoline is not the
++     first frame on the stack.  We assume that in the case where the
++     PC is not at the start of the instruction sequence, there will be
++     a few trailing readable bytes on the stack.  */
++
++  if (!safe_frame_unwind_memory (next_frame, pc, buf, LINUX_SIGTRAMP_LEN))
++    return 0;
++
++  if (buf[0] != LINUX_SIGTRAMP_INSN0)
++    {
++      if (buf[0] != LINUX_SIGTRAMP_INSN1)
++        return 0;
++
++      pc -= 2;
++
++      if (!safe_frame_unwind_memory (next_frame, pc, buf, LINUX_SIGTRAMP_LEN))
++	return 0;
++    }
++
++  if (memcmp (buf, linux_sigtramp_code, LINUX_SIGTRAMP_LEN) != 0)
++    return 0;
++
++  return pc;
++}
++
++/* This function does the same for RT signals.  Here the instruction
++   sequence is
++       mov.w  1f,r3
++       trapa  #16
++       or     r0, r0
++       or     r0, r0
++       or     r0, r0
++       or     r0, r0
++       or     r0, r0
++    1: .word  __NR_rt_sigreturn
++   or 0x9305 0xc310 0x200b 0x200b 0x200b 0x200b 0x200b 0x00ad.
++
++   The effect is to call the system call rt_sigreturn.  */
++
++#define LINUX_RT_SIGTRAMP_INSN0		MOVW(7)		/* Move mem word at PC+7 to R3 */
++#define LINUX_RT_SIGTRAMP_INSN1		TRAP16		/* Syscall w/no args (NR in R3) */
++#define LINUX_RT_SIGTRAMP_INSN2		OR_R0_R0	/* or r0,r0 (insert to avoid hardware bug) */
++
++static const unsigned short linux_rt_sigtramp_code[] =
++{
++  LINUX_RT_SIGTRAMP_INSN0,
++  LINUX_RT_SIGTRAMP_INSN1,
++  LINUX_RT_SIGTRAMP_INSN2,
++  LINUX_RT_SIGTRAMP_INSN2,
++  LINUX_RT_SIGTRAMP_INSN2,
++  LINUX_RT_SIGTRAMP_INSN2,
++  LINUX_RT_SIGTRAMP_INSN2,
++  __NR_rt_sigreturn
++};
++
++#define LINUX_RT_SIGTRAMP_LEN (sizeof linux_rt_sigtramp_code)
++
++/* If PC is in a RT sigtramp routine, return the address of the start
++   of the routine.  Otherwise, return 0.  */
++
++static CORE_ADDR
++sh_linux_rt_sigtramp_start (struct frame_info *next_frame)
++{
++  CORE_ADDR pc = get_frame_pc (next_frame);
++  gdb_byte buf[LINUX_RT_SIGTRAMP_LEN];
++
++  /* We only recognize a signal trampoline if PC is at the start of
++     one of the two instructions.  We optimize for finding the PC at
++     the start, as will be the case when the trampoline is not the
++     first frame on the stack.  We assume that in the case where the
++     PC is not at the start of the instruction sequence, there will be
++     a few trailing readable bytes on the stack.  */
++
++  if (!safe_frame_unwind_memory (next_frame, pc, buf, LINUX_RT_SIGTRAMP_LEN))
++    return 0;
++
++  if (buf[0] != LINUX_RT_SIGTRAMP_INSN0)
++    {
++      if (buf[0] != LINUX_RT_SIGTRAMP_INSN1)
++	return 0;
++
++      pc -= 2;
++
++      if (!safe_frame_unwind_memory (next_frame, pc, buf,
++				     LINUX_RT_SIGTRAMP_LEN))
++	return 0;
++    }
++
++  if (memcmp (buf, linux_rt_sigtramp_code, LINUX_RT_SIGTRAMP_LEN) != 0)
++    return 0;
++
++  return pc;
++}
++
++/* Return whether PC is in a GNU/Linux sigtramp routine.  */
++
++static int
++sh_linux_sigtramp_p (struct frame_info *this_frame)
++{
++  CORE_ADDR pc = get_frame_pc (this_frame);
++  char *name;
++
++  find_pc_partial_function (pc, &name, NULL, NULL);
++
++  /* If we have NAME, we can optimize the search.  The trampolines are
++     named __restore and __restore_rt.  However, they aren't dynamically
++     exported from the shared C library, so the trampoline may appear to
++     be part of the preceding function.  This should always be sigaction,
++     __sigaction, or __libc_sigaction (all aliases to the same function).  */
++  if (name == NULL || strstr (name, "sigaction") != NULL)
++    return (sh_linux_sigtramp_start (this_frame) != 0
++	    || sh_linux_rt_sigtramp_start (this_frame) != 0);
++
++  return (strcmp ("__restore", name) == 0
++	  || strcmp ("__restore_rt", name) == 0);
++}
++
++/* Offset to struct sigcontext in ucontext, from <asm/ucontext.h>.  */
++#define SH_LINUX_UCONTEXT_SIGCONTEXT_OFFSET 12
++
++
++/* Assuming NEXT_FRAME is a frame following a GNU/Linux sigtramp
++   routine, return the address of the associated sigcontext structure.  */
++
++static CORE_ADDR
++sh_linux_sigcontext_addr (struct frame_info *this_frame)
++{
++  CORE_ADDR pc;
++  CORE_ADDR sp;
++
++  sp = get_frame_register_unsigned (this_frame, SP_REGNUM);
++
++  pc = sh_linux_sigtramp_start (this_frame);
++  if (pc)
++    {
++      return sp;
++    }
++
++  pc = sh_linux_rt_sigtramp_start (this_frame);
++  if (pc)
++    {
++      CORE_ADDR ucontext_addr;
++
++      /* The sigcontext structure is part of the user context.  A
++	 pointer to the user context is passed as the third argument
++	 to the signal handler.  */
++      ucontext_addr = get_frame_register_unsigned (this_frame, ARG0_REGNUM+2);
++      return ucontext_addr + SH_LINUX_UCONTEXT_SIGCONTEXT_OFFSET;
++    }
++
++  error ("Couldn't recognize signal trampoline.");
++  return 0;
++}
++
++/* Signal trampolines.  */
++extern struct sh_frame_cache *sh_alloc_frame_cache (void);
++
++static struct sh_frame_cache *
++sh_linux_sigtramp_frame_cache (struct frame_info *this_frame, void **this_cache)
++{
++  struct sh_frame_cache *cache;
++  struct gdbarch_tdep *tdep = gdbarch_tdep (get_current_arch ());
++  CORE_ADDR sigcontext_addr;
++
++  if (*this_cache)
++    return *this_cache;
++
++  cache = sh_alloc_frame_cache ();
++
++  cache->base = get_frame_register_unsigned (this_frame, SP_REGNUM);
++  sigcontext_addr = tdep->sigcontext_addr (this_frame);
++  if (tdep->sc_reg_offset)
++    {
++      int i;
++
++      gdb_assert (tdep->sc_num_regs <= SH_NUM_REGS);
++
++      for (i = 0; i < tdep->sc_num_regs; i++)
++	if (tdep->sc_reg_offset[i] != -1)
++	  cache->saved_regs[i] = sigcontext_addr + tdep->sc_reg_offset[i];
++    }
++
++  *this_cache = cache;
++  return cache;
++}
++
++static void
++sh_linux_sigtramp_frame_this_id (struct frame_info *this_frame, void **this_cache,
++			     struct frame_id *this_id)
++{
++  struct sh_frame_cache *cache =
++    sh_linux_sigtramp_frame_cache (this_frame, this_cache);
++
++  (*this_id) = frame_id_build (cache->base + 64, cache->pc);
++}
++
++extern struct value * sh_frame_prev_register ();
++static struct value *
++sh_linux_sigtramp_frame_prev_register (struct frame_info *this_frame,
++                   void **this_cache, int regnum)
++{
++  sh_linux_sigtramp_frame_cache (this_frame, this_cache);
++
++  return sh_frame_prev_register (this_frame, this_cache, regnum);
++}
++
++static int
++sh_linux_sigtramp_frame_sniffer (const struct frame_unwind *self,
++                 struct frame_info *this_frame,
++                 void **this_prologue_cache)
++{
++  struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
++
++  /* We shouldn't even bother if we don't have a sigcontext_addr
++     handler.  */
++  if (tdep->sigcontext_addr == NULL)
++    return 0;
++
++  if (tdep->sigtramp_p != NULL)
++    {
++      if (tdep->sigtramp_p (this_frame))
++    return 1;
++    }
++
++  return 0;
++}
++
++static const struct frame_unwind sh_linux_sigtramp_frame_unwind =
++{
++  SIGTRAMP_FRAME,
++  sh_linux_sigtramp_frame_this_id,
++  sh_linux_sigtramp_frame_prev_register,
++  NULL,
++  sh_linux_sigtramp_frame_sniffer
++};
++
++/* Supply register REGNUM from the buffer specified by GREGS and LEN
++   in the general-purpose register set REGSET to register cache
++   REGCACHE.  If REGNUM is -1, do this for all registers in REGSET.  */
++
++void
++sh_supply_gregset (const struct regset *regset, struct regcache *regcache,
++             int regnum, const void *gregs, size_t len)
++{
++  const struct gdbarch_tdep *tdep = gdbarch_tdep (regset->arch);
++  const char *regs = gregs;
++  int i;
++
++  gdb_assert (len == tdep->sizeof_gregset);
++
++  for (i = 0; i < tdep->gregset_num_regs; i++)
++    {
++      if ((regnum == i || regnum == -1)
++      && tdep->gregset_reg_offset[i] != -1)
++    regcache_raw_supply (regcache, i, regs + tdep->gregset_reg_offset[i]);
++    }
++}
++
++/* Collect register REGNUM from the register cache REGCACHE and store
++   it in the buffer specified by GREGS and LEN as described by the
++   general-purpose register set REGSET.  If REGNUM is -1, do this for
++   all registers in REGSET.  */
++
++void
++sh_collect_gregset (const struct regset *regset,
++              const struct regcache *regcache,
++              int regnum, void *gregs, size_t len)
++{
++  const struct gdbarch_tdep *tdep = gdbarch_tdep (regset->arch);
++  char *regs = gregs;
++  int i;
++
++  gdb_assert (len == tdep->sizeof_gregset);
++
++  for (i = 0; i < tdep->gregset_num_regs; i++)
++    {
++      if ((regnum == i || regnum == -1)
++      && tdep->gregset_reg_offset[i] != -1)
++    regcache_raw_collect (regcache, i, regs + tdep->gregset_reg_offset[i]);
++    }
++}
++
++/* Supply register REGNUM from the buffer specified by FPREGS and LEN
++   in the floating-point register set REGSET to register cache
++   REGCACHE.  If REGNUM is -1, do this for all registers in REGSET.  */
++
++static void
++sh_supply_fpregset (const struct regset *regset, struct regcache *regcache,
++              int regnum, const void *fpregs, size_t len)
++{
++  const struct gdbarch_tdep *tdep = gdbarch_tdep (regset->arch);
++  const char *regs = fpregs;
++  int i;
++
++  gdb_assert (len == tdep->sizeof_fpregset);
++  for (i = 0; i < 16; i++)
++    {
++      if (regnum == i+25 || regnum == -1)
++    regcache_raw_supply (regcache, i+25, regs + i*4);
++    }
++  if (regnum == FPSCR_REGNUM || regnum == -1)
++    regcache_raw_supply (regcache, FPSCR_REGNUM, regs + 32*4);
++  if (regnum == FPUL_REGNUM || regnum == -1)
++    regcache_raw_supply (regcache, FPUL_REGNUM, regs + 33*4);
++}
++
++/* Collect register REGNUM from the register cache REGCACHE and store
++   it in the buffer specified by FPREGS and LEN as described by the
++   floating-point register set REGSET.  If REGNUM is -1, do this for
++   all registers in REGSET.  */
++
++static void
++sh_collect_fpregset (const struct regset *regset,
++               const struct regcache *regcache,
++               int regnum, void *fpregs, size_t len)
++{
++  const struct gdbarch_tdep *tdep = gdbarch_tdep (regset->arch);
++  char *regs = fpregs;
++  int i;
++
++  gdb_assert (len == tdep->sizeof_fpregset);
++  for (i = 0; i < 16; i++)
++    {
++      if (regnum == i+25 || regnum == -1)
++    regcache_raw_collect (regcache, i+25, regs + i*4);
++    }
++  if (regnum == FPSCR_REGNUM || regnum == -1)
++    regcache_raw_collect (regcache, FPSCR_REGNUM, regs + 32*4);
++  if (regnum == FPUL_REGNUM || regnum == -1)
++    regcache_raw_collect (regcache, FPUL_REGNUM, regs + 33*4);
++}
++
++/* Return the appropriate register set for the core section identified
++   by SECT_NAME and SECT_SIZE.  */
++
++const struct regset *
++sh_linux_regset_from_core_section (struct gdbarch *gdbarch,
++                   const char *sect_name, size_t sect_size)
++{
++  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
++
++  if (strcmp (sect_name, ".reg") == 0 && sect_size == tdep->sizeof_gregset)
++    {
++      if (tdep->gregset == NULL)
++    tdep->gregset = regset_alloc (gdbarch, sh_supply_gregset,
++                      sh_collect_gregset);
++      return tdep->gregset;
++    }
++
++  if ((strcmp (sect_name, ".reg2") == 0 && sect_size == tdep->sizeof_fpregset))
++    {
++      if (tdep->fpregset == NULL)
++    tdep->fpregset = regset_alloc (gdbarch, sh_supply_fpregset,
++                       sh_collect_fpregset);
++      return tdep->fpregset;
++    }
++
++  return NULL;
++}
++
++/* The register sets used in GNU/Linux ELF core-dumps are identical to
++   the register sets in `struct user' that are used for a.out
++   core-dumps.  These are also used by ptrace(2).  The corresponding
++   types are `elf_gregset_t' for the general-purpose registers (with
++   `elf_greg_t' the type of a single GP register) and `elf_fpregset_t'
++   for the floating-point registers.
++
++   Those types used to be available under the names `gregset_t' and
++   `fpregset_t' too, and GDB used those names in the past.  But those
++   names are now used for the register sets used in the `mcontext_t'
++   type, which have a different size and layout.  */
++
++/* Mapping between the general-purpose registers in `struct user'
++   format and GDB's register cache layout.  */
++
++/* From <sys/reg.h>.  */
++static int sh_linux_gregset_reg_offset[] =
++{
++ 0,	4,	8,	12,	16,	20,	24,	28,
++ 32,	36,	40,	44,	48,	52,	56,	60,
++
++ REG_PC*4,   REG_PR*4,   REG_GBR*4,  -1,
++ REG_MACH*4, REG_MACL*4, REG_SR*4,
++};
++
++/* Mapping between the general-purpose registers in `struct
++   sigcontext' format and GDB's register cache layout.  */
++
++/* From <asm/sigcontext.h>.  */
++static int sh_linux_sc_reg_offset[] =
++{
++ 4,	8,	12,	16,	20,	24,	28,	32,
++ 36,	40,	44,	48,	52,	56,	60,	64,
++ 68,	72,	80,	-1,
++ 84,	88,	76
++};
++
+ static void
+ sh_linux_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
+ {
++  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
++  bfd abfd;
++
++  tdep->gregset_reg_offset = sh_linux_gregset_reg_offset;
++  tdep->gregset_num_regs = ARRAY_SIZE (sh_linux_gregset_reg_offset);
++  tdep->sizeof_gregset = 23 * 4;
++
++  tdep->jb_pc_offset = 32;     /* From <bits/setjmp.h>.  */
++
++  tdep->sigtramp_p = sh_linux_sigtramp_p;
++  tdep->sigcontext_addr = sh_linux_sigcontext_addr;
++  tdep->sc_reg_offset = sh_linux_sc_reg_offset;
++  tdep->sc_num_regs = ARRAY_SIZE (sh_linux_sc_reg_offset);
++
++  frame_unwind_append_unwinder(gdbarch, &sh_linux_sigtramp_frame_unwind);
++
++  /* If we have a register mapping, enable the generic core file
++     support, unless it has already been enabled.  */
++  if (tdep->gregset_reg_offset
++      && !gdbarch_regset_from_core_section_p (gdbarch))
++    set_gdbarch_regset_from_core_section (gdbarch,
++                                         sh_linux_regset_from_core_section);
++
+   linux_init_abi (info, gdbarch);
+ 
+   /* GNU/Linux uses SVR4-style shared libraries.  */
+diff --git a/gdb/sh-tdep.c b/gdb/sh-tdep.c
+index 7aadf9165ca..e173e215a2d 100644
+--- a/gdb/sh-tdep.c
++++ b/gdb/sh-tdep.c
+@@ -21,6 +21,9 @@
+    sac@cygnus.com.  */
+ 
+ #include "defs.h"
++#include "arch-utils.h"
++#include "command.h"
++#include "dummy-frame.h"
+ #include "frame.h"
+ #include "frame-base.h"
+ #include "frame-unwind.h"
+@@ -66,23 +69,6 @@ static const char *const sh_cc_enum[] = {
+ 
+ static const char *sh_active_calling_convention = sh_cc_gcc;
+ 
+-#define SH_NUM_REGS 67
+-
+-struct sh_frame_cache
+-{
+-  /* Base address.  */
+-  CORE_ADDR base;
+-  LONGEST sp_offset;
+-  CORE_ADDR pc;
+-
+-  /* Flag showing that a frame has been created in the prologue code.  */
+-  int uses_fp;
+-
+-  /* Saved registers.  */
+-  CORE_ADDR saved_regs[SH_NUM_REGS];
+-  CORE_ADDR saved_sp;
+-};
+-
+ static int
+ sh_is_renesas_calling_convention (struct type *func_type)
+ {
+@@ -1050,6 +1036,7 @@ sh_treat_as_flt_p (struct type *type)
+     return 0;
+   /* Otherwise if the type of that member is float, the whole type is
+      treated as float.  */
++  type = check_typedef (type);
+   if (type->field (0).type ()->code () == TYPE_CODE_FLT)
+     return 1;
+   /* Otherwise it's not treated as float.  */
+@@ -1100,7 +1087,7 @@ sh_push_dummy_call_fpu (struct gdbarch *gdbarch,
+      in four registers available.  Loop thru args from first to last.  */
+   for (argnum = 0; argnum < nargs; argnum++)
+     {
+-      type = value_type (args[argnum]);
++      type = check_typedef (value_type (args[argnum]));
+       len = TYPE_LENGTH (type);
+       val = sh_justify_value_in_reg (gdbarch, args[argnum], len);
+ 
+@@ -1835,7 +1822,7 @@ sh_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
+     reg->how = DWARF2_FRAME_REG_UNDEFINED;
+ }
+ 
+-static struct sh_frame_cache *
++struct sh_frame_cache *
+ sh_alloc_frame_cache (void)
+ {
+   struct sh_frame_cache *cache;
+@@ -1862,7 +1849,7 @@ sh_alloc_frame_cache (void)
+   return cache;
+ }
+ 
+-static struct sh_frame_cache *
++struct sh_frame_cache *
+ sh_frame_cache (struct frame_info *this_frame, void **this_cache)
+ {
+   struct gdbarch *gdbarch = get_frame_arch (this_frame);
+@@ -1929,9 +1916,9 @@ sh_frame_cache (struct frame_info *this_frame, void **this_cache)
+   return cache;
+ }
+ 
+-static struct value *
+-sh_frame_prev_register (struct frame_info *this_frame,
+-			void **this_cache, int regnum)
++struct value *
++sh_frame_prev_register (struct frame_info *this_frame, void **this_cache,
++			int regnum)
+ {
+   struct gdbarch *gdbarch = get_frame_arch (this_frame);
+   struct sh_frame_cache *cache = sh_frame_cache (this_frame, this_cache);
+@@ -1945,7 +1932,7 @@ sh_frame_prev_register (struct frame_info *this_frame,
+      the current frame.  Frob regnum so that we pull the value from
+      the correct place.  */
+   if (regnum == gdbarch_pc_regnum (gdbarch))
+-    regnum = PR_REGNUM;
++    regnum = PR_REGNUM; /* XXX: really? */
+ 
+   if (regnum < SH_NUM_REGS && cache->saved_regs[regnum] != -1)
+     return frame_unwind_got_memory (this_frame, regnum,
+@@ -2234,8 +2221,8 @@ sh_return_in_first_hidden_param_p (struct gdbarch *gdbarch,
+ static struct gdbarch *
+ sh_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
+ {
+-  struct gdbarch *gdbarch;
+   struct gdbarch_tdep *tdep;
++  struct gdbarch *gdbarch;
+ 
+   /* If there is already a candidate, use it.  */
+   arches = gdbarch_list_lookup_by_info (arches, &info);
+@@ -2247,6 +2234,18 @@ sh_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
+   tdep = XCNEW (struct gdbarch_tdep);
+   gdbarch = gdbarch_alloc (&info, tdep);
+ 
++  /* General-purpose registers.  */
++  tdep->gregset = NULL;
++  tdep->gregset_reg_offset = NULL;
++  tdep->gregset_num_regs = 23;
++  tdep->sizeof_gregset = 0;
++
++  /* Floating-point registers.  */
++  tdep->fpregset = NULL;
++  tdep->sizeof_fpregset = 34*4;
++
++  tdep->jb_pc_offset = -1;
++
+   set_gdbarch_short_bit (gdbarch, 2 * TARGET_CHAR_BIT);
+   set_gdbarch_int_bit (gdbarch, 4 * TARGET_CHAR_BIT);
+   set_gdbarch_long_bit (gdbarch, 4 * TARGET_CHAR_BIT);
+@@ -2398,10 +2397,11 @@ sh_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
+       break;
+     }
+ 
++  dwarf2_append_unwinders (gdbarch);
++
+   /* Hook in ABI-specific overrides, if they have been registered.  */
+   gdbarch_init_osabi (info, gdbarch);
+ 
+-  dwarf2_append_unwinders (gdbarch);
+   frame_unwind_append_unwinder (gdbarch, &sh_stub_unwind);
+   frame_unwind_append_unwinder (gdbarch, &sh_frame_unwind);
+ 
+diff --git a/gdb/sh-tdep.h b/gdb/sh-tdep.h
+index 76e2e76e39b..2710f63010c 100644
+--- a/gdb/sh-tdep.h
++++ b/gdb/sh-tdep.h
+@@ -21,6 +21,12 @@
+ 
+ /* Contributed by Steve Chamberlain sac@cygnus.com.  */
+ 
++struct frame_info;
++struct gdbarch;
++struct reggroup;
++struct regset;
++struct regcache;
++
+ /* Registers for all SH variants.  Used also by sh3-rom.c.  */
+ enum
+   {
+@@ -29,6 +35,7 @@ enum
+     ARG0_REGNUM = 4,
+     ARGLAST_REGNUM = 7,
+     FP_REGNUM = 14,
++    SP_REGNUM = 15,
+     PC_REGNUM = 16,
+     PR_REGNUM = 17,
+     GBR_REGNUM = 18,
+@@ -81,6 +88,24 @@ enum
+     FV0_REGNUM = 76,
+     FV_LAST_REGNUM = 79
+   };
++#define SH_NUM_REGS 67
++
++struct sh_frame_cache
++{
++  /* Base address.  */
++  CORE_ADDR base;
++  LONGEST sp_offset;
++  CORE_ADDR pc;
++
++  /* Flag showing that a frame has been created in the prologue code. */
++  int uses_fp;
++
++  /* Saved registers.  */
++  CORE_ADDR saved_regs[SH_NUM_REGS];
++  CORE_ADDR saved_sp;
++};
++
++extern struct sh_frame_cache *sh_frame_cache (struct frame_info *next_frame, void **this_cache);
+ 
+ /* This structure describes a register in a core-file.  */
+ struct sh_corefile_regmap
+@@ -89,8 +114,32 @@ struct sh_corefile_regmap
+   unsigned int offset;
+ };
+ 
++/* sh architecture specific information.  */
+ struct gdbarch_tdep
+ {
++  /* General-purpose registers.  */
++  struct regset *gregset;
++  int *gregset_reg_offset;
++  int gregset_num_regs;
++  size_t sizeof_gregset;
++
++  /* Floating-point registers.  */
++  struct regset *fpregset;
++  size_t sizeof_fpregset;
++
++  /* Offset of saved PC in jmp_buf.  */
++  int jb_pc_offset;
++
++  /* Detect sigtramp.  */
++  int (*sigtramp_p) (struct frame_info *);
++
++  /* Get address of sigcontext for sigtramp.  */
++  CORE_ADDR (*sigcontext_addr) (struct frame_info *);
++
++  /* Offset of registers in `struct sigcontext'.  */
++  int *sc_reg_offset;
++  int sc_num_regs;
++
+   /* Non-NULL when debugging from a core file.  Provides the offset
+      where each general-purpose register is stored inside the associated
+      core file section.  */
+diff --git a/gdb/testsuite/gdb.asm/asm-source.exp b/gdb/testsuite/gdb.asm/asm-source.exp
+index 4914498f98c..6e25cbed185 100644
+--- a/gdb/testsuite/gdb.asm/asm-source.exp
++++ b/gdb/testsuite/gdb.asm/asm-source.exp
+@@ -116,6 +116,11 @@ switch -glob -- [istarget] {
+             append link-flags " -m elf32ppc"
+         }
+     }
++    "sh*-linux*" {
++        set asm-arch sh-linux
++        set asm-flags "-I${srcdir}/${subdir} -I${objdir}/${subdir}"
++	set debug-flags "-gdwarf-2"
++    }
+     "sh*-*-*" {
+         set asm-arch sh
+ 	set debug-flags "-gdwarf-2"
+diff --git a/gdb/testsuite/gdb.asm/sh.inc b/gdb/testsuite/gdb.asm/sh.inc
+index a4a5fc545e4..89efed7795c 100644
+--- a/gdb/testsuite/gdb.asm/sh.inc
++++ b/gdb/testsuite/gdb.asm/sh.inc
+@@ -40,9 +40,8 @@
+ 	mov.l   .Lconst\@,r1
+ 	bra	.Lafterconst\@
+ 	nop
+-	nop
+-.Lconst\@:
+ 	.align	2
++.Lconst\@:
+ 	.long	\subr
+ 	.align	1
+ .Lafterconst\@:
+diff --git a/gdb/testsuite/gdb.base/annota1.c b/gdb/testsuite/gdb.base/annota1.c
+index 424e1b83278..0de2e7b633a 100644
+--- a/gdb/testsuite/gdb.base/annota1.c
++++ b/gdb/testsuite/gdb.base/annota1.c
+@@ -1,6 +1,9 @@
+ #include <stdio.h>
+ #include <signal.h>
+ 
++#ifdef __sh__
++#define signal(a,b)    /* Signals not supported on this target - make them go away */
++#endif
+ 
+ void
+ handle_USR1 (int sig)
+diff --git a/gdb/testsuite/gdb.base/annota3.c b/gdb/testsuite/gdb.base/annota3.c
+index 424e1b83278..952aaf218ab 100644
+--- a/gdb/testsuite/gdb.base/annota3.c
++++ b/gdb/testsuite/gdb.base/annota3.c
+@@ -1,6 +1,10 @@
+ #include <stdio.h>
+ #include <signal.h>
+ 
++#ifdef __sh__
++#define signal(a,b)    /* Signals not supported on this target - make them go away */
++#endif
++
+ 
+ void
+ handle_USR1 (int sig)
+diff --git a/gdb/testsuite/gdb.base/sigall.c b/gdb/testsuite/gdb.base/sigall.c
+index 81f3b08d6bc..1574b2d6cb8 100644
+--- a/gdb/testsuite/gdb.base/sigall.c
++++ b/gdb/testsuite/gdb.base/sigall.c
+@@ -1,6 +1,9 @@
+ #include <signal.h>
+ #include <unistd.h>
+ 
++#ifdef __sh__
++#define signal(a,b)    /* Signals not supported on this target - make them go away */
++#endif
+ 
+ /* Signal handlers, we set breakpoints in them to make sure that the
+    signals really get delivered.  */
+diff --git a/gdb/testsuite/gdb.base/signals.c b/gdb/testsuite/gdb.base/signals.c
+index 756606880fa..1205a9bc9c5 100644
+--- a/gdb/testsuite/gdb.base/signals.c
++++ b/gdb/testsuite/gdb.base/signals.c
+@@ -3,6 +3,10 @@
+ #include <signal.h>
+ #include <unistd.h>
+ 
++#ifdef __sh__
++#define signal(a,b)    /* Signals not supported on this target - make them go away */
++#define alarm(a)       /* Ditto for alarm() */
++#endif
+ 
+ static int count = 0;
+ 
+-- 
+2.29.2
+

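The trampoline matchers above compare raw stack memory against the opcode words quoted in the comments, so the macro encodings must reproduce those words exactly. A quick standalone check, using only the constants given in the patch:

    /* Sanity-check the SH sigtramp opcode macros against the sequence
       documented in the patch (0x9305 0xc310 0x200b ...).  */
    #include <assert.h>
    #include <stdio.h>

    #define MOVW(n)  (0x9300 | ((n) - 2))  /* move mem word at PC+n to R3 */
    #define TRAP16   0xc310                /* syscall, number in R3 */
    #define OR_R0_R0 0x200b                /* or r0,r0 (hardware-bug filler) */

    int main (void)
    {
      assert (MOVW (7) == 0x9305);
      assert (TRAP16 == 0xc310);
      assert (OR_R0_R0 == 0x200b);
      printf ("sigtramp sequence: 0x%04x 0x%04x 0x%04x ...\n",
              MOVW (7), TRAP16, OR_R0_R0);
      return 0;
    }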
+ 50 - 0
recipes-devtools/gdb/gdb/0005-Dont-disable-libreadline.a-when-using-disable-static.patch

@@ -0,0 +1,50 @@
+From ec566877c50bdae0013a38dd457004e1db725d86 Mon Sep 17 00:00:00 2001
+From: Khem Raj <raj.khem@gmail.com>
+Date: Sat, 30 Apr 2016 15:25:03 -0700
+Subject: [PATCH 05/11] Dont disable libreadline.a when using --disable-static
+
+If gdb is configured with --disable-static then this is dutifully passed to
+readline, which then disables libreadline.a, causing a failure when gdb
+tries to link against it.
+
+To ensure that readline always builds static libraries, pass --enable-static to
+the sub-configure.
+
+Upstream-Status: Pending
+Signed-off-by: Ross Burton <ross.burton@intel.com>
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+---
+ Makefile.def | 3 ++-
+ Makefile.in  | 2 +-
+ 2 files changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/Makefile.def b/Makefile.def
+index 76d062bb671..e0a1e2b14b1 100644
+--- a/Makefile.def
++++ b/Makefile.def
+@@ -105,7 +105,8 @@ host_modules= { module= libiconv;
+ 		missing= install-html;
+ 		missing= install-info; };
+ host_modules= { module= m4; };
+-host_modules= { module= readline; };
++host_modules= { module= readline;
++                extra_configure_flags='--enable-static';};
+ host_modules= { module= sid; };
+ host_modules= { module= sim; };
+ host_modules= { module= texinfo; no_install= true; };
+diff --git a/Makefile.in b/Makefile.in
+index 9dfd39fae13..eaf1dd0f229 100644
+--- a/Makefile.in
++++ b/Makefile.in
+@@ -25548,7 +25548,7 @@ configure-readline:
+ 	  $$s/$$module_srcdir/configure \
+ 	  --srcdir=$${topdir}/$$module_srcdir \
+ 	  $(HOST_CONFIGARGS) --build=${build_alias} --host=${host_alias} \
+-	  --target=${target_alias}  \
++	  --target=${target_alias} --enable-static \
+ 	  || exit 1
+ @endif readline
+ 
+-- 
+2.29.2
+

+ 36 - 0
recipes-devtools/gdb/gdb/0006-use-asm-sgidefs.h.patch

@@ -0,0 +1,36 @@
+From 4b02e54b87d435e1715ce871bcce720561a7afb1 Mon Sep 17 00:00:00 2001
+From: Andre McCurdy <amccurdy@gmail.com>
+Date: Sat, 30 Apr 2016 15:29:06 -0700
+Subject: [PATCH 06/11] use <asm/sgidefs.h>
+
+Build fix for MIPS with musl libc
+
+The MIPS-specific header <sgidefs.h> is provided by glibc and uclibc
+but not by musl. Regardless of the libc, the kernel headers provide
+<asm/sgidefs.h>, which supplies the same definitions, so use that
+instead.
+
+Upstream-Status: Pending
+
+Signed-off-by: Andre McCurdy <armccurdy@gmail.com>
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+---
+ gdb/mips-linux-nat.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/gdb/mips-linux-nat.c b/gdb/mips-linux-nat.c
+index 4337795bac8..7c8e54cabe0 100644
+--- a/gdb/mips-linux-nat.c
++++ b/gdb/mips-linux-nat.c
+@@ -31,7 +31,7 @@
+ #include "gdb_proc_service.h"
+ #include "gregset.h"
+ 
+-#include <sgidefs.h>
++#include <asm/sgidefs.h>
+ #include "nat/gdb_ptrace.h"
+ #include <asm/ptrace.h>
+ #include "inf-ptrace.h"
+-- 
+2.29.2
+

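Both headers define the same ABI selectors on MIPS, which is what makes the swap safe. A sketch of how such code typically consumes them (guarded so it compiles on any host; the _MIPS_SIM/_ABIO32 names are the usual ones from these headers):

    /* On MIPS, <asm/sgidefs.h> (kernel) supplies the same ABI selectors
       as <sgidefs.h> (glibc/uclibc); guarded so the sketch builds
       anywhere.  */
    #ifdef __mips__
    #include <asm/sgidefs.h>   /* present regardless of libc */

    #if _MIPS_SIM == _ABIO32
    /* o32-specific code would go here */
    #endif
    #endif

    int main (void) { return 0; }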
+ 30 - 0
recipes-devtools/gdb/gdb/0008-Change-order-of-CFLAGS.patch

@@ -0,0 +1,30 @@
+From 07175ae348c9d07581e1db94762d5a2d577a75ad Mon Sep 17 00:00:00 2001
+From: Khem Raj <raj.khem@gmail.com>
+Date: Sat, 30 Apr 2016 15:35:39 -0700
+Subject: [PATCH 08/11] Change order of CFLAGS
+
+Lets us override -Werror if need be.
+
+Upstream-Status: Inappropriate
+
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+---
+ gdbserver/Makefile.in | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/gdbserver/Makefile.in b/gdbserver/Makefile.in
+index 2b1a77f2de7..cb5ddb12fd5 100644
+--- a/gdbserver/Makefile.in
++++ b/gdbserver/Makefile.in
+@@ -161,7 +161,7 @@ WIN32APILIBS = @WIN32APILIBS@
+ INTERNAL_CFLAGS_BASE = ${CXXFLAGS} ${GLOBAL_CFLAGS} \
+ 	${PROFILE_CFLAGS} ${INCLUDE_CFLAGS} ${CPPFLAGS} $(PTHREAD_CFLAGS)
+ INTERNAL_WARN_CFLAGS = ${INTERNAL_CFLAGS_BASE} $(WARN_CFLAGS)
+-INTERNAL_CFLAGS = ${INTERNAL_WARN_CFLAGS} $(WERROR_CFLAGS) -DGDBSERVER
++INTERNAL_CFLAGS = ${INTERNAL_WARN_CFLAGS} $(WERROR_CFLAGS) ${COMPILER_CFLAGS} -DGDBSERVER
+ 
+ # LDFLAGS is specifically reserved for setting from the command line
+ # when running make.
+-- 
+2.29.2
+

+ 48 - 0
recipes-devtools/gdb/gdb/0009-resolve-restrict-keyword-conflict.patch

@@ -0,0 +1,48 @@
+From 73a3babe48c7948f71683d0862eddc1609fdaa3d Mon Sep 17 00:00:00 2001
+From: Khem Raj <raj.khem@gmail.com>
+Date: Tue, 10 May 2016 08:47:05 -0700
+Subject: [PATCH 09/11] resolve restrict keyword conflict
+
+GCC detects that 'restrict' is used as a parameter name in function
+signatures and complains since both parameters are named 'restrict';
+therefore we use __restrict to denote the C99 keyword.
+
+Upstream-Status: Pending
+
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+---
+ gnulib/import/sys_time.in.h | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/gnulib/import/sys_time.in.h b/gnulib/import/sys_time.in.h
+index d30b26719b2..fd76af5e6d9 100644
+--- a/gnulib/import/sys_time.in.h
++++ b/gnulib/import/sys_time.in.h
+@@ -93,20 +93,20 @@ struct timeval
+ #   define gettimeofday rpl_gettimeofday
+ #  endif
+ _GL_FUNCDECL_RPL (gettimeofday, int,
+-                  (struct timeval *restrict, void *restrict)
++                  (struct timeval *__restrict, void *__restrict)
+                   _GL_ARG_NONNULL ((1)));
+ _GL_CXXALIAS_RPL (gettimeofday, int,
+-                  (struct timeval *restrict, void *restrict));
++                  (struct timeval *__restrict, void *__restrict));
+ # else
+ #  if !@HAVE_GETTIMEOFDAY@
+ _GL_FUNCDECL_SYS (gettimeofday, int,
+-                  (struct timeval *restrict, void *restrict)
++                  (struct timeval *__restrict, void *__restrict)
+                   _GL_ARG_NONNULL ((1)));
+ #  endif
+ /* Need to cast, because on glibc systems, by default, the second argument is
+                                                   struct timezone *.  */
+ _GL_CXXALIAS_SYS_CAST (gettimeofday, int,
+-                       (struct timeval *restrict, void *restrict));
++                       (struct timeval *__restrict, void *__restrict));
+ # endif
+ _GL_CXXALIASWARN (gettimeofday);
+ # if defined __cplusplus && defined GNULIB_NAMESPACE
+-- 
+2.29.2
+

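The conflict is easy to reproduce in isolation: compiled as C++, 'restrict' is an ordinary identifier, so each occurrence parses as a parameter name and the declaration ends up with two parameters named 'restrict'. A minimal sketch of the failure and the __restrict fix (standalone, not gnulib code):

    /* As C++ (where 'restrict' is not a keyword) the commented-out line
       declares two parameters both named "restrict" -- a duplicate
       parameter error:

       int bad_gettimeofday (struct timeval *restrict, void *restrict);

       The GNU __restrict spelling is a qualifier in both C and C++ and
       introduces no parameter name at all.  */
    struct timeval;
    int good_gettimeofday (struct timeval *__restrict, void *__restrict);

    int main (void) { return 0; }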
+ 49 - 0
recipes-devtools/gdb/gdb/0010-Fix-invalid-sigprocmask-call.patch

@@ -0,0 +1,49 @@
+From c5c5372c6a319cac8b3f9f86304fcebcbb5ea06f Mon Sep 17 00:00:00 2001
+From: Yousong Zhou <yszhou4tech@gmail.com>
+Date: Fri, 24 Mar 2017 10:36:03 +0800
+Subject: [PATCH 10/11] Fix invalid sigprocmask call
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The POSIX document says
+
+    The pthread_sigmask() and sigprocmask() functions shall fail if:
+
+    [EINVAL]
+    The value of the how argument is not equal to one of the defined values.
+
+and this is what musl libc currently does.  Fix the call to be safe
+and correct.
+
+ [1] http://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_sigmask.html
+
+gdb/ChangeLog:
+2017-03-24  Yousong Zhou  <yszhou4tech@gmail.com>
+
+    * common/signals-state-save-restore.c (save_original_signals_state):
+    Fix invalid sigprocmask call.
+
+Upstream-Status: Pending [not author, cherry-picked from LEDE https://bugs.lede-project.org/index.php?do=details&task_id=637&openedfrom=-1%2Bweek]
+Signed-off-by: André Draszik <adraszik@tycoint.com>
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+---
+ gdbsupport/signals-state-save-restore.cc | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/gdbsupport/signals-state-save-restore.cc b/gdbsupport/signals-state-save-restore.cc
+index c292d498daf..af9dcaeb08e 100644
+--- a/gdbsupport/signals-state-save-restore.cc
++++ b/gdbsupport/signals-state-save-restore.cc
+@@ -38,7 +38,7 @@ save_original_signals_state (bool quiet)
+   int i;
+   int res;
+ 
+-  res = gdb_sigmask (0,  NULL, &original_signal_mask);
++  res = gdb_sigmask (SIG_BLOCK,  NULL, &original_signal_mask);
+   if (res == -1)
+     perror_with_name (("sigprocmask"));
+ 
+-- 
+2.29.2
+

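The fix uses the standard idiom for querying the current mask: pass NULL as the new set, but still give 'how' one of the defined values, since strict implementations such as musl validate it either way. A small runnable sketch of that idiom:

    /* Query the current signal mask without modifying it.  Passing 0 as
       'how' draws EINVAL from strict libcs such as musl, so a defined
       constant is used even though 'set' is NULL.  */
    #include <signal.h>
    #include <stdio.h>

    int main (void)
    {
      sigset_t mask;

      if (sigprocmask (SIG_BLOCK, NULL, &mask) == -1)
        {
          perror ("sigprocmask");
          return 1;
        }
      printf ("SIGINT currently %sblocked\n",
              sigismember (&mask, SIGINT) ? "" : "un");
      return 0;
    }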
+ 40 - 0
recipes-devtools/gdb/gdb/0011-gdbserver-ctrl-c-handling.patch

@@ -0,0 +1,40 @@
+From 63df98fa78c8a6e12b40ebdc5c155838d2bf8b5f Mon Sep 17 00:00:00 2001
+From: Khem Raj <raj.khem@gmail.com>
+Date: Thu, 29 Nov 2018 18:00:23 -0800
+Subject: [PATCH 11/11] gdbserver ctrl-c handling
+
+This problem was introduced by the upstream commit 78708b7c8c.
+After that commit, gdbserver sends SIGINT to the process group
+(-signal_pid).
+But if gdbserver is used to send SIGINT and the attached process is
+not a process group leader, then the "kill (-signal_pid, SIGINT)"
+call returns an error and fails to interrupt the attached process.
+
+Upstream-Status: Submitted
+[https://sourceware.org/bugzilla/show_bug.cgi?id=18945]
+
+Author: Josh Gao
+Signed-off-by: Zhixiong Chi <zhixiong.chi@windriver.com>
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+---
+ gdbserver/linux-low.cc | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/gdbserver/linux-low.cc b/gdbserver/linux-low.cc
+index e45493339d2..aabc099b456 100644
+--- a/gdbserver/linux-low.cc
++++ b/gdbserver/linux-low.cc
+@@ -5714,7 +5714,7 @@ linux_process_target::request_interrupt ()
+ {
+   /* Send a SIGINT to the process group.  This acts just like the user
+      typed a ^C on the controlling terminal.  */
+-  ::kill (-signal_pid, SIGINT);
++  ::kill (signal_pid, SIGINT);
+ }
+ 
+ bool
+-- 
+2.29.2
+

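The semantics the patch relies on: kill(2) with a negative pid targets a whole process group, and fails with ESRCH when no group with that id exists, i.e. when the attached process is not a group leader; a positive pid always names the single process. A hedged standalone sketch (the demo signals itself; this is not gdbserver code):

    /* kill (pid, ...) vs kill (-pid, ...): the negative form needs a
       process *group* with that id, which only exists if pid is a group
       leader -- the failure mode the patch fixes.  */
    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    int main (void)
    {
      signal (SIGINT, SIG_IGN);      /* ignore SIGINT so the demo survives */

      if (kill (getpid (), SIGINT) == -1)   /* single process: always valid */
        perror ("kill");
      else
        printf ("SIGINT delivered to pid %ld\n", (long) getpid ());
      return 0;
    }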
+ 28 - 0
recipes-devtools/gdb/gdb_10.1.bb

@@ -0,0 +1,28 @@
+require gdb.inc
+require gdb-${PV}.inc
+
+inherit python3-dir
+
+EXTRA_OEMAKE:append:libc-musl = "\
+                                 gt_cv_func_gnugettext1_libc=yes \
+                                 gt_cv_func_gnugettext2_libc=yes \
+                                 gl_cv_func_working_strerror=yes \
+                                 gl_cv_func_strerror_0_works=yes \
+                                 gl_cv_func_gettimeofday_clobber=no \
+                                "
+
+do_configure:prepend() {
+	if [ "${@bb.utils.filter('PACKAGECONFIG', 'python', d)}" ]; then
+		cat > ${WORKDIR}/python << EOF
+#!/bin/sh
+case "\$2" in
+	--includes) echo "-I${STAGING_INCDIR}/${PYTHON_DIR}${PYTHON_ABI}/" ;;
+	--ldflags) echo "-Wl,-rpath-link,${STAGING_LIBDIR}/.. -Wl,-rpath,${libdir}/.. -lpthread -ldl -lutil -lm -lpython${PYTHON_BASEVERSION}${PYTHON_ABI}" ;;
+	--exec-prefix) echo "${exec_prefix}" ;;
+	*) exit 1 ;;
+esac
+exit 0
+EOF
+		chmod +x ${WORKDIR}/python
+	fi
+}