0022-target-riscv-rvv-1.0-update-check-functions.patch 39 KB

  1. From 6a82af7b913410fe993671d033d1ee4402d10d4a Mon Sep 17 00:00:00 2001
  2. From: Frank Chang <frank.chang@sifive.com>
  3. Date: Mon, 28 Sep 2020 16:23:30 +0800
  4. Subject: [PATCH 022/107] target/riscv: rvv-1.0: update check functions
  5. Update check functions with RVV 1.0 rules.
  6. Signed-off-by: Frank Chang <frank.chang@sifive.com>
  7. Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
  8. ---
  9. target/riscv/insn_trans/trans_rvv.c.inc | 739 ++++++++++++++++--------
  10. 1 file changed, 505 insertions(+), 234 deletions(-)
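Note (not part of the patch): the new helpers introduced below all reduce to small arithmetic checks on vector register numbers and the log2-encoded LMUL. A minimal standalone sketch of the two most-used ones, is_overlapped() and require_align(), with made-up register numbers purely for illustration:

    /* sketch.c -- illustrative only; mirrors the helpers added by this patch */
    #include <stdio.h>

    #define MAX(a, b) ((a) > (b) ? (a) : (b))
    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    /* Same arithmetic as is_overlapped(): do the two register spans intersect? */
    static int is_overlapped(int astart, int asize, int bstart, int bsize)
    {
        int aend = astart + asize;
        int bend = bstart + bsize;
        return MAX(aend, bend) - MIN(astart, bstart) < asize + bsize;
    }

    /*
     * Same intent as require_align(): the group base register must be a
     * multiple of LMUL; fractional LMUL (lmul <= 0) allows any register.
     * (The patch uses extract32() instead of a mask -- same result.)
     */
    static int require_align(int val, int lmul)
    {
        return lmul <= 0 || (val & ((1 << lmul) - 1)) == 0;
    }

    int main(void)
    {
        printf("%d\n", is_overlapped(4, 2, 5, 2)); /* v4-v5 vs v5-v6: 1 (overlap)    */
        printf("%d\n", is_overlapped(4, 2, 6, 2)); /* v4-v5 vs v6-v7: 0 (disjoint)   */
        printf("%d\n", require_align(6, 2));       /* v6 with LMUL=4: 0 (misaligned) */
        printf("%d\n", require_align(8, 2));       /* v8 with LMUL=4: 1 (aligned)    */
        return 0;
    }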
  11. diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
  12. index 46e18a62b5..f666c64bbe 100644
  13. --- a/target/riscv/insn_trans/trans_rvv.c.inc
  14. +++ b/target/riscv/insn_trans/trans_rvv.c.inc
  15. @@ -19,11 +19,124 @@
  16. #include "tcg/tcg-gvec-desc.h"
  17. #include "internals.h"
  18. +static inline bool is_overlapped(const int8_t astart, int8_t asize,
  19. + const int8_t bstart, int8_t bsize)
  20. +{
  21. + const int8_t aend = astart + asize;
  22. + const int8_t bend = bstart + bsize;
  23. +
  24. + return MAX(aend, bend) - MIN(astart, bstart) < asize + bsize;
  25. +}
  26. +
  27. +static bool require_rvv(DisasContext *s)
  28. +{
  29. + return s->mstatus_vs != 0;
  30. +}
  31. +
  32. +static bool require_rvf(DisasContext *s)
  33. +{
  34. + if (s->mstatus_fs == 0) {
  35. + return false;
  36. + }
  37. +
  38. + switch (s->sew) {
  39. + case MO_16:
  40. + case MO_32:
  41. + return has_ext(s, RVF);
  42. + case MO_64:
  43. + return has_ext(s, RVD);
  44. + default:
  45. + return false;
  46. + }
  47. +}
  48. +
  49. +static bool require_scale_rvf(DisasContext *s)
  50. +{
  51. + if (s->mstatus_fs == 0) {
  52. + return false;
  53. + }
  54. +
  55. + switch (s->sew) {
  56. + case MO_8:
  57. + case MO_16:
  58. + return has_ext(s, RVF);
  59. + case MO_32:
  60. + return has_ext(s, RVD);
  61. + default:
  62. + return false;
  63. + }
  64. +}
  65. +
  66. +/* Destination vector register group cannot overlap source mask register. */
  67. +static bool require_vm(int vm, int vd)
  68. +{
  69. + return (vm != 0 || vd != 0);
  70. +}
  71. +
  72. +static bool require_nf(int vd, int nf, int lmul)
  73. +{
  74. + int size = nf << MAX(lmul, 0);
  75. + return size <= 8 && vd + size <= 32;
  76. +}
  77. +
  78. +/*
  79. + * Vector register should aligned with the passed-in LMUL (EMUL).
  80. + * If LMUL < 0, i.e. fractional LMUL, any vector register is allowed.
  81. + */
  82. +static bool require_align(const int8_t val, const int8_t lmul)
  83. +{
  84. + return lmul <= 0 || extract32(val, 0, lmul) == 0;
  85. +}
  86. +
  87. +/*
  88. + * A destination vector register group can overlap a source vector
  89. + * register group only if one of the following holds:
  90. + * 1. The destination EEW equals the source EEW.
  91. + * 2. The destination EEW is smaller than the source EEW and the overlap
  92. + * is in the lowest-numbered part of the source register group.
  93. + * 3. The destination EEW is greater than the source EEW, the source EMUL
  94. + * is at least 1, and the overlap is in the highest-numbered part of
  95. + * the destination register group.
  96. + * (Section 5.2)
  97. + *
  98. + * This function returns true if one of the following holds:
  99. + * * Destination vector register group does not overlap a source vector
  100. + * register group.
  101. + * * Rule 3 met.
  102. + * For rule 1, overlap is allowed so this function doesn't need to be called.
  103. + * For rule 2, (vd == vs). Caller has to check whether: (vd != vs) before
  104. + * calling this function.
  105. + */
  106. +static bool require_noover(const int8_t dst, const int8_t dst_lmul,
  107. + const int8_t src, const int8_t src_lmul)
  108. +{
  109. + int8_t dst_size = dst_lmul <= 0 ? 1 : 1 << dst_lmul;
  110. + int8_t src_size = src_lmul <= 0 ? 1 : 1 << src_lmul;
  111. +
  112. + /* Destination EEW is greater than the source EEW, check rule 3. */
  113. + if (dst_size > src_size) {
  114. + if (dst < src &&
  115. + src_lmul >= 0 &&
  116. + is_overlapped(dst, dst_size, src, src_size) &&
  117. + !is_overlapped(dst, dst_size, src + src_size, src_size)) {
  118. + return true;
  119. + }
  120. + }
  121. +
  122. + return !is_overlapped(dst, dst_size, src, src_size);
  123. +}
  124. +
  125. +static bool require_noover_seg(const int8_t dst, const int8_t nf,
  126. + const int8_t src)
  127. +{
  128. + return !is_overlapped(dst, nf, src, 1);
  129. +}
  130. +
  131. static bool trans_vsetvl(DisasContext *ctx, arg_vsetvl *a)
  132. {
  133. TCGv s1, s2, dst;
  134. - if (!has_ext(ctx, RVV)) {
  135. + if (!require_rvv(ctx) || !has_ext(ctx, RVV)) {
  136. return false;
  137. }
  138. @@ -56,7 +169,7 @@ static bool trans_vsetvli(DisasContext *ctx, arg_vsetvli *a)
  139. {
  140. TCGv s1, s2, dst;
  141. - if (!has_ext(ctx, RVV)) {
  142. + if (!require_rvv(ctx) || !has_ext(ctx, RVV)) {
  143. return false;
  144. }
  145. @@ -100,54 +213,246 @@ static bool vext_check_isa_ill(DisasContext *s)
  146. return !s->vill;
  147. }
  148. +static bool vext_check_ss(DisasContext *s, int vd, int vs, int vm)
  149. +{
  150. + return require_vm(vm, vd) &&
  151. + require_align(vd, s->lmul) &&
  152. + require_align(vs, s->lmul);
  153. +}
  154. +
  155. /*
  156. - * There are two rules check here.
  157. + * Check function for vector instruction with format:
  158. + * single-width result and single-width sources (SEW = SEW op SEW)
  159. *
  160. - * 1. Vector register numbers are multiples of LMUL. (Section 3.2)
  161. + * Rules to be checked here:
  162. + * 1. Destination vector register group for a masked vector
  163. + * instruction cannot overlap the source mask register (v0).
  164. + * (Section 5.3)
  165. + * 2. Destination vector register number is multiples of LMUL.
  166. + * (Section 3.3.2)
  167. + * 3. Source (vs2, vs1) vector register number are multiples of LMUL.
  168. + * (Section 3.3.2)
  169. + */
  170. +static bool vext_check_sss(DisasContext *s, int vd, int vs1, int vs2, int vm)
  171. +{
  172. + return vext_check_ss(s, vd, vs2, vm) &&
  173. + require_align(vs1, s->lmul);
  174. +}
  175. +
  176. +static bool vext_check_ms(DisasContext *s, int vd, int vs)
  177. +{
  178. + bool ret = require_align(vs, s->lmul);
  179. + if (vd != vs) {
  180. + ret &= require_noover(vd, 0, vs, s->lmul);
  181. + }
  182. + return ret;
  183. +}
  184. +
  185. +/*
  186. + * Check function for maskable vector instruction with format:
  187. + * single-width result and single-width sources (SEW = SEW op SEW)
  188. *
  189. - * 2. For all widening instructions, the destination LMUL value must also be
  190. - * a supported LMUL value. (Section 11.2)
  191. + * Rules to be checked here:
  192. + * 1. Source (vs2, vs1) vector register number are multiples of LMUL.
  193. + * (Section 3.3.2)
  194. + * 2. Destination vector register cannot overlap a source vector
  195. + * register (vs2, vs1) group.
  196. + * (Section 5.2)
  197. + * 3. The destination vector register group for a masked vector
  198. + * instruction cannot overlap the source mask register (v0),
  199. + * unless the destination vector register is being written
  200. + * with a mask value (e.g., comparisons) or the scalar result
  201. + * of a reduction. (Section 5.3)
  202. */
  203. -static bool vext_check_reg(DisasContext *s, uint32_t reg, bool widen)
  204. +static bool vext_check_mss(DisasContext *s, int vd, int vs1, int vs2)
  205. {
  206. - /*
  207. - * The destination vector register group results are arranged as if both
  208. - * SEW and LMUL were at twice their current settings. (Section 11.2).
  209. - */
  210. - int legal = widen ? 2 << s->lmul : 1 << s->lmul;
  211. + bool ret = vext_check_ms(s, vd, vs2) &&
  212. + require_align(vs1, s->lmul);
  213. + if (vd != vs1) {
  214. + ret &= require_noover(vd, 0, vs1, s->lmul);
  215. + }
  216. + return ret;
  217. +}
  218. - return !((s->lmul == 0x3 && widen) || (reg % legal));
  219. +/*
  220. + * Common check function for vector widening instructions
  221. + * of double-width result (2*SEW).
  222. + *
  223. + * Rules to be checked here:
  224. + * 1. The largest vector register group used by an instruction
  225. + * can not be greater than 8 vector registers (Section 5.2):
  226. + * => LMUL < 8.
  227. + * => SEW < 64.
  228. + * 2. Destination vector register number is multiples of 2 * LMUL.
  229. + * (Section 3.3.2, 11.2)
  230. + * 3. Destination vector register group for a masked vector
  231. + * instruction cannot overlap the source mask register (v0).
  232. + * (Section 5.3)
  233. + */
  234. +static bool vext_wide_check_common(DisasContext *s, int vd, int vm)
  235. +{
  236. + return (s->lmul <= 2) &&
  237. + (s->sew < MO_64) &&
  238. + require_align(vd, s->lmul + 1) &&
  239. + require_vm(vm, vd);
  240. }
  241. /*
  242. - * There are two rules check here.
  243. + * Common check function for vector narrowing instructions
  244. + * of single-width result (SEW) and double-width source (2*SEW).
  245. + *
  246. + * Rules to be checked here:
  247. + * 1. The largest vector register group used by an instruction
  248. + * can not be greater than 8 vector registers (Section 5.2):
  249. + * => LMUL < 8.
  250. + * => SEW < 64.
  251. + * 2. Source vector register number is multiples of 2 * LMUL.
  252. + * (Section 3.3.2, 11.3)
  253. + * 3. Destination vector register number is multiples of LMUL.
  254. + * (Section 3.3.2, 11.3)
  255. + * 4. Destination vector register group for a masked vector
  256. + * instruction cannot overlap the source mask register (v0).
  257. + * (Section 5.3)
  258. + */
  259. +static bool vext_narrow_check_common(DisasContext *s, int vd, int vs2,
  260. + int vm)
  261. +{
  262. + return (s->lmul <= 2) &&
  263. + (s->sew < MO_64) &&
  264. + require_align(vs2, s->lmul + 1) &&
  265. + require_align(vd, s->lmul) &&
  266. + require_vm(vm, vd);
  267. +}
  268. +
  269. +static bool vext_check_ds(DisasContext *s, int vd, int vs, int vm)
  270. +{
  271. + return vext_wide_check_common(s, vd, vm) &&
  272. + require_align(vs, s->lmul) &&
  273. + require_noover(vd, s->lmul + 1, vs, s->lmul);
  274. +}
  275. +
  276. +static bool vext_check_dd(DisasContext *s, int vd, int vs, int vm)
  277. +{
  278. + return vext_wide_check_common(s, vd, vm) &&
  279. + require_align(vs, s->lmul + 1);
  280. +}
  281. +
  282. +/*
  283. + * Check function for vector instruction with format:
  284. + * double-width result and single-width sources (2*SEW = SEW op SEW)
  285. *
  286. - * 1. The destination vector register group for a masked vector instruction can
  287. - * only overlap the source mask register (v0) when LMUL=1. (Section 5.3)
  288. + * Rules to be checked here:
  289. + * 1. All rules in defined in widen common rules are applied.
  290. + * 2. Source (vs2, vs1) vector register number are multiples of LMUL.
  291. + * (Section 3.3.2)
  292. + * 3. Destination vector register cannot overlap a source vector
  293. + * register (vs2, vs1) group.
  294. + * (Section 5.2)
  295. + */
  296. +static bool vext_check_dss(DisasContext *s, int vd, int vs1, int vs2, int vm)
  297. +{
  298. + return vext_check_ds(s, vd, vs2, vm) &&
  299. + require_align(vs1, s->lmul) &&
  300. + require_noover(vd, s->lmul + 1, vs1, s->lmul);
  301. +}
  302. +
  303. +/*
  304. + * Check function for vector instruction with format:
  305. + * double-width result and double-width source1 and single-width
  306. + * source2 (2*SEW = 2*SEW op SEW)
  307. *
  308. - * 2. In widen instructions and some other insturctions, like vslideup.vx,
  309. - * there is no need to check whether LMUL=1.
  310. + * Rules to be checked here:
  311. + * 1. All rules in defined in widen common rules are applied.
  312. + * 2. Source 1 (vs2) vector register number is multiples of 2 * LMUL.
  313. + * (Section 3.3.2)
  314. + * 3. Source 2 (vs1) vector register number is multiples of LMUL.
  315. + * (Section 3.3.2)
  316. + * 4. Destination vector register cannot overlap a source vector
  317. + * register (vs1) group.
  318. + * (Section 5.2)
  319. */
  320. -static bool vext_check_overlap_mask(DisasContext *s, uint32_t vd, bool vm,
  321. - bool force)
  322. +static bool vext_check_dds(DisasContext *s, int vd, int vs1, int vs2, int vm)
  323. {
  324. - return (vm != 0 || vd != 0) || (!force && (s->lmul == 0));
  325. + return vext_check_ds(s, vd, vs1, vm) &&
  326. + require_align(vs2, s->lmul + 1);
  327. }
  328. -/* The LMUL setting must be such that LMUL * NFIELDS <= 8. (Section 7.8) */
  329. -static bool vext_check_nf(DisasContext *s, uint32_t nf)
  330. +static bool vext_check_sd(DisasContext *s, int vd, int vs, int vm)
  331. {
  332. - return (1 << s->lmul) * nf <= 8;
  333. + bool ret = vext_narrow_check_common(s, vd, vs, vm);
  334. + if (vd != vs) {
  335. + ret &= require_noover(vd, s->lmul, vs, s->lmul + 1);
  336. + }
  337. + return ret;
  338. }
  339. /*
  340. - * The destination vector register group cannot overlap a source vector register
  341. - * group of a different element width. (Section 11.2)
  342. + * Check function for vector instruction with format:
  343. + * single-width result and double-width source 1 and single-width
  344. + * source 2 (SEW = 2*SEW op SEW)
  345. + *
  346. + * Rules to be checked here:
  347. + * 1. All rules in defined in narrow common rules are applied.
  348. + * 2. Destination vector register cannot overlap a source vector
  349. + * register (vs2) group.
  350. + * (Section 5.2)
  351. + * 3. Source 2 (vs1) vector register number is multiples of LMUL.
  352. + * (Section 3.3.2)
  353. */
  354. -static inline bool vext_check_overlap_group(int rd, int dlen, int rs, int slen)
  355. +static bool vext_check_sds(DisasContext *s, int vd, int vs1, int vs2, int vm)
  356. {
  357. - return ((rd >= rs + slen) || (rs >= rd + dlen));
  358. + return vext_check_sd(s, vd, vs2, vm) &&
  359. + require_align(vs1, s->lmul);
  360. }
  361. +
  362. +/*
  363. + * Check function for vector reduction instructions.
  364. + *
  365. + * Rules to be checked here:
  366. + * 1. Source 1 (vs2) vector register number is multiples of LMUL.
  367. + * (Section 3.3.2)
  368. + */
  369. +static bool vext_check_reduction(DisasContext *s, int vs2)
  370. +{
  371. + return require_align(vs2, s->lmul) && (s->vstart == 0);
  372. +}
  373. +
  374. +/*
  375. + * Check function for vector slide instructions.
  376. + *
  377. + * Rules to be checked here:
  378. + * 1. Source 1 (vs2) vector register number is multiples of LMUL.
  379. + * (Section 3.3.2)
  380. + * 2. Destination vector register number is multiples of LMUL.
  381. + * (Section 3.3.2)
  382. + * 3. Destination vector register group for a masked vector
  383. + * instruction cannot overlap the source mask register (v0).
  384. + * (Section 5.3)
  385. + * 4. The destination vector register group for vslideup, vslide1up,
  386. + * vfslide1up, cannot overlap the source vector register (vs2) group.
  387. + * (Section 5.2, 17.3.1, 17.3.3)
  388. + */
  389. +static bool vext_check_slide(DisasContext *s, int vd, int vs2,
  390. + int vm, bool is_over)
  391. +{
  392. + bool ret = require_align(vs2, s->lmul) &&
  393. + require_align(vd, s->lmul) &&
  394. + require_vm(vm, vd);
  395. + if (is_over) {
  396. + ret &= (vd != vs2);
  397. + }
  398. + return ret;
  399. +}
  400. +
  401. +/*
  402. + * In cpu_get_tb_cpu_state(), set VILL if RVV was not present.
  403. + * So RVV is also be checked in this function.
  404. + */
  405. +static bool vext_check_isa_ill(DisasContext *s)
  406. +{
  407. + return !s->vill;
  408. +}
  409. +
  410. /* common translation macro */
  411. #define GEN_VEXT_TRANS(NAME, SEQ, ARGTYPE, OP, CHECK) \
  412. static bool trans_##NAME(DisasContext *s, arg_##ARGTYPE *a)\
  413. @@ -803,11 +1108,9 @@ GEN_VEXT_TRANS(vamomaxud_v, 17, rwdvm, amo_op, amo_check)
  414. static bool opivv_check(DisasContext *s, arg_rmrr *a)
  415. {
  416. - return (vext_check_isa_ill(s) &&
  417. - vext_check_overlap_mask(s, a->rd, a->vm, false) &&
  418. - vext_check_reg(s, a->rd, false) &&
  419. - vext_check_reg(s, a->rs2, false) &&
  420. - vext_check_reg(s, a->rs1, false));
  421. + return require_rvv(s) &&
  422. + vext_check_isa_ill(s) &&
  423. + vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm);
  424. }
  425. typedef void GVecGen3Fn(unsigned, uint32_t, uint32_t,
  426. @@ -898,10 +1201,9 @@ static bool opivx_trans(uint32_t vd, uint32_t rs1, uint32_t vs2, uint32_t vm,
  427. static bool opivx_check(DisasContext *s, arg_rmrr *a)
  428. {
  429. - return (vext_check_isa_ill(s) &&
  430. - vext_check_overlap_mask(s, a->rd, a->vm, false) &&
  431. - vext_check_reg(s, a->rd, false) &&
  432. - vext_check_reg(s, a->rs2, false));
  433. + return require_rvv(s) &&
  434. + vext_check_isa_ill(s) &&
  435. + vext_check_ss(s, a->rd, a->rs2, a->vm);
  436. }
  437. typedef void GVecGen2sFn(unsigned, uint32_t, uint32_t, TCGv_i64,
  438. @@ -1098,16 +1400,9 @@ GEN_OPIVI_GVEC_TRANS(vrsub_vi, 0, vrsub_vx, rsubi)
  439. /* OPIVV with WIDEN */
  440. static bool opivv_widen_check(DisasContext *s, arg_rmrr *a)
  441. {
  442. - return (vext_check_isa_ill(s) &&
  443. - vext_check_overlap_mask(s, a->rd, a->vm, true) &&
  444. - vext_check_reg(s, a->rd, true) &&
  445. - vext_check_reg(s, a->rs2, false) &&
  446. - vext_check_reg(s, a->rs1, false) &&
  447. - vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs2,
  448. - 1 << s->lmul) &&
  449. - vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs1,
  450. - 1 << s->lmul) &&
  451. - (s->lmul < 0x3) && (s->sew < 0x3));
  452. + return require_rvv(s) &&
  453. + vext_check_isa_ill(s) &&
  454. + vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm);
  455. }
  456. static bool do_opivv_widen(DisasContext *s, arg_rmrr *a,
  457. @@ -1152,13 +1447,9 @@ GEN_OPIVV_WIDEN_TRANS(vwsub_vv, opivv_widen_check)
  458. /* OPIVX with WIDEN */
  459. static bool opivx_widen_check(DisasContext *s, arg_rmrr *a)
  460. {
  461. - return (vext_check_isa_ill(s) &&
  462. - vext_check_overlap_mask(s, a->rd, a->vm, true) &&
  463. - vext_check_reg(s, a->rd, true) &&
  464. - vext_check_reg(s, a->rs2, false) &&
  465. - vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs2,
  466. - 1 << s->lmul) &&
  467. - (s->lmul < 0x3) && (s->sew < 0x3));
  468. + return require_rvv(s) &&
  469. + vext_check_isa_ill(s) &&
  470. + vext_check_ds(s, a->rd, a->rs2, a->vm);
  471. }
  472. static bool do_opivx_widen(DisasContext *s, arg_rmrr *a,
  473. @@ -1189,14 +1480,9 @@ GEN_OPIVX_WIDEN_TRANS(vwsub_vx)
  474. /* WIDEN OPIVV with WIDEN */
  475. static bool opiwv_widen_check(DisasContext *s, arg_rmrr *a)
  476. {
  477. - return (vext_check_isa_ill(s) &&
  478. - vext_check_overlap_mask(s, a->rd, a->vm, true) &&
  479. - vext_check_reg(s, a->rd, true) &&
  480. - vext_check_reg(s, a->rs2, true) &&
  481. - vext_check_reg(s, a->rs1, false) &&
  482. - vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs1,
  483. - 1 << s->lmul) &&
  484. - (s->lmul < 0x3) && (s->sew < 0x3));
  485. + return require_rvv(s) &&
  486. + vext_check_isa_ill(s) &&
  487. + vext_check_dds(s, a->rd, a->rs1, a->rs2, a->vm);
  488. }
  489. static bool do_opiwv_widen(DisasContext *s, arg_rmrr *a,
  490. @@ -1239,11 +1525,9 @@ GEN_OPIWV_WIDEN_TRANS(vwsub_wv)
  491. /* WIDEN OPIVX with WIDEN */
  492. static bool opiwx_widen_check(DisasContext *s, arg_rmrr *a)
  493. {
  494. - return (vext_check_isa_ill(s) &&
  495. - vext_check_overlap_mask(s, a->rd, a->vm, true) &&
  496. - vext_check_reg(s, a->rd, true) &&
  497. - vext_check_reg(s, a->rs2, true) &&
  498. - (s->lmul < 0x3) && (s->sew < 0x3));
  499. + return require_rvv(s) &&
  500. + vext_check_isa_ill(s) &&
  501. + vext_check_dd(s, a->rd, a->rs2, a->vm);
  502. }
  503. static bool do_opiwx_widen(DisasContext *s, arg_rmrr *a,
  504. @@ -1304,11 +1588,10 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
  505. */
  506. static bool opivv_vadc_check(DisasContext *s, arg_rmrr *a)
  507. {
  508. - return (vext_check_isa_ill(s) &&
  509. - vext_check_reg(s, a->rd, false) &&
  510. - vext_check_reg(s, a->rs2, false) &&
  511. - vext_check_reg(s, a->rs1, false) &&
  512. - ((a->rd != 0) || (s->lmul == 0)));
  513. + return require_rvv(s) &&
  514. + vext_check_isa_ill(s) &&
  515. + (a->rd != 0) &&
  516. + vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm);
  517. }
  518. GEN_OPIVV_TRANS(vadc_vvm, opivv_vadc_check)
  519. @@ -1320,11 +1603,9 @@ GEN_OPIVV_TRANS(vsbc_vvm, opivv_vadc_check)
  520. */
  521. static bool opivv_vmadc_check(DisasContext *s, arg_rmrr *a)
  522. {
  523. - return (vext_check_isa_ill(s) &&
  524. - vext_check_reg(s, a->rs2, false) &&
  525. - vext_check_reg(s, a->rs1, false) &&
  526. - vext_check_overlap_group(a->rd, 1, a->rs1, 1 << s->lmul) &&
  527. - vext_check_overlap_group(a->rd, 1, a->rs2, 1 << s->lmul));
  528. + return require_rvv(s) &&
  529. + vext_check_isa_ill(s) &&
  530. + vext_check_mss(s, a->rd, a->rs1, a->rs2);
  531. }
  532. GEN_OPIVV_TRANS(vmadc_vvm, opivv_vmadc_check)
  533. @@ -1332,10 +1613,10 @@ GEN_OPIVV_TRANS(vmsbc_vvm, opivv_vmadc_check)
  534. static bool opivx_vadc_check(DisasContext *s, arg_rmrr *a)
  535. {
  536. - return (vext_check_isa_ill(s) &&
  537. - vext_check_reg(s, a->rd, false) &&
  538. - vext_check_reg(s, a->rs2, false) &&
  539. - ((a->rd != 0) || (s->lmul == 0)));
  540. + return require_rvv(s) &&
  541. + vext_check_isa_ill(s) &&
  542. + (a->rd != 0) &&
  543. + vext_check_ss(s, a->rd, a->rs2, a->vm);
  544. }
  545. /* OPIVX without GVEC IR */
  546. @@ -1358,9 +1639,9 @@ GEN_OPIVX_TRANS(vsbc_vxm, opivx_vadc_check)
  547. static bool opivx_vmadc_check(DisasContext *s, arg_rmrr *a)
  548. {
  549. - return (vext_check_isa_ill(s) &&
  550. - vext_check_reg(s, a->rs2, false) &&
  551. - vext_check_overlap_group(a->rd, 1, a->rs2, 1 << s->lmul));
  552. + return require_rvv(s) &&
  553. + vext_check_isa_ill(s) &&
  554. + vext_check_ms(s, a->rd, a->rs2);
  555. }
  556. GEN_OPIVX_TRANS(vmadc_vxm, opivx_vmadc_check)
  557. @@ -1451,14 +1732,9 @@ GEN_OPIVI_GVEC_TRANS(vsra_vi, 1, vsra_vx, sari)
  558. /* Vector Narrowing Integer Right Shift Instructions */
  559. static bool opivv_narrow_check(DisasContext *s, arg_rmrr *a)
  560. {
  561. - return (vext_check_isa_ill(s) &&
  562. - vext_check_overlap_mask(s, a->rd, a->vm, false) &&
  563. - vext_check_reg(s, a->rd, false) &&
  564. - vext_check_reg(s, a->rs2, true) &&
  565. - vext_check_reg(s, a->rs1, false) &&
  566. - vext_check_overlap_group(a->rd, 1 << s->lmul, a->rs2,
  567. - 2 << s->lmul) &&
  568. - (s->lmul < 0x3) && (s->sew < 0x3));
  569. + return require_rvv(s) &&
  570. + vext_check_isa_ill(s) &&
  571. + vext_check_sds(s, a->rd, a->rs1, a->rs2, a->vm);
  572. }
  573. /* OPIVV with NARROW */
  574. @@ -1492,13 +1768,9 @@ GEN_OPIVV_NARROW_TRANS(vnsrl_vv)
  575. static bool opivx_narrow_check(DisasContext *s, arg_rmrr *a)
  576. {
  577. - return (vext_check_isa_ill(s) &&
  578. - vext_check_overlap_mask(s, a->rd, a->vm, false) &&
  579. - vext_check_reg(s, a->rd, false) &&
  580. - vext_check_reg(s, a->rs2, true) &&
  581. - vext_check_overlap_group(a->rd, 1 << s->lmul, a->rs2,
  582. - 2 << s->lmul) &&
  583. - (s->lmul < 0x3) && (s->sew < 0x3));
  584. + return require_rvv(s) &&
  585. + vext_check_isa_ill(s) &&
  586. + vext_check_sd(s, a->rd, a->rs2, a->vm);
  587. }
  588. /* OPIVX with NARROW */
  589. @@ -1546,13 +1818,11 @@ GEN_OPIVI_NARROW_TRANS(vnsrl_vi, 1, vnsrl_vx)
  590. */
  591. static bool opivv_cmp_check(DisasContext *s, arg_rmrr *a)
  592. {
  593. - return (vext_check_isa_ill(s) &&
  594. - vext_check_reg(s, a->rs2, false) &&
  595. - vext_check_reg(s, a->rs1, false) &&
  596. - ((vext_check_overlap_group(a->rd, 1, a->rs1, 1 << s->lmul) &&
  597. - vext_check_overlap_group(a->rd, 1, a->rs2, 1 << s->lmul)) ||
  598. - (s->lmul == 0)));
  599. + return require_rvv(s) &&
  600. + vext_check_isa_ill(s) &&
  601. + vext_check_mss(s, a->rd, a->rs1, a->rs2);
  602. }
  603. +
  604. GEN_OPIVV_TRANS(vmseq_vv, opivv_cmp_check)
  605. GEN_OPIVV_TRANS(vmsne_vv, opivv_cmp_check)
  606. GEN_OPIVV_TRANS(vmsltu_vv, opivv_cmp_check)
  607. @@ -1562,10 +1832,9 @@ GEN_OPIVV_TRANS(vmsle_vv, opivv_cmp_check)
  608. static bool opivx_cmp_check(DisasContext *s, arg_rmrr *a)
  609. {
  610. - return (vext_check_isa_ill(s) &&
  611. - vext_check_reg(s, a->rs2, false) &&
  612. - (vext_check_overlap_group(a->rd, 1, a->rs2, 1 << s->lmul) ||
  613. - (s->lmul == 0)));
  614. + return require_rvv(s) &&
  615. + vext_check_isa_ill(s) &&
  616. + vext_check_ms(s, a->rd, a->rs2);
  617. }
  618. GEN_OPIVX_TRANS(vmseq_vx, opivx_cmp_check)
  619. @@ -1644,10 +1913,10 @@ GEN_OPIVX_WIDEN_TRANS(vwmaccus_vx)
  620. /* Vector Integer Merge and Move Instructions */
  621. static bool trans_vmv_v_v(DisasContext *s, arg_vmv_v_v *a)
  622. {
  623. - if (vext_check_isa_ill(s) &&
  624. - vext_check_reg(s, a->rd, false) &&
  625. - vext_check_reg(s, a->rs1, false)) {
  626. -
  627. + if (require_rvv(s) &&
  628. + vext_check_isa_ill(s) &&
  629. + /* vmv.v.v has rs2 = 0 and vm = 1 */
  630. + vext_check_sss(s, a->rd, a->rs1, 0, 1)) {
  631. if (s->vl_eq_vlmax) {
  632. tcg_gen_gvec_mov(s->sew, vreg_ofs(s, a->rd),
  633. vreg_ofs(s, a->rs1),
  634. @@ -1674,9 +1943,10 @@ static bool trans_vmv_v_v(DisasContext *s, arg_vmv_v_v *a)
  635. typedef void gen_helper_vmv_vx(TCGv_ptr, TCGv_i64, TCGv_env, TCGv_i32);
  636. static bool trans_vmv_v_x(DisasContext *s, arg_vmv_v_x *a)
  637. {
  638. - if (vext_check_isa_ill(s) &&
  639. - vext_check_reg(s, a->rd, false)) {
  640. -
  641. + if (require_rvv(s) &&
  642. + vext_check_isa_ill(s) &&
  643. + /* vmv.v.x has rs2 = 0 and vm = 1 */
  644. + vext_check_ss(s, a->rd, 0, 1)) {
  645. TCGv s1;
  646. TCGLabel *over = gen_new_label();
  647. tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
  648. @@ -1717,9 +1987,10 @@ static bool trans_vmv_v_x(DisasContext *s, arg_vmv_v_x *a)
  649. static bool trans_vmv_v_i(DisasContext *s, arg_vmv_v_i *a)
  650. {
  651. - if (vext_check_isa_ill(s) &&
  652. - vext_check_reg(s, a->rd, false)) {
  653. -
  654. + if (require_rvv(s) &&
  655. + vext_check_isa_ill(s) &&
  656. + /* vmv.v.i has rs2 = 0 and vm = 1 */
  657. + vext_check_ss(s, a->rd, 0, 1)) {
  658. int64_t simm = sextract64(a->rs1, 0, 5);
  659. if (s->vl_eq_vlmax) {
  660. tcg_gen_gvec_dup_imm(s->sew, vreg_ofs(s, a->rd),
  661. @@ -1821,12 +2092,10 @@ GEN_OPIVI_NARROW_TRANS(vnclip_vi, 1, vnclip_vx)
  662. */
  663. static bool opfvv_check(DisasContext *s, arg_rmrr *a)
  664. {
  665. - return (vext_check_isa_ill(s) &&
  666. - vext_check_overlap_mask(s, a->rd, a->vm, false) &&
  667. - vext_check_reg(s, a->rd, false) &&
  668. - vext_check_reg(s, a->rs2, false) &&
  669. - vext_check_reg(s, a->rs1, false) &&
  670. - (s->sew != 0));
  671. + return require_rvv(s) &&
  672. + require_rvf(s) &&
  673. + vext_check_isa_ill(s) &&
  674. + vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm);
  675. }
  676. /* OPFVV without GVEC IR */
  677. @@ -1891,17 +2160,16 @@ static bool opfvf_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
  678. return true;
  679. }
  680. -static bool opfvf_check(DisasContext *s, arg_rmrr *a)
  681. -{
  682. /*
  683. * If the current SEW does not correspond to a supported IEEE floating-point
  684. * type, an illegal instruction exception is raised
  685. */
  686. - return (vext_check_isa_ill(s) &&
  687. - vext_check_overlap_mask(s, a->rd, a->vm, false) &&
  688. - vext_check_reg(s, a->rd, false) &&
  689. - vext_check_reg(s, a->rs2, false) &&
  690. - (s->sew != 0));
  691. +static bool opfvf_check(DisasContext *s, arg_rmrr *a)
  692. +{
  693. + return require_rvv(s) &&
  694. + require_rvf(s) &&
  695. + vext_check_isa_ill(s) &&
  696. + vext_check_ss(s, a->rd, a->rs2, a->vm);
  697. }
  698. /* OPFVF without GVEC IR */
  699. @@ -1931,16 +2199,10 @@ GEN_OPFVF_TRANS(vfrsub_vf, opfvf_check)
  700. /* Vector Widening Floating-Point Add/Subtract Instructions */
  701. static bool opfvv_widen_check(DisasContext *s, arg_rmrr *a)
  702. {
  703. - return (vext_check_isa_ill(s) &&
  704. - vext_check_overlap_mask(s, a->rd, a->vm, true) &&
  705. - vext_check_reg(s, a->rd, true) &&
  706. - vext_check_reg(s, a->rs2, false) &&
  707. - vext_check_reg(s, a->rs1, false) &&
  708. - vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs2,
  709. - 1 << s->lmul) &&
  710. - vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs1,
  711. - 1 << s->lmul) &&
  712. - (s->lmul < 0x3) && (s->sew < 0x3) && (s->sew != 0));
  713. + return require_rvv(s) &&
  714. + require_rvf(s) &&
  715. + vext_check_isa_ill(s) &&
  716. + vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm);
  717. }
  718. /* OPFVV with WIDEN */
  719. @@ -1974,13 +2236,10 @@ GEN_OPFVV_WIDEN_TRANS(vfwsub_vv, opfvv_widen_check)
  720. static bool opfvf_widen_check(DisasContext *s, arg_rmrr *a)
  721. {
  722. - return (vext_check_isa_ill(s) &&
  723. - vext_check_overlap_mask(s, a->rd, a->vm, true) &&
  724. - vext_check_reg(s, a->rd, true) &&
  725. - vext_check_reg(s, a->rs2, false) &&
  726. - vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs2,
  727. - 1 << s->lmul) &&
  728. - (s->lmul < 0x3) && (s->sew < 0x3) && (s->sew != 0));
  729. + return require_rvv(s) &&
  730. + require_rvf(s) &&
  731. + vext_check_isa_ill(s) &&
  732. + vext_check_ds(s, a->rd, a->rs2, a->vm);
  733. }
  734. /* OPFVF with WIDEN */
  735. @@ -2006,14 +2265,10 @@ GEN_OPFVF_WIDEN_TRANS(vfwsub_vf)
  736. static bool opfwv_widen_check(DisasContext *s, arg_rmrr *a)
  737. {
  738. - return (vext_check_isa_ill(s) &&
  739. - vext_check_overlap_mask(s, a->rd, a->vm, true) &&
  740. - vext_check_reg(s, a->rd, true) &&
  741. - vext_check_reg(s, a->rs2, true) &&
  742. - vext_check_reg(s, a->rs1, false) &&
  743. - vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs1,
  744. - 1 << s->lmul) &&
  745. - (s->lmul < 0x3) && (s->sew < 0x3) && (s->sew != 0));
  746. + return require_rvv(s) &&
  747. + require_rvf(s) &&
  748. + vext_check_isa_ill(s) &&
  749. + vext_check_dds(s, a->rd, a->rs1, a->rs2, a->vm);
  750. }
  751. /* WIDEN OPFVV with WIDEN */
  752. @@ -2047,11 +2302,10 @@ GEN_OPFWV_WIDEN_TRANS(vfwsub_wv)
  753. static bool opfwf_widen_check(DisasContext *s, arg_rmrr *a)
  754. {
  755. - return (vext_check_isa_ill(s) &&
  756. - vext_check_overlap_mask(s, a->rd, a->vm, true) &&
  757. - vext_check_reg(s, a->rd, true) &&
  758. - vext_check_reg(s, a->rs2, true) &&
  759. - (s->lmul < 0x3) && (s->sew < 0x3) && (s->sew != 0));
  760. + return require_rvv(s) &&
  761. + require_rvf(s) &&
  762. + vext_check_isa_ill(s) &&
  763. + vext_check_dd(s, a->rd, a->rs2, a->vm);
  764. }
  765. /* WIDEN OPFVF with WIDEN */
  766. @@ -2122,11 +2376,11 @@ GEN_OPFVF_WIDEN_TRANS(vfwnmsac_vf)
  767. */
  768. static bool opfv_check(DisasContext *s, arg_rmr *a)
  769. {
  770. - return (vext_check_isa_ill(s) &&
  771. - vext_check_overlap_mask(s, a->rd, a->vm, false) &&
  772. - vext_check_reg(s, a->rd, false) &&
  773. - vext_check_reg(s, a->rs2, false) &&
  774. - (s->sew != 0));
  775. + return require_rvv(s) &&
  776. + require_rvf(s) &&
  777. + vext_check_isa_ill(s) &&
  778. + /* OPFV instructions ignore vs1 check */
  779. + vext_check_ss(s, a->rd, a->rs2, a->vm);
  780. }
  781. #define GEN_OPFV_TRANS(NAME, CHECK) \
  782. @@ -2174,13 +2428,10 @@ GEN_OPFVF_TRANS(vfsgnjx_vf, opfvf_check)
  783. /* Vector Floating-Point Compare Instructions */
  784. static bool opfvv_cmp_check(DisasContext *s, arg_rmrr *a)
  785. {
  786. - return (vext_check_isa_ill(s) &&
  787. - vext_check_reg(s, a->rs2, false) &&
  788. - vext_check_reg(s, a->rs1, false) &&
  789. - (s->sew != 0) &&
  790. - ((vext_check_overlap_group(a->rd, 1, a->rs1, 1 << s->lmul) &&
  791. - vext_check_overlap_group(a->rd, 1, a->rs2, 1 << s->lmul)) ||
  792. - (s->lmul == 0)));
  793. + return require_rvv(s) &&
  794. + require_rvf(s) &&
  795. + vext_check_isa_ill(s) &&
  796. + vext_check_mss(s, a->rd, a->rs1, a->rs2);
  797. }
  798. GEN_OPFVV_TRANS(vmfeq_vv, opfvv_cmp_check)
  799. @@ -2191,11 +2442,10 @@ GEN_OPFVV_TRANS(vmford_vv, opfvv_cmp_check)
  800. static bool opfvf_cmp_check(DisasContext *s, arg_rmrr *a)
  801. {
  802. - return (vext_check_isa_ill(s) &&
  803. - vext_check_reg(s, a->rs2, false) &&
  804. - (s->sew != 0) &&
  805. - (vext_check_overlap_group(a->rd, 1, a->rs2, 1 << s->lmul) ||
  806. - (s->lmul == 0)));
  807. + return require_rvv(s) &&
  808. + require_rvf(s) &&
  809. + vext_check_isa_ill(s) &&
  810. + vext_check_ms(s, a->rd, a->rs2);
  811. }
  812. GEN_OPFVF_TRANS(vmfeq_vf, opfvf_cmp_check)
  813. @@ -2214,10 +2464,10 @@ GEN_OPFVF_TRANS(vfmerge_vfm, opfvf_check)
  814. static bool trans_vfmv_v_f(DisasContext *s, arg_vfmv_v_f *a)
  815. {
  816. - if (vext_check_isa_ill(s) &&
  817. - vext_check_reg(s, a->rd, false) &&
  818. - (s->sew != 0)) {
  819. -
  820. + if (require_rvv(s) &&
  821. + require_rvf(s) &&
  822. + vext_check_isa_ill(s) &&
  823. + require_align(a->rd, s->lmul)) {
  824. if (s->vl_eq_vlmax) {
  825. tcg_gen_gvec_dup_i64(s->sew, vreg_ofs(s, a->rd),
  826. MAXSZ(s), MAXSZ(s), cpu_fpr[a->rs1]);
  827. @@ -2263,13 +2513,11 @@ GEN_OPFV_TRANS(vfcvt_f_x_v, opfv_check)
  828. */
  829. static bool opfv_widen_check(DisasContext *s, arg_rmr *a)
  830. {
  831. - return (vext_check_isa_ill(s) &&
  832. - vext_check_overlap_mask(s, a->rd, a->vm, true) &&
  833. - vext_check_reg(s, a->rd, true) &&
  834. - vext_check_reg(s, a->rs2, false) &&
  835. - vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs2,
  836. - 1 << s->lmul) &&
  837. - (s->lmul < 0x3) && (s->sew < 0x3) && (s->sew != 0));
  838. + return require_rvv(s) &&
  839. + require_scale_rvf(s) &&
  840. + (s->sew != MO_8) &&
  841. + vext_check_isa_ill(s) &&
  842. + vext_check_ds(s, a->rd, a->rs2, a->vm);
  843. }
  844. #define GEN_OPFV_WIDEN_TRANS(NAME) \
  845. @@ -2311,13 +2559,12 @@ GEN_OPFV_WIDEN_TRANS(vfwcvt_f_f_v)
  846. */
  847. static bool opfv_narrow_check(DisasContext *s, arg_rmr *a)
  848. {
  849. - return (vext_check_isa_ill(s) &&
  850. - vext_check_overlap_mask(s, a->rd, a->vm, false) &&
  851. - vext_check_reg(s, a->rd, false) &&
  852. - vext_check_reg(s, a->rs2, true) &&
  853. - vext_check_overlap_group(a->rd, 1 << s->lmul, a->rs2,
  854. - 2 << s->lmul) &&
  855. - (s->lmul < 0x3) && (s->sew < 0x3) && (s->sew != 0));
  856. + return require_rvv(s) &&
  857. + require_rvf(s) &&
  858. + (s->sew != MO_64) &&
  859. + vext_check_isa_ill(s) &&
  860. + /* OPFV narrowing instructions ignore vs1 check */
  861. + vext_check_sd(s, a->rd, a->rs2, a->vm);
  862. }
  863. #define GEN_OPFV_NARROW_TRANS(NAME) \
  864. @@ -2357,7 +2604,9 @@ GEN_OPFV_NARROW_TRANS(vfncvt_f_f_v)
  865. /* Vector Single-Width Integer Reduction Instructions */
  866. static bool reduction_check(DisasContext *s, arg_rmrr *a)
  867. {
  868. - return vext_check_isa_ill(s) && vext_check_reg(s, a->rs2, false);
  869. + return require_rvv(s) &&
  870. + vext_check_isa_ill(s) &&
  871. + vext_check_reduction(s, a->rs2);
  872. }
  873. GEN_OPIVV_TRANS(vredsum_vs, reduction_check)
  874. @@ -2370,8 +2619,13 @@ GEN_OPIVV_TRANS(vredor_vs, reduction_check)
  875. GEN_OPIVV_TRANS(vredxor_vs, reduction_check)
  876. /* Vector Widening Integer Reduction Instructions */
  877. -GEN_OPIVV_WIDEN_TRANS(vwredsum_vs, reduction_check)
  878. -GEN_OPIVV_WIDEN_TRANS(vwredsumu_vs, reduction_check)
  879. +static bool reduction_widen_check(DisasContext *s, arg_rmrr *a)
  880. +{
  881. + return reduction_check(s, a) && (s->sew < MO_64);
  882. +}
  883. +
  884. +GEN_OPIVV_WIDEN_TRANS(vwredsum_vs, reduction_widen_check)
  885. +GEN_OPIVV_WIDEN_TRANS(vwredsumu_vs, reduction_widen_check)
  886. /* Vector Single-Width Floating-Point Reduction Instructions */
  887. GEN_OPFVV_TRANS(vfredsum_vs, reduction_check)
  888. @@ -2419,7 +2673,8 @@ GEN_MM_TRANS(vmxnor_mm)
  889. /* Vector mask population count vmpopc */
  890. static bool trans_vmpopc_m(DisasContext *s, arg_rmr *a)
  891. {
  892. - if (vext_check_isa_ill(s)) {
  893. + if (require_rvv(s) &&
  894. + vext_check_isa_ill(s)) {
  895. TCGv_ptr src2, mask;
  896. TCGv dst;
  897. TCGv_i32 desc;
  898. @@ -2450,7 +2705,8 @@ static bool trans_vmpopc_m(DisasContext *s, arg_rmr *a)
  899. /* vmfirst find-first-set mask bit */
  900. static bool trans_vmfirst_m(DisasContext *s, arg_rmr *a)
  901. {
  902. - if (vext_check_isa_ill(s)) {
  903. + if (require_rvv(s) &&
  904. + vext_check_isa_ill(s)) {
  905. TCGv_ptr src2, mask;
  906. TCGv dst;
  907. TCGv_i32 desc;
  908. @@ -2509,10 +2765,11 @@ GEN_M_TRANS(vmsof_m)
  909. /* Vector Iota Instruction */
  910. static bool trans_viota_m(DisasContext *s, arg_viota_m *a)
  911. {
  912. - if (vext_check_isa_ill(s) &&
  913. - vext_check_reg(s, a->rd, false) &&
  914. - vext_check_overlap_group(a->rd, 1 << s->lmul, a->rs2, 1) &&
  915. - (a->vm != 0 || a->rd != 0)) {
  916. + if (require_rvv(s) &&
  917. + vext_check_isa_ill(s) &&
  918. + require_noover(a->rd, s->lmul, a->rs2, 0) &&
  919. + require_vm(a->vm, a->rd) &&
  920. + require_align(a->rd, s->lmul)) {
  921. uint32_t data = 0;
  922. TCGLabel *over = gen_new_label();
  923. tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
  924. @@ -2536,9 +2793,10 @@ static bool trans_viota_m(DisasContext *s, arg_viota_m *a)
  925. /* Vector Element Index Instruction */
  926. static bool trans_vid_v(DisasContext *s, arg_vid_v *a)
  927. {
  928. - if (vext_check_isa_ill(s) &&
  929. - vext_check_reg(s, a->rd, false) &&
  930. - vext_check_overlap_mask(s, a->rd, a->vm, false)) {
  931. + if (require_rvv(s) &&
  932. + vext_check_isa_ill(s) &&
  933. + require_align(a->rd, s->lmul) &&
  934. + require_vm(a->vm, a->rd)) {
  935. uint32_t data = 0;
  936. TCGLabel *over = gen_new_label();
  937. tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
  938. @@ -2788,41 +3046,48 @@ static bool trans_vfmv_s_f(DisasContext *s, arg_vfmv_s_f *a)
  939. /* Vector Slide Instructions */
  940. static bool slideup_check(DisasContext *s, arg_rmrr *a)
  941. {
  942. - return (vext_check_isa_ill(s) &&
  943. - vext_check_overlap_mask(s, a->rd, a->vm, true) &&
  944. - vext_check_reg(s, a->rd, false) &&
  945. - vext_check_reg(s, a->rs2, false) &&
  946. - (a->rd != a->rs2));
  947. + return require_rvv(s) &&
  948. + vext_check_isa_ill(s) &&
  949. + vext_check_slide(s, a->rd, a->rs2, a->vm, true);
  950. }
  951. GEN_OPIVX_TRANS(vslideup_vx, slideup_check)
  952. GEN_OPIVX_TRANS(vslide1up_vx, slideup_check)
  953. GEN_OPIVI_TRANS(vslideup_vi, 1, vslideup_vx, slideup_check)
  954. -GEN_OPIVX_TRANS(vslidedown_vx, opivx_check)
  955. -GEN_OPIVX_TRANS(vslide1down_vx, opivx_check)
  956. -GEN_OPIVI_TRANS(vslidedown_vi, 1, vslidedown_vx, opivx_check)
  957. +static bool slidedown_check(DisasContext *s, arg_rmrr *a)
  958. +{
  959. + return require_rvv(s) &&
  960. + vext_check_isa_ill(s) &&
  961. + vext_check_slide(s, a->rd, a->rs2, a->vm, false);
  962. +}
  963. +
  964. +GEN_OPIVX_TRANS(vslidedown_vx, slidedown_check)
  965. +GEN_OPIVX_TRANS(vslide1down_vx, slidedown_check)
  966. +GEN_OPIVI_TRANS(vslidedown_vi, 1, vslidedown_vx, slidedown_check)
  967. /* Vector Register Gather Instruction */
  968. static bool vrgather_vv_check(DisasContext *s, arg_rmrr *a)
  969. {
  970. - return (vext_check_isa_ill(s) &&
  971. - vext_check_overlap_mask(s, a->rd, a->vm, true) &&
  972. - vext_check_reg(s, a->rd, false) &&
  973. - vext_check_reg(s, a->rs1, false) &&
  974. - vext_check_reg(s, a->rs2, false) &&
  975. - (a->rd != a->rs2) && (a->rd != a->rs1));
  976. + return require_rvv(s) &&
  977. + vext_check_isa_ill(s) &&
  978. + require_align(a->rd, s->lmul) &&
  979. + require_align(a->rs1, s->lmul) &&
  980. + require_align(a->rs2, s->lmul) &&
  981. + (a->rd != a->rs2 && a->rd != a->rs1) &&
  982. + require_vm(a->vm, a->rd);
  983. }
  984. GEN_OPIVV_TRANS(vrgather_vv, vrgather_vv_check)
  985. static bool vrgather_vx_check(DisasContext *s, arg_rmrr *a)
  986. {
  987. - return (vext_check_isa_ill(s) &&
  988. - vext_check_overlap_mask(s, a->rd, a->vm, true) &&
  989. - vext_check_reg(s, a->rd, false) &&
  990. - vext_check_reg(s, a->rs2, false) &&
  991. - (a->rd != a->rs2));
  992. + return require_rvv(s) &&
  993. + vext_check_isa_ill(s) &&
  994. + require_align(a->rd, s->lmul) &&
  995. + require_align(a->rs2, s->lmul) &&
  996. + (a->rd != a->rs2) &&
  997. + require_vm(a->vm, a->rd);
  998. }
  999. /* vrgather.vx vd, vs2, rs1, vm # vd[i] = (x[rs1] >= VLMAX) ? 0 : vs2[rs1] */
  1000. @@ -2883,14 +3148,20 @@ static bool trans_vrgather_vi(DisasContext *s, arg_rmrr *a)
  1001. return true;
  1002. }
  1003. -/* Vector Compress Instruction */
  1004. +/*
  1005. + * Vector Compress Instruction
  1006. + *
  1007. + * The destination vector register group cannot overlap the
  1008. + * source vector register group or the source mask register.
  1009. + */
  1010. static bool vcompress_vm_check(DisasContext *s, arg_r *a)
  1011. {
  1012. - return (vext_check_isa_ill(s) &&
  1013. - vext_check_reg(s, a->rd, false) &&
  1014. - vext_check_reg(s, a->rs2, false) &&
  1015. - vext_check_overlap_group(a->rd, 1 << s->lmul, a->rs1, 1) &&
  1016. - (a->rd != a->rs2));
  1017. + return require_rvv(s) &&
  1018. + vext_check_isa_ill(s) &&
  1019. + require_align(a->rd, s->lmul) &&
  1020. + require_align(a->rs2, s->lmul) &&
  1021. + (a->rd != a->rs2) &&
  1022. + !is_overlapped(a->rd, 1 << MAX(s->lmul, 0), a->rs1, 1);
  1023. }
  1024. static bool trans_vcompress_vm(DisasContext *s, arg_r *a)
  1025. --
  1026. 2.33.1
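
For reference, the subtlest of the new checks is the "rule 3" branch of require_noover(): a widening destination group may overlap its source only in the destination's highest-numbered registers. A standalone sketch (not part of the patch; register numbers and LMUL encodings are made up for demonstration) of how that branch behaves:

    #include <stdio.h>

    #define MAX(a, b) ((a) > (b) ? (a) : (b))
    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    static int is_overlapped(int astart, int asize, int bstart, int bsize)
    {
        int aend = astart + asize;
        int bend = bstart + bsize;
        return MAX(aend, bend) - MIN(astart, bstart) < asize + bsize;
    }

    /* Same logic as require_noover() in the patch. */
    static int require_noover(int dst, int dst_lmul, int src, int src_lmul)
    {
        int dst_size = dst_lmul <= 0 ? 1 : 1 << dst_lmul;
        int src_size = src_lmul <= 0 ? 1 : 1 << src_lmul;

        if (dst_size > src_size) {
            if (dst < src &&
                src_lmul >= 0 &&
                is_overlapped(dst, dst_size, src, src_size) &&
                !is_overlapped(dst, dst_size, src + src_size, src_size)) {
                return 1;   /* rule 3: overlap confined to the high part of dst */
            }
        }
        return !is_overlapped(dst, dst_size, src, src_size);
    }

    int main(void)
    {
        /* Widening op: dst v4-v5 (EMUL=2), src v5 (EMUL=1).
         * The overlap is the last register of dst -> allowed (prints 1). */
        printf("%d\n", require_noover(4, 1, 5, 0));
        /* dst v4-v5, src v4: overlap is in the low part of dst -> rejected (0). */
        printf("%d\n", require_noover(4, 1, 4, 0));
        return 0;
    }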